From ac370a73710901152a2197b91a1bbe997aafd34c Mon Sep 17 00:00:00 2001
From: sayantn
Date: Sun, 7 Jul 2024 16:02:36 +0530
Subject: [PATCH 01/11] AVX512FP16 Part 0: Types

---
 crates/core_arch/src/lib.rs              |  3 +-
 crates/core_arch/src/simd.rs             | 73 +++++++++++++++++++++-
 crates/core_arch/src/x86/mod.rs          | 79 ++++++++++++++++++++++++
 crates/core_arch/src/x86/test.rs         | 33 ++++++++++
 crates/stdarch-verify/src/lib.rs         |  4 ++
 crates/stdarch-verify/tests/x86-intel.rs | 19 ++++++
 6 files changed, 208 insertions(+), 3 deletions(-)

diff --git a/crates/core_arch/src/lib.rs b/crates/core_arch/src/lib.rs
index 1901149074..a7a02783e0 100644
--- a/crates/core_arch/src/lib.rs
+++ b/crates/core_arch/src/lib.rs
@@ -34,7 +34,8 @@
     target_feature_11,
     generic_arg_infer,
     asm_experimental_arch,
-    sha512_sm_x86
+    sha512_sm_x86,
+    f16
 )]
 #![cfg_attr(test, feature(test, abi_vectorcall, stdarch_internal))]
 #![deny(clippy::missing_inline_in_public_items)]
diff --git a/crates/core_arch/src/simd.rs b/crates/core_arch/src/simd.rs
index 4c637f49f3..3082334102 100644
--- a/crates/core_arch/src/simd.rs
+++ b/crates/core_arch/src/simd.rs
@@ -3,9 +3,10 @@
 #![allow(non_camel_case_types)]
 
 macro_rules! simd_ty {
-    ($id:ident [$ety:ident]: $($elem_name:ident),*) => {
+    ($(#[$stability:meta])? $id:ident [$ety:ident]: $($elem_name:ident),*) => {
         #[repr(simd)]
         #[derive(Copy, Clone, Debug, PartialEq)]
+        $(#[$stability])?
         pub(crate) struct $id { $(pub $elem_name: $ety),* }
 
         #[allow(clippy::use_self)]
@@ -186,9 +187,20 @@ simd_ty!(
 simd_ty!(i32x4[i32]: x0, x1, x2, x3);
 simd_ty!(i64x2[i64]: x0, x1);
 
+simd_ty!(
+    #[unstable(feature = "f16", issue = "116909")]
+    f16x8[f16]:
+    x0,
+    x1,
+    x2,
+    x3,
+    x4,
+    x5,
+    x6,
+    x7
+);
 simd_ty!(f32x4[f32]: x0, x1, x2, x3);
 simd_ty!(f64x2[f64]: x0, x1);
-simd_ty!(f64x4[f64]: x0, x1, x2, x3);
 
 simd_m_ty!(
     m8x16[i8]:
@@ -359,6 +371,26 @@ simd_ty!(
 );
 simd_ty!(i64x4[i64]: x0, x1, x2, x3);
 
+simd_ty!(
+    #[unstable(feature = "f16", issue = "116909")]
+    f16x16[f16]:
+    x0,
+    x1,
+    x2,
+    x3,
+    x4,
+    x5,
+    x6,
+    x7,
+    x8,
+    x9,
+    x10,
+    x11,
+    x12,
+    x13,
+    x14,
+    x15
+);
 simd_ty!(
     f32x8[f32]:
     x0,
@@ -370,6 +402,7 @@ simd_ty!(
     x6,
     x7
 );
+simd_ty!(f64x4[f64]: x0, x1, x2, x3);
 
 simd_m_ty!(
     m8x32[i8]:
@@ -688,6 +721,42 @@ simd_ty!(
     x15
 );
 
+simd_ty!(
+    #[unstable(feature = "f16", issue = "116909")]
+    f16x32[f16]:
+    x0,
+    x1,
+    x2,
+    x3,
+    x4,
+    x5,
+    x6,
+    x7,
+    x8,
+    x9,
+    x10,
+    x11,
+    x12,
+    x13,
+    x14,
+    x15,
+    x16,
+    x17,
+    x18,
+    x19,
+    x20,
+    x21,
+    x22,
+    x23,
+    x24,
+    x25,
+    x26,
+    x27,
+    x28,
+    x29,
+    x30,
+    x31
+);
 simd_ty!(
     f32x16[f32]:
     x0,
diff --git a/crates/core_arch/src/x86/mod.rs b/crates/core_arch/src/x86/mod.rs
index 9365fe10a2..d3d4381cc7 100644
--- a/crates/core_arch/src/x86/mod.rs
+++ b/crates/core_arch/src/x86/mod.rs
@@ -335,6 +335,41 @@ types! {
         u16, u16, u16, u16, u16, u16, u16, u16, u16, u16, u16, u16, u16, u16, u16, u16
     );
 
+    /// 128-bit wide set of 8 `f16` types, x86-specific
+    ///
+    /// This type is the same as the `__m128h` type defined by Intel,
+    /// representing a 128-bit SIMD register which internally consists of
+    /// 8 packed `f16` instances. Its purpose is for f16-related intrinsic
+    /// implementations.
+    #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+    pub struct __m128h(f16, f16, f16, f16, f16, f16, f16, f16);
+
+    /// 256-bit wide set of 16 `f16` types, x86-specific
+    ///
+    /// This type is the same as the `__m256h` type defined by Intel,
+    /// representing a 256-bit SIMD register which internally consists of
+    /// 16 packed `f16` instances. Its purpose is for f16-related intrinsic
+    /// implementations.
+    #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+    pub struct __m256h(
+        f16, f16, f16, f16, f16, f16, f16, f16,
+        f16, f16, f16, f16, f16, f16, f16, f16
+    );
+
+    /// 512-bit wide set of 32 `f16` types, x86-specific
+    ///
+    /// This type is the same as the `__m512h` type defined by Intel,
+    /// representing a 512-bit SIMD register which internally consists of
+    /// 32 packed `f16` instances. Its purpose is for f16-related intrinsic
+    /// implementations.
+    #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+    pub struct __m512h(
+        f16, f16, f16, f16, f16, f16, f16, f16,
+        f16, f16, f16, f16, f16, f16, f16, f16,
+        f16, f16, f16, f16, f16, f16, f16, f16,
+        f16, f16, f16, f16, f16, f16, f16, f16
+    );
 }
 
 /// The BFloat16 type used in AVX-512 intrinsics.
@@ -761,6 +796,57 @@ impl m512bhExt for __m512bh {
     }
 }
 
+#[allow(non_camel_case_types)]
+pub(crate) trait m128hExt: Sized {
+    fn as_m128h(self) -> __m128h;
+
+    #[inline]
+    fn as_f16x8(self) -> crate::core_arch::simd::f16x8 {
+        unsafe { transmute(self.as_m128h()) }
+    }
+}
+
+impl m128hExt for __m128h {
+    #[inline]
+    fn as_m128h(self) -> Self {
+        self
+    }
+}
+
+#[allow(non_camel_case_types)]
+pub(crate) trait m256hExt: Sized {
+    fn as_m256h(self) -> __m256h;
+
+    #[inline]
+    fn as_f16x16(self) -> crate::core_arch::simd::f16x16 {
+        unsafe { transmute(self.as_m256h()) }
+    }
+}
+
+impl m256hExt for __m256h {
+    #[inline]
+    fn as_m256h(self) -> Self {
+        self
+    }
+}
+
+#[allow(non_camel_case_types)]
+pub(crate) trait m512hExt: Sized {
+    fn as_m512h(self) -> __m512h;
+
+    #[inline]
+    fn as_f16x32(self) -> crate::core_arch::simd::f16x32 {
+        unsafe { transmute(self.as_m512h()) }
+    }
+}
+
+impl m512hExt for __m512h {
+    #[inline]
+    fn as_m512h(self) -> Self {
+        self
+    }
+}
+
 mod eflags;
 #[stable(feature = "simd_x86", since = "1.27.0")]
 pub use self::eflags::*;
diff --git a/crates/core_arch/src/x86/test.rs b/crates/core_arch/src/x86/test.rs
index 2c88650af3..ebb67356a4 100644
--- a/crates/core_arch/src/x86/test.rs
+++ b/crates/core_arch/src/x86/test.rs
@@ -36,6 +36,17 @@ pub unsafe fn get_m128(a: __m128, idx: usize) -> f32 {
     transmute::<_, [f32; 4]>(a)[idx]
 }
 
+#[track_caller]
+#[target_feature(enable = "avx512fp16")]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn assert_eq_m128h(a: __m128h, b: __m128h) {
+    // FIXME: use `_mm_cmp_ph_mask::<_CMP_EQ_OQ>` when it's implemented
+    let r = _mm_cmpeq_epi16_mask(transmute(a), transmute(b));
+    if r != 0b1111_1111 {
+        panic!("{:?} != {:?}", a, b);
+    }
+}
+
 // not actually an intrinsic but useful in various tests as we ported from
 // `i64x2::new` which is backwards from `_mm_set_epi64x`
 #[target_feature(enable = "sse2")]
@@ -77,6 +88,17 @@ pub unsafe fn get_m256(a: __m256, idx: usize) -> f32 {
     transmute::<_, [f32; 8]>(a)[idx]
 }
 
+#[track_caller]
+#[target_feature(enable = "avx512fp16")]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn assert_eq_m256h(a: __m256h, b: __m256h) {
+    // FIXME: use `_mm256_cmp_ph_mask::<_CMP_EQ_OQ>` when it's implemented
+    let r = _mm256_cmpeq_epi16_mask(transmute(a), transmute(b));
+    if r != 0b11111111_11111111 {
+        panic!("{:?} != {:?}", a, b);
+    }
+}
+
 #[target_feature(enable = "avx512f")]
 pub unsafe fn get_m512(a: __m512, idx: usize) -> f32 {
     transmute::<_, [f32; 16]>(a)[idx]
@@ -139,3 +161,14 @@ pub unsafe fn assert_eq_m512d(a: __m512d, b: __m512d) {
         panic!("{:?} != {:?}", a, b);
     }
 }
+
+#[track_caller]
+#[target_feature(enable = "avx512fp16")]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn assert_eq_m512h(a: __m512h, b: __m512h) {
+    // FIXME: use `_mm512_cmp_ph_mask::<_CMP_EQ_OQ>` when it's implemented
+    let r = _mm512_cmpeq_epi16_mask(transmute(a), transmute(b));
+    if r != 0b11111111_11111111_11111111_11111111 {
+        panic!("{:?} != {:?}", a, b);
+    }
+}
diff --git a/crates/stdarch-verify/src/lib.rs b/crates/stdarch-verify/src/lib.rs
index 106aeabdb0..efb5d50e26 100644
--- a/crates/stdarch-verify/src/lib.rs
+++ b/crates/stdarch-verify/src/lib.rs
@@ -182,14 +182,17 @@ fn to_type(t: &syn::Type) -> proc_macro2::TokenStream {
         "__m128" => quote! { &M128 },
         "__m128bh" => quote! { &M128BH },
         "__m128d" => quote! { &M128D },
+        "__m128h" => quote! { &M128H },
         "__m128i" => quote! { &M128I },
         "__m256" => quote! { &M256 },
         "__m256bh" => quote! { &M256BH },
         "__m256d" => quote! { &M256D },
+        "__m256h" => quote! { &M256H },
         "__m256i" => quote! { &M256I },
         "__m512" => quote! { &M512 },
         "__m512bh" => quote! { &M512BH },
         "__m512d" => quote! { &M512D },
+        "__m512h" => quote! { &M512H },
         "__m512i" => quote! { &M512I },
         "__mmask8" => quote! { &MMASK8 },
         "__mmask16" => quote! { &MMASK16 },
@@ -201,6 +204,7 @@ fn to_type(t: &syn::Type) -> proc_macro2::TokenStream {
         "_MM_PERM_ENUM" => quote! { &MM_PERM_ENUM },
         "bool" => quote! { &BOOL },
         "bf16" => quote! { &BF16 },
+        "f16" => quote! { &F16 },
         "f32" => quote! { &F32 },
         "f64" => quote! { &F64 },
         "i16" => quote! { &I16 },
diff --git a/crates/stdarch-verify/tests/x86-intel.rs b/crates/stdarch-verify/tests/x86-intel.rs
index d035b4edff..fadaa6a4b1 100644
--- a/crates/stdarch-verify/tests/x86-intel.rs
+++ b/crates/stdarch-verify/tests/x86-intel.rs
@@ -24,6 +24,7 @@ struct Function {
 }
 
 static BF16: Type = Type::BFloat16;
+static F16: Type = Type::PrimFloat(16);
 static F32: Type = Type::PrimFloat(32);
 static F64: Type = Type::PrimFloat(64);
 static I8: Type = Type::PrimSigned(8);
@@ -41,14 +42,17 @@ static M128: Type = Type::M128;
 static M128BH: Type = Type::M128BH;
 static M128I: Type = Type::M128I;
 static M128D: Type = Type::M128D;
+static M128H: Type = Type::M128H;
 static M256: Type = Type::M256;
 static M256BH: Type = Type::M256BH;
 static M256I: Type = Type::M256I;
 static M256D: Type = Type::M256D;
+static M256H: Type = Type::M256H;
 static M512: Type = Type::M512;
 static M512BH: Type = Type::M512BH;
 static M512I: Type = Type::M512I;
 static M512D: Type = Type::M512D;
+static M512H: Type = Type::M512H;
 static MMASK8: Type = Type::MMASK8;
 static MMASK16: Type = Type::MMASK16;
 static MMASK32: Type = Type::MMASK32;
@@ -73,14 +77,17 @@ enum Type {
     M128,
     M128BH,
     M128D,
+    M128H,
     M128I,
     M256,
     M256BH,
     M256D,
+    M256H,
     M256I,
     M512,
     M512BH,
     M512D,
+    M512H,
     M512I,
     MMASK8,
     MMASK16,
@@ -221,13 +228,16 @@ fn verify_all_signatures() {
         "_mm_undefined_ps",
         "_mm_undefined_pd",
         "_mm_undefined_si128",
+        "_mm_undefined_ph",
         "_mm256_undefined_ps",
         "_mm256_undefined_pd",
         "_mm256_undefined_si256",
+        "_mm256_undefined_ph",
         "_mm512_undefined_ps",
         "_mm512_undefined_pd",
         "_mm512_undefined_epi32",
         "_mm512_undefined",
+        "_mm512_undefined_ph",
         // Has doc-tests instead
         "_mm256_shuffle_epi32",
         "_mm256_unpackhi_epi8",
@@ -483,6 +493,9 @@ fn matches(rust: &Function, intel: &Intrinsic) -> Result<(), String> {
         // The XML file names BF16 as "avx512_bf16", while Rust calls
         // it "avx512bf16".
         "avx512_bf16" => String::from("avx512bf16"),
+        // The XML file names FP16 as "avx512_fp16", while Rust calls
+        // it "avx512fp16".
+ "avx512_fp16" => String::from("avx512fp16"), // The XML file names AVX-VNNI as "avx_vnni", while Rust calls // it "avxvnni" "avx_vnni" => String::from("avxvnni"), @@ -709,6 +722,7 @@ fn equate( } } match (t, &intel[..]) { + (&Type::PrimFloat(16), "_Float16") => {} (&Type::PrimFloat(32), "float") => {} (&Type::PrimFloat(64), "double") => {} (&Type::PrimSigned(8), "__int8" | "char") => {} @@ -728,14 +742,17 @@ fn equate( (&Type::M128BH, "__m128bh") => {} (&Type::M128I, "__m128i") => {} (&Type::M128D, "__m128d") => {} + (&Type::M128H, "__m128h") => {} (&Type::M256, "__m256") => {} (&Type::M256BH, "__m256bh") => {} (&Type::M256I, "__m256i") => {} (&Type::M256D, "__m256d") => {} + (&Type::M256H, "__m256h") => {} (&Type::M512, "__m512") => {} (&Type::M512BH, "__m512bh") => {} (&Type::M512I, "__m512i") => {} (&Type::M512D, "__m512d") => {} + (&Type::M512H, "__m512h") => {} (&Type::MMASK64, "__mmask64") => {} (&Type::MMASK32, "__mmask32") => {} (&Type::MMASK16, "__mmask16") => {} @@ -771,6 +788,7 @@ fn equate( (&Type::MutPtr(&Type::M512D), "__m512d*") => {} (&Type::ConstPtr(_), "void const*") => {} + (&Type::ConstPtr(&Type::PrimFloat(16)), "_Float16 const*") => {} (&Type::ConstPtr(&Type::PrimFloat(32)), "float const*") => {} (&Type::ConstPtr(&Type::PrimFloat(64)), "double const*") => {} (&Type::ConstPtr(&Type::PrimSigned(8)), "char const*") => {} @@ -785,6 +803,7 @@ fn equate( (&Type::ConstPtr(&Type::M128BH), "__m128bh const*") => {} (&Type::ConstPtr(&Type::M128I), "__m128i const*") => {} (&Type::ConstPtr(&Type::M128D), "__m128d const*") => {} + (&Type::ConstPtr(&Type::M128H), "__m128h const*") => {} (&Type::ConstPtr(&Type::M256), "__m256 const*") => {} (&Type::ConstPtr(&Type::M256BH), "__m256bh const*") => {} (&Type::ConstPtr(&Type::M256I), "__m256i const*") => {} From 1b093be687c125850a250ecc3f680843ff908be5 Mon Sep 17 00:00:00 2001 From: sayantn Date: Sun, 7 Jul 2024 16:03:17 +0530 Subject: [PATCH 02/11] AVX512FP16 Part 1 Add-Sub-Mul-Div, Load-Store-Move, `comi`, `set` --- crates/core_arch/missing-x86.md | 149 - crates/core_arch/src/x86/avx512fp16.rs | 4004 ++++++++++++++++++++++++ crates/core_arch/src/x86/mod.rs | 4 + 3 files changed, 4008 insertions(+), 149 deletions(-) create mode 100644 crates/core_arch/src/x86/avx512fp16.rs diff --git a/crates/core_arch/missing-x86.md b/crates/core_arch/missing-x86.md index 0916befe04..7bc2456ddd 100644 --- a/crates/core_arch/missing-x86.md +++ b/crates/core_arch/missing-x86.md @@ -53,33 +53,9 @@
["AVX512_FP16"]

- * [ ] [`_mm256_castpd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castpd_ph) - * [ ] [`_mm256_castph128_ph256`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castph128_ph256) - * [ ] [`_mm256_castph256_ph128`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castph256_ph128) - * [ ] [`_mm256_castph_pd`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castph_pd) - * [ ] [`_mm256_castph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castph_ps) - * [ ] [`_mm256_castph_si256`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castph_si256) - * [ ] [`_mm256_castps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castps_ph) - * [ ] [`_mm256_castsi256_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castsi256_ph) * [ ] [`_mm256_cvtsh_h`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtsh_h) * [ ] [`_mm256_set1_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_set1_pch) - * [ ] [`_mm256_set1_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_set1_ph) - * [ ] [`_mm256_set_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_set_ph) - * [ ] [`_mm256_setr_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_setr_ph) - * [ ] [`_mm256_zextph128_ph256`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_zextph128_ph256) * [ ] [`_mm512_abs_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_abs_ph) - * [ ] [`_mm512_add_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_add_ph) - * [ ] [`_mm512_add_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_add_round_ph) - * [ ] [`_mm512_castpd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_castpd_ph) - * [ ] [`_mm512_castph128_ph512`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_castph128_ph512) - * [ ] [`_mm512_castph256_ph512`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_castph256_ph512) - * [ ] [`_mm512_castph512_ph128`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_castph512_ph128) - * [ ] [`_mm512_castph512_ph256`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_castph512_ph256) - * [ ] [`_mm512_castph_pd`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_castph_pd) - * [ ] [`_mm512_castph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_castph_ps) - * [ ] [`_mm512_castph_si512`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_castph_si512) - * [ ] [`_mm512_castps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_castps_ph) - * [ ] [`_mm512_castsi512_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_castsi512_ph) * [ ] [`_mm512_cmp_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmp_ph_mask) * [ ] [`_mm512_cmp_round_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmp_round_ph_mask) * [ ] [`_mm512_cmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmul_pch) @@ -130,8 +106,6 @@ * [ ] 
[`_mm512_cvtx_roundps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtx_roundps_ph) * [ ] [`_mm512_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtxph_ps) * [ ] [`_mm512_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtxps_ph) - * [ ] [`_mm512_div_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_div_ph) - * [ ] [`_mm512_div_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_div_round_ph) * [ ] [`_mm512_fcmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fcmadd_pch) * [ ] [`_mm512_fcmadd_round_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fcmadd_round_pch) * [ ] [`_mm512_fcmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fcmul_pch) @@ -157,8 +131,6 @@ * [ ] [`_mm512_getexp_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_getexp_round_ph) * [ ] [`_mm512_getmant_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_getmant_ph) * [ ] [`_mm512_getmant_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_getmant_round_ph) - * [ ] [`_mm512_load_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_load_ph) - * [ ] [`_mm512_loadu_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_loadu_ph) * [ ] [`_mm512_mask3_fcmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fcmadd_pch) * [ ] [`_mm512_mask3_fcmadd_round_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fcmadd_round_pch) * [ ] [`_mm512_mask3_fmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fmadd_pch) @@ -175,8 +147,6 @@ * [ ] [`_mm512_mask3_fnmadd_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fnmadd_round_ph) * [ ] [`_mm512_mask3_fnmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fnmsub_ph) * [ ] [`_mm512_mask3_fnmsub_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fnmsub_round_ph) - * [ ] [`_mm512_mask_add_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_add_ph) - * [ ] [`_mm512_mask_add_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_add_round_ph) * [ ] [`_mm512_mask_blend_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_blend_ph) * [ ] [`_mm512_mask_cmp_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmp_ph_mask) * [ ] [`_mm512_mask_cmp_round_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmp_round_ph_mask) @@ -227,8 +197,6 @@ * [ ] [`_mm512_mask_cvtx_roundps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtx_roundps_ph) * [ ] [`_mm512_mask_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtxph_ps) * [ ] [`_mm512_mask_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtxps_ph) - * [ ] [`_mm512_mask_div_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_div_ph) - * [ ] 
[`_mm512_mask_div_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_div_round_ph) * [ ] [`_mm512_mask_fcmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fcmadd_pch) * [ ] [`_mm512_mask_fcmadd_round_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fcmadd_round_pch) * [ ] [`_mm512_mask_fcmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fcmul_pch) @@ -259,9 +227,7 @@ * [ ] [`_mm512_mask_min_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_min_ph) * [ ] [`_mm512_mask_min_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_min_round_ph) * [ ] [`_mm512_mask_mul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_mul_pch) - * [ ] [`_mm512_mask_mul_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_mul_ph) * [ ] [`_mm512_mask_mul_round_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_mul_round_pch) - * [ ] [`_mm512_mask_mul_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_mul_round_ph) * [ ] [`_mm512_mask_rcp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_rcp_ph) * [ ] [`_mm512_mask_reduce_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_reduce_ph) * [ ] [`_mm512_mask_reduce_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_reduce_round_ph) @@ -272,10 +238,6 @@ * [ ] [`_mm512_mask_scalef_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_scalef_round_ph) * [ ] [`_mm512_mask_sqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_sqrt_ph) * [ ] [`_mm512_mask_sqrt_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_sqrt_round_ph) - * [ ] [`_mm512_mask_sub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_sub_ph) - * [ ] [`_mm512_mask_sub_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_sub_round_ph) - * [ ] [`_mm512_maskz_add_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_add_ph) - * [ ] [`_mm512_maskz_add_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_add_round_ph) * [ ] [`_mm512_maskz_cmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cmul_pch) * [ ] [`_mm512_maskz_cmul_round_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cmul_round_pch) * [ ] [`_mm512_maskz_conj_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_conj_pch) @@ -323,8 +285,6 @@ * [ ] [`_mm512_maskz_cvtx_roundps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtx_roundps_ph) * [ ] [`_mm512_maskz_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtxph_ps) * [ ] [`_mm512_maskz_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtxps_ph) - * [ ] [`_mm512_maskz_div_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_div_ph) - * [ ] 
[`_mm512_maskz_div_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_div_round_ph) * [ ] [`_mm512_maskz_fcmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fcmadd_pch) * [ ] [`_mm512_maskz_fcmadd_round_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fcmadd_round_pch) * [ ] [`_mm512_maskz_fcmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fcmul_pch) @@ -354,9 +314,7 @@ * [ ] [`_mm512_maskz_min_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_min_ph) * [ ] [`_mm512_maskz_min_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_min_round_ph) * [ ] [`_mm512_maskz_mul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_mul_pch) - * [ ] [`_mm512_maskz_mul_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_mul_ph) * [ ] [`_mm512_maskz_mul_round_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_mul_round_pch) - * [ ] [`_mm512_maskz_mul_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_mul_round_ph) * [ ] [`_mm512_maskz_rcp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_rcp_ph) * [ ] [`_mm512_maskz_reduce_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_reduce_ph) * [ ] [`_mm512_maskz_reduce_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_reduce_round_ph) @@ -367,16 +325,12 @@ * [ ] [`_mm512_maskz_scalef_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_scalef_round_ph) * [ ] [`_mm512_maskz_sqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_sqrt_ph) * [ ] [`_mm512_maskz_sqrt_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_sqrt_round_ph) - * [ ] [`_mm512_maskz_sub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_sub_ph) - * [ ] [`_mm512_maskz_sub_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_sub_round_ph) * [ ] [`_mm512_max_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_max_ph) * [ ] [`_mm512_max_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_max_round_ph) * [ ] [`_mm512_min_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_min_ph) * [ ] [`_mm512_min_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_min_round_ph) * [ ] [`_mm512_mul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mul_pch) - * [ ] [`_mm512_mul_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mul_ph) * [ ] [`_mm512_mul_round_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mul_round_pch) - * [ ] [`_mm512_mul_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mul_round_ph) * [ ] [`_mm512_permutex2var_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_permutex2var_ph) * [ ] [`_mm512_permutexvar_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_permutexvar_ph) * [ ] 
[`_mm512_rcp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_rcp_ph) @@ -392,39 +346,12 @@ * [ ] [`_mm512_scalef_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_scalef_ph) * [ ] [`_mm512_scalef_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_scalef_round_ph) * [ ] [`_mm512_set1_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_set1_pch) - * [ ] [`_mm512_set1_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_set1_ph) - * [ ] [`_mm512_set_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_set_ph) - * [ ] [`_mm512_setr_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_setr_ph) - * [ ] [`_mm512_setzero_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_setzero_ph) * [ ] [`_mm512_sqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_sqrt_ph) * [ ] [`_mm512_sqrt_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_sqrt_round_ph) - * [ ] [`_mm512_store_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_store_ph) - * [ ] [`_mm512_storeu_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_storeu_ph) - * [ ] [`_mm512_sub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_sub_ph) - * [ ] [`_mm512_sub_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_sub_round_ph) - * [ ] [`_mm512_undefined_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_undefined_ph) - * [ ] [`_mm512_zextph128_ph512`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_zextph128_ph512) - * [ ] [`_mm512_zextph256_ph512`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_zextph256_ph512) - * [ ] [`_mm_add_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_add_round_sh) - * [ ] [`_mm_add_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_add_sh) - * [ ] [`_mm_castpd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_castpd_ph) - * [ ] [`_mm_castph_pd`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_castph_pd) - * [ ] [`_mm_castph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_castph_ps) - * [ ] [`_mm_castph_si128`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_castph_si128) - * [ ] [`_mm_castps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_castps_ph) - * [ ] [`_mm_castsi128_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_castsi128_ph) * [ ] [`_mm_cmp_round_sh_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmp_round_sh_mask) * [ ] [`_mm_cmp_sh_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmp_sh_mask) * [ ] [`_mm_cmul_round_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmul_round_sch) * [ ] [`_mm_cmul_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmul_sch) - * [ ] [`_mm_comi_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comi_round_sh) - * [ ] [`_mm_comi_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comi_sh) - * [ ] 
[`_mm_comieq_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comieq_sh) - * [ ] [`_mm_comige_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comige_sh) - * [ ] [`_mm_comigt_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comigt_sh) - * [ ] [`_mm_comile_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comile_sh) - * [ ] [`_mm_comilt_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comilt_sh) - * [ ] [`_mm_comineq_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comineq_sh) * [ ] [`_mm_cvt_roundi32_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundi32_sh) * [ ] [`_mm_cvt_roundi64_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundi64_sh) * [ ] [`_mm_cvt_roundsd_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundsd_sh) @@ -460,8 +387,6 @@ * [ ] [`_mm_cvttsh_u64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttsh_u64) * [ ] [`_mm_cvtu32_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtu32_sh) * [ ] [`_mm_cvtu64_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtu64_sh) - * [ ] [`_mm_div_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_div_round_sh) - * [ ] [`_mm_div_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_div_sh) * [ ] [`_mm_fcmadd_round_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fcmadd_round_sch) * [ ] [`_mm_fcmadd_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fcmadd_sch) * [ ] [`_mm_fcmul_round_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fcmul_round_sch) @@ -483,7 +408,6 @@ * [ ] [`_mm_getexp_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_getexp_sh) * [ ] [`_mm_getmant_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_getmant_round_sh) * [ ] [`_mm_getmant_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_getmant_sh) - * [ ] [`_mm_load_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_load_sh) * [ ] [`_mm_mask3_fcmadd_round_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fcmadd_round_sch) * [ ] [`_mm_mask3_fcmadd_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fcmadd_sch) * [ ] [`_mm_mask3_fmadd_round_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fmadd_round_sch) @@ -496,8 +420,6 @@ * [ ] [`_mm_mask3_fnmadd_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fnmadd_sh) * [ ] [`_mm_mask3_fnmsub_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fnmsub_round_sh) * [ ] [`_mm_mask3_fnmsub_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fnmsub_sh) - * [ ] [`_mm_mask_add_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_add_round_sh) - * [ ] [`_mm_mask_add_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_add_sh) * [ ] [`_mm_mask_cmp_round_sh_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmp_round_sh_mask) * [ ] 
[`_mm_mask_cmp_sh_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmp_sh_mask) * [ ] [`_mm_mask_cmul_round_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmul_round_sch) @@ -510,8 +432,6 @@ * [ ] [`_mm_mask_cvtsh_sd`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtsh_sd) * [ ] [`_mm_mask_cvtsh_ss`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtsh_ss) * [ ] [`_mm_mask_cvtss_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtss_sh) - * [ ] [`_mm_mask_div_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_div_round_sh) - * [ ] [`_mm_mask_div_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_div_sh) * [ ] [`_mm_mask_fcmadd_round_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fcmadd_round_sch) * [ ] [`_mm_mask_fcmadd_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fcmadd_sch) * [ ] [`_mm_mask_fcmul_round_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fcmul_round_sch) @@ -533,12 +453,8 @@ * [ ] [`_mm_mask_getexp_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_getexp_sh) * [ ] [`_mm_mask_getmant_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_getmant_round_sh) * [ ] [`_mm_mask_getmant_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_getmant_sh) - * [ ] [`_mm_mask_load_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_load_sh) - * [ ] [`_mm_mask_move_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_move_sh) * [ ] [`_mm_mask_mul_round_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_mul_round_sch) - * [ ] [`_mm_mask_mul_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_mul_round_sh) * [ ] [`_mm_mask_mul_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_mul_sch) - * [ ] [`_mm_mask_mul_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_mul_sh) * [ ] [`_mm_mask_rcp_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_rcp_sh) * [ ] [`_mm_mask_reduce_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_reduce_round_sh) * [ ] [`_mm_mask_reduce_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_reduce_sh) @@ -549,11 +465,6 @@ * [ ] [`_mm_mask_scalef_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_scalef_sh) * [ ] [`_mm_mask_sqrt_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_sqrt_round_sh) * [ ] [`_mm_mask_sqrt_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_sqrt_sh) - * [ ] [`_mm_mask_store_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_store_sh) - * [ ] [`_mm_mask_sub_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_sub_round_sh) - * [ ] [`_mm_mask_sub_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_sub_sh) - * [ ] [`_mm_maskz_add_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_add_round_sh) - * [ ] 
[`_mm_maskz_add_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_add_sh) * [ ] [`_mm_maskz_cmul_round_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cmul_round_sch) * [ ] [`_mm_maskz_cmul_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cmul_sch) * [ ] [`_mm_maskz_cvt_roundsd_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvt_roundsd_sh) @@ -564,8 +475,6 @@ * [ ] [`_mm_maskz_cvtsh_sd`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtsh_sd) * [ ] [`_mm_maskz_cvtsh_ss`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtsh_ss) * [ ] [`_mm_maskz_cvtss_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtss_sh) - * [ ] [`_mm_maskz_div_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_div_round_sh) - * [ ] [`_mm_maskz_div_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_div_sh) * [ ] [`_mm_maskz_fcmadd_round_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fcmadd_round_sch) * [ ] [`_mm_maskz_fcmadd_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fcmadd_sch) * [ ] [`_mm_maskz_fcmul_round_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fcmul_round_sch) @@ -586,12 +495,8 @@ * [ ] [`_mm_maskz_getexp_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_getexp_sh) * [ ] [`_mm_maskz_getmant_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_getmant_round_sh) * [ ] [`_mm_maskz_getmant_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_getmant_sh) - * [ ] [`_mm_maskz_load_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_load_sh) - * [ ] [`_mm_maskz_move_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_move_sh) * [ ] [`_mm_maskz_mul_round_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_mul_round_sch) - * [ ] [`_mm_maskz_mul_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_mul_round_sh) * [ ] [`_mm_maskz_mul_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_mul_sch) - * [ ] [`_mm_maskz_mul_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_mul_sh) * [ ] [`_mm_maskz_rcp_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_rcp_sh) * [ ] [`_mm_maskz_reduce_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_reduce_round_sh) * [ ] [`_mm_maskz_reduce_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_reduce_sh) @@ -602,13 +507,8 @@ * [ ] [`_mm_maskz_scalef_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_scalef_sh) * [ ] [`_mm_maskz_sqrt_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_sqrt_round_sh) * [ ] [`_mm_maskz_sqrt_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_sqrt_sh) - * [ ] [`_mm_maskz_sub_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_sub_round_sh) - * [ ] [`_mm_maskz_sub_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_sub_sh) - * 
[ ] [`_mm_move_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_move_sh) * [ ] [`_mm_mul_round_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mul_round_sch) - * [ ] [`_mm_mul_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mul_round_sh) * [ ] [`_mm_mul_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mul_sch) - * [ ] [`_mm_mul_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mul_sh) * [ ] [`_mm_rcp_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_rcp_sh) * [ ] [`_mm_reduce_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_reduce_round_sh) * [ ] [`_mm_reduce_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_reduce_sh) @@ -618,28 +518,14 @@ * [ ] [`_mm_scalef_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_scalef_round_sh) * [ ] [`_mm_scalef_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_scalef_sh) * [ ] [`_mm_set1_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set1_pch) - * [ ] [`_mm_set1_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set1_ph) - * [ ] [`_mm_set_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set_ph) - * [ ] [`_mm_set_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set_sh) - * [ ] [`_mm_setr_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_setr_ph) * [ ] [`_mm_sqrt_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sqrt_round_sh) * [ ] [`_mm_sqrt_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sqrt_sh) - * [ ] [`_mm_store_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_store_sh) - * [ ] [`_mm_sub_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sub_round_sh) - * [ ] [`_mm_sub_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sub_sh) - * [ ] [`_mm_ucomieq_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ucomieq_sh) - * [ ] [`_mm_ucomige_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ucomige_sh) - * [ ] [`_mm_ucomigt_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ucomigt_sh) - * [ ] [`_mm_ucomile_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ucomile_sh) - * [ ] [`_mm_ucomilt_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ucomilt_sh) - * [ ] [`_mm_ucomineq_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ucomineq_sh)

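The entries removed from the checklist above are the ones this patch implements: the plain and rounding add/sub/mul/div families, the load/store/move family, the `comi`/`ucomi` scalar comparisons, and the `set`/`setzero`/cast constructors. A minimal usage sketch of a few of them follows; it is not part of the patch, it assumes the Rust signatures mirror the Intel Intrinsics Guide definitions linked above, and `fp16_demo` is a hypothetical helper:

```rust
#![feature(stdarch_x86_avx512_f16, f16)]

use std::arch::x86_64::*;

// Hypothetical helper, shown only to illustrate the intrinsics this
// patch checks off; signatures assumed to follow Intel's definitions.
#[target_feature(enable = "avx512fp16")]
unsafe fn fp16_demo() -> (f16, i32) {
    // 32 half-precision lanes per 512-bit register.
    let a = _mm512_set1_ph(1.5);
    let b = _mm512_set1_ph(0.25);
    let sum = _mm512_add_ph(a, b); // lane-wise vaddph

    // Unaligned store, then read one lane back from memory.
    let mut lanes: [f16; 32] = [0.0; 32];
    _mm512_storeu_ph(lanes.as_mut_ptr(), sum);

    // Scalar ordered compare on the low lane: returns 1 when equal.
    let x = _mm_set_sh(1.75);
    let eq = _mm_comieq_sh(x, x);

    (lanes[0], eq)
}
```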
["AVX512_FP16", "AVX512VL"]

* [ ] [`_mm256_abs_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_abs_ph) - * [ ] [`_mm256_add_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_add_ph) * [ ] [`_mm256_cmp_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmp_ph_mask) * [ ] [`_mm256_cmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmul_pch) * [ ] [`_mm256_conj_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_conj_pch) @@ -665,7 +551,6 @@ * [ ] [`_mm256_cvttph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvttph_epu64) * [ ] [`_mm256_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtxph_ps) * [ ] [`_mm256_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtxps_ph) - * [ ] [`_mm256_div_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_div_ph) * [ ] [`_mm256_fcmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fcmadd_pch) * [ ] [`_mm256_fcmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fcmul_pch) * [ ] [`_mm256_fmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fmadd_pch) @@ -679,8 +564,6 @@ * [ ] [`_mm256_fpclass_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fpclass_ph_mask) * [ ] [`_mm256_getexp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_getexp_ph) * [ ] [`_mm256_getmant_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_getmant_ph) - * [ ] [`_mm256_load_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_load_ph) - * [ ] [`_mm256_loadu_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_loadu_ph) * [ ] [`_mm256_mask3_fcmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask3_fcmadd_pch) * [ ] [`_mm256_mask3_fmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask3_fmadd_pch) * [ ] [`_mm256_mask3_fmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask3_fmadd_ph) @@ -689,7 +572,6 @@ * [ ] [`_mm256_mask3_fmsubadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask3_fmsubadd_ph) * [ ] [`_mm256_mask3_fnmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask3_fnmadd_ph) * [ ] [`_mm256_mask3_fnmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask3_fnmsub_ph) - * [ ] [`_mm256_mask_add_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_add_ph) * [ ] [`_mm256_mask_blend_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_blend_ph) * [ ] [`_mm256_mask_cmp_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmp_ph_mask) * [ ] [`_mm256_mask_cmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmul_pch) @@ -716,7 +598,6 @@ * [ ] [`_mm256_mask_cvttph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvttph_epu64) * [ ] [`_mm256_mask_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtxph_ps) * [ ] 
[`_mm256_mask_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtxps_ph) - * [ ] [`_mm256_mask_div_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_div_ph) * [ ] [`_mm256_mask_fcmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fcmadd_pch) * [ ] [`_mm256_mask_fcmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fcmul_pch) * [ ] [`_mm256_mask_fmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fmadd_pch) @@ -733,15 +614,12 @@ * [ ] [`_mm256_mask_max_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_max_ph) * [ ] [`_mm256_mask_min_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_min_ph) * [ ] [`_mm256_mask_mul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_mul_pch) - * [ ] [`_mm256_mask_mul_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_mul_ph) * [ ] [`_mm256_mask_rcp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_rcp_ph) * [ ] [`_mm256_mask_reduce_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_reduce_ph) * [ ] [`_mm256_mask_roundscale_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_roundscale_ph) * [ ] [`_mm256_mask_rsqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_rsqrt_ph) * [ ] [`_mm256_mask_scalef_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_scalef_ph) * [ ] [`_mm256_mask_sqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_sqrt_ph) - * [ ] [`_mm256_mask_sub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_sub_ph) - * [ ] [`_mm256_maskz_add_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_add_ph) * [ ] [`_mm256_maskz_cmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cmul_pch) * [ ] [`_mm256_maskz_conj_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_conj_pch) * [ ] [`_mm256_maskz_cvtepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepi16_ph) @@ -766,7 +644,6 @@ * [ ] [`_mm256_maskz_cvttph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvttph_epu64) * [ ] [`_mm256_maskz_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtxph_ps) * [ ] [`_mm256_maskz_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtxps_ph) - * [ ] [`_mm256_maskz_div_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_div_ph) * [ ] [`_mm256_maskz_fcmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fcmadd_pch) * [ ] [`_mm256_maskz_fcmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fcmul_pch) * [ ] [`_mm256_maskz_fmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fmadd_pch) @@ -782,18 +659,15 @@ * [ ] [`_mm256_maskz_max_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_max_ph) * [ ] 
[`_mm256_maskz_min_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_min_ph) * [ ] [`_mm256_maskz_mul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_mul_pch) - * [ ] [`_mm256_maskz_mul_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_mul_ph) * [ ] [`_mm256_maskz_rcp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_rcp_ph) * [ ] [`_mm256_maskz_reduce_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_reduce_ph) * [ ] [`_mm256_maskz_roundscale_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_roundscale_ph) * [ ] [`_mm256_maskz_rsqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_rsqrt_ph) * [ ] [`_mm256_maskz_scalef_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_scalef_ph) * [ ] [`_mm256_maskz_sqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_sqrt_ph) - * [ ] [`_mm256_maskz_sub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_sub_ph) * [ ] [`_mm256_max_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_max_ph) * [ ] [`_mm256_min_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_min_ph) * [ ] [`_mm256_mul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mul_pch) - * [ ] [`_mm256_mul_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mul_ph) * [ ] [`_mm256_permutex2var_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permutex2var_ph) * [ ] [`_mm256_permutexvar_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permutexvar_ph) * [ ] [`_mm256_rcp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_rcp_ph) @@ -805,14 +679,8 @@ * [ ] [`_mm256_roundscale_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_roundscale_ph) * [ ] [`_mm256_rsqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_rsqrt_ph) * [ ] [`_mm256_scalef_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_scalef_ph) - * [ ] [`_mm256_setzero_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_setzero_ph) * [ ] [`_mm256_sqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_sqrt_ph) - * [ ] [`_mm256_store_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_store_ph) - * [ ] [`_mm256_storeu_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_storeu_ph) - * [ ] [`_mm256_sub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_sub_ph) - * [ ] [`_mm256_undefined_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_undefined_ph) * [ ] [`_mm_abs_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_abs_ph) - * [ ] [`_mm_add_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_add_ph) * [ ] [`_mm_cmp_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmp_ph_mask) * [ ] [`_mm_cmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmul_pch) * [ ] [`_mm_conj_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_conj_pch) 
@@ -838,7 +706,6 @@ * [ ] [`_mm_cvttph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttph_epu64) * [ ] [`_mm_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtxph_ps) * [ ] [`_mm_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtxps_ph) - * [ ] [`_mm_div_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_div_ph) * [ ] [`_mm_fcmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fcmadd_pch) * [ ] [`_mm_fcmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fcmul_pch) * [ ] [`_mm_fmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmadd_pch) @@ -852,8 +719,6 @@ * [ ] [`_mm_fpclass_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fpclass_ph_mask) * [ ] [`_mm_getexp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_getexp_ph) * [ ] [`_mm_getmant_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_getmant_ph) - * [ ] [`_mm_load_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_load_ph) - * [ ] [`_mm_loadu_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadu_ph) * [ ] [`_mm_mask3_fcmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fcmadd_pch) * [ ] [`_mm_mask3_fmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fmadd_pch) * [ ] [`_mm_mask3_fmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fmadd_ph) @@ -862,7 +727,6 @@ * [ ] [`_mm_mask3_fmsubadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fmsubadd_ph) * [ ] [`_mm_mask3_fnmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fnmadd_ph) * [ ] [`_mm_mask3_fnmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fnmsub_ph) - * [ ] [`_mm_mask_add_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_add_ph) * [ ] [`_mm_mask_blend_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_blend_ph) * [ ] [`_mm_mask_cmp_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmp_ph_mask) * [ ] [`_mm_mask_cmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmul_pch) @@ -889,7 +753,6 @@ * [ ] [`_mm_mask_cvttph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvttph_epu64) * [ ] [`_mm_mask_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtxph_ps) * [ ] [`_mm_mask_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtxps_ph) - * [ ] [`_mm_mask_div_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_div_ph) * [ ] [`_mm_mask_fcmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fcmadd_pch) * [ ] [`_mm_mask_fcmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fcmul_pch) * [ ] [`_mm_mask_fmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmadd_pch) @@ -910,15 +773,12 @@ * [ ] [`_mm_mask_min_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_min_round_sh) * [ ] 
[`_mm_mask_min_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_min_sh) * [ ] [`_mm_mask_mul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_mul_pch) - * [ ] [`_mm_mask_mul_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_mul_ph) * [ ] [`_mm_mask_rcp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_rcp_ph) * [ ] [`_mm_mask_reduce_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_reduce_ph) * [ ] [`_mm_mask_roundscale_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_roundscale_ph) * [ ] [`_mm_mask_rsqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_rsqrt_ph) * [ ] [`_mm_mask_scalef_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_scalef_ph) * [ ] [`_mm_mask_sqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_sqrt_ph) - * [ ] [`_mm_mask_sub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_sub_ph) - * [ ] [`_mm_maskz_add_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_add_ph) * [ ] [`_mm_maskz_cmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cmul_pch) * [ ] [`_mm_maskz_conj_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_conj_pch) * [ ] [`_mm_maskz_cvtepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepi16_ph) @@ -943,7 +803,6 @@ * [ ] [`_mm_maskz_cvttph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvttph_epu64) * [ ] [`_mm_maskz_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtxph_ps) * [ ] [`_mm_maskz_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtxps_ph) - * [ ] [`_mm_maskz_div_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_div_ph) * [ ] [`_mm_maskz_fcmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fcmadd_pch) * [ ] [`_mm_maskz_fcmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fcmul_pch) * [ ] [`_mm_maskz_fmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmadd_pch) @@ -963,14 +822,12 @@ * [ ] [`_mm_maskz_min_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_min_round_sh) * [ ] [`_mm_maskz_min_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_min_sh) * [ ] [`_mm_maskz_mul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_mul_pch) - * [ ] [`_mm_maskz_mul_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_mul_ph) * [ ] [`_mm_maskz_rcp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_rcp_ph) * [ ] [`_mm_maskz_reduce_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_reduce_ph) * [ ] [`_mm_maskz_roundscale_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_roundscale_ph) * [ ] [`_mm_maskz_rsqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_rsqrt_ph) * [ ] [`_mm_maskz_scalef_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_scalef_ph) * [ ] 
[`_mm_maskz_sqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_sqrt_ph) - * [ ] [`_mm_maskz_sub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_sub_ph) * [ ] [`_mm_max_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_ph) * [ ] [`_mm_max_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_round_sh) * [ ] [`_mm_max_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_sh) @@ -978,7 +835,6 @@ * [ ] [`_mm_min_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_round_sh) * [ ] [`_mm_min_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_sh) * [ ] [`_mm_mul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mul_pch) - * [ ] [`_mm_mul_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mul_ph) * [ ] [`_mm_permutex2var_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_permutex2var_ph) * [ ] [`_mm_permutexvar_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_permutexvar_ph) * [ ] [`_mm_rcp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_rcp_ph) @@ -990,12 +846,7 @@ * [ ] [`_mm_roundscale_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_roundscale_ph) * [ ] [`_mm_rsqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_rsqrt_ph) * [ ] [`_mm_scalef_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_scalef_ph) - * [ ] [`_mm_setzero_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_setzero_ph) * [ ] [`_mm_sqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sqrt_ph) - * [ ] [`_mm_store_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_store_ph) - * [ ] [`_mm_storeu_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_storeu_ph) - * [ ] [`_mm_sub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sub_ph) - * [ ] [`_mm_undefined_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_undefined_ph)
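Taken together, the boxes checked off above cover the `f16` vector constructors, memory operations, and basic arithmetic. For orientation only (not part of the patch), here is a minimal sketch of how they compose, assuming a nightly toolchain with the `f16` and `stdarch_x86_avx512_f16` features, AVX512-FP16 hardware, and that std_detect exposes the `"avx512fp16"` detection string; the function names `demo`/`main` are ours:

```rust
#![feature(f16, stdarch_x86_avx512_f16)]
use std::arch::x86_64::*;

// Add a broadcast constant to 8 packed f16 lanes and write the result out.
#[target_feature(enable = "avx512fp16,avx512vl")]
unsafe fn demo(out: &mut [f16; 8]) {
    let a = _mm_set1_ph(1.5); // broadcast 1.5 to all 8 lanes
    let b = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); // lane i holds i
    _mm_storeu_ph(out.as_mut_ptr(), _mm_add_ph(a, b)); // unaligned store of all lanes
}

fn main() {
    if is_x86_feature_detected!("avx512fp16") {
        let mut out = [0.0f16; 8];
        unsafe { demo(&mut out) };
        assert_eq!(out[3], 4.5); // 1.5 + 3.0, exactly representable in f16
    }
}
```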

diff --git a/crates/core_arch/src/x86/avx512fp16.rs b/crates/core_arch/src/x86/avx512fp16.rs new file mode 100644 index 0000000000..c6eeff1904 --- /dev/null +++ b/crates/core_arch/src/x86/avx512fp16.rs @@ -0,0 +1,4004 @@ +use crate::arch::asm; +use crate::core_arch::{simd::*, x86::*}; +use crate::intrinsics::simd::*; +use crate::ptr; + +/// Set packed half-precision (16-bit) floating-point elements in dst with the supplied values. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_set_ph( + e7: f16, + e6: f16, + e5: f16, + e4: f16, + e3: f16, + e2: f16, + e1: f16, + e0: f16, +) -> __m128h { + __m128h(e0, e1, e2, e3, e4, e5, e6, e7) +} + +/// Set packed half-precision (16-bit) floating-point elements in dst with the supplied values. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_set_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_set_ph( + e15: f16, + e14: f16, + e13: f16, + e12: f16, + e11: f16, + e10: f16, + e9: f16, + e8: f16, + e7: f16, + e6: f16, + e5: f16, + e4: f16, + e3: f16, + e2: f16, + e1: f16, + e0: f16, +) -> __m256h { + __m256h( + e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, + ) +} + +/// Set packed half-precision (16-bit) floating-point elements in dst with the supplied values. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_set_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_set_ph( + e31: f16, + e30: f16, + e29: f16, + e28: f16, + e27: f16, + e26: f16, + e25: f16, + e24: f16, + e23: f16, + e22: f16, + e21: f16, + e20: f16, + e19: f16, + e18: f16, + e17: f16, + e16: f16, + e15: f16, + e14: f16, + e13: f16, + e12: f16, + e11: f16, + e10: f16, + e9: f16, + e8: f16, + e7: f16, + e6: f16, + e5: f16, + e4: f16, + e3: f16, + e2: f16, + e1: f16, + e0: f16, +) -> __m512h { + __m512h( + e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, + e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, + ) +} + +/// Copy the half-precision (16-bit) floating-point element from a to the lower element of dst and zero +/// the upper 7 elements. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set_sh) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_set_sh(a: f16) -> __m128h { + __m128h(a, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0) +} + +/// Broadcast the half-precision (16-bit) floating-point value a to all elements of dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set1_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_set1_ph(a: f16) -> __m128h { + transmute(f16x8::splat(a)) +} + +/// Broadcast the half-precision (16-bit) floating-point value a to all elements of dst.
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_set1_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_set1_ph(a: f16) -> __m256h { + transmute(f16x16::splat(a)) +} + +/// Broadcast the half-precision (16-bit) floating-point value a to all elements of dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_set1_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_set1_ph(a: f16) -> __m512h { + transmute(f16x32::splat(a)) +} + +/// Set packed half-precision (16-bit) floating-point elements in dst with the supplied values in reverse order. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_setr_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_setr_ph( + e0: f16, + e1: f16, + e2: f16, + e3: f16, + e4: f16, + e5: f16, + e6: f16, + e7: f16, +) -> __m128h { + __m128h(e0, e1, e2, e3, e4, e5, e6, e7) +} + +/// Set packed half-precision (16-bit) floating-point elements in dst with the supplied values in reverse order. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_setr_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_setr_ph( + e0: f16, + e1: f16, + e2: f16, + e3: f16, + e4: f16, + e5: f16, + e6: f16, + e7: f16, + e8: f16, + e9: f16, + e10: f16, + e11: f16, + e12: f16, + e13: f16, + e14: f16, + e15: f16, +) -> __m256h { + __m256h( + e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, + ) +} + +/// Set packed half-precision (16-bit) floating-point elements in dst with the supplied values in reverse order. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_setr_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_setr_ph( + e0: f16, + e1: f16, + e2: f16, + e3: f16, + e4: f16, + e5: f16, + e6: f16, + e7: f16, + e8: f16, + e9: f16, + e10: f16, + e11: f16, + e12: f16, + e13: f16, + e14: f16, + e15: f16, + e16: f16, + e17: f16, + e18: f16, + e19: f16, + e20: f16, + e21: f16, + e22: f16, + e23: f16, + e24: f16, + e25: f16, + e26: f16, + e27: f16, + e28: f16, + e29: f16, + e30: f16, + e31: f16, +) -> __m512h { + __m512h( + e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, + e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, + ) +} + +/// Return vector of type __m128h with all elements set to zero. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_setzero_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_setzero_ph() -> __m128h { + transmute(f16x8::splat(0.0)) +} + +/// Return vector of type __m256h with all elements set to zero. 
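As an aside on the constructors above: `_mm_set_ph` takes its arguments from the highest lane down to lane 0, while `_mm_setr_ph` takes them in reversed (memory) order, so the two calls below build the same vector. A small sketch under the same nightly-feature assumptions noted earlier:

```rust
#[target_feature(enable = "avx512fp16,avx512vl")]
unsafe fn set_orders_agree() {
    let hi_to_lo = _mm_set_ph(7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, 0.0);
    let lo_to_hi = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
    let (mut a, mut b) = ([0.0f16; 8], [0.0f16; 8]);
    _mm_storeu_ph(a.as_mut_ptr(), hi_to_lo);
    _mm_storeu_ph(b.as_mut_ptr(), lo_to_hi);
    assert_eq!(a, b);
    assert_eq!(a[0], 0.0); // lane 0 is always the e0 argument
}
```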
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_setzero_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_setzero_ph() -> __m256h { + transmute(f16x16::splat(0.0)) +} + +/// Return vector of type __m512h with all elements set to zero. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_setzero_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_setzero_ph() -> __m512h { + transmute(f16x32::splat(0.0)) +} + +/// Return vector of type `__m128h` with undefined elements. In practice, this returns the all-zero +/// vector. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_undefined_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_undefined_ph() -> __m128h { + transmute(f16x8::splat(0.0)) +} + +/// Return vector of type `__m256h` with undefined elements. In practice, this returns the all-zero +/// vector. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_undefined_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_undefined_ph() -> __m256h { + transmute(f16x16::splat(0.0)) +} + +/// Return vector of type `__m512h` with undefined elements. In practice, this returns the all-zero +/// vector. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_undefined_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_undefined_ph() -> __m512h { + transmute(f16x32::splat(0.0)) +} + +/// Cast vector of type `__m128d` to type `__m128h`. This intrinsic is only used for compilation and +/// does not generate any instructions, thus it has zero latency. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_castpd_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_castpd_ph(a: __m128d) -> __m128h { + transmute(a) +} + +/// Cast vector of type `__m256d` to type `__m256h`. This intrinsic is only used for compilation and +/// does not generate any instructions, thus it has zero latency. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castpd_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_castpd_ph(a: __m256d) -> __m256h { + transmute(a) +} + +/// Cast vector of type `__m512d` to type `__m512h`. This intrinsic is only used for compilation and +/// does not generate any instructions, thus it has zero latency. 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_castpd_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_castpd_ph(a: __m512d) -> __m512h { + transmute(a) +} + +/// Cast vector of type `__m128h` to type `__m128d`. This intrinsic is only used for compilation and +/// does not generate any instructions, thus it has zero latency. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_castph_pd) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_castph_pd(a: __m128h) -> __m128d { + transmute(a) +} + +/// Cast vector of type `__m256h` to type `__m256d`. This intrinsic is only used for compilation and +/// does not generate any instructions, thus it has zero latency. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castph_pd) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_castph_pd(a: __m256h) -> __m256d { + transmute(a) +} + +/// Cast vector of type `__m512h` to type `__m512d`. This intrinsic is only used for compilation and +/// does not generate any instructions, thus it has zero latency. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_castph_pd) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_castph_pd(a: __m512h) -> __m512d { + transmute(a) +} + +/// Cast vector of type `__m128` to type `__m128h`. This intrinsic is only used for compilation and +/// does not generate any instructions, thus it has zero latency. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_castps_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_castps_ph(a: __m128) -> __m128h { + transmute(a) +} + +/// Cast vector of type `__m256` to type `__m256h`. This intrinsic is only used for compilation and +/// does not generate any instructions, thus it has zero latency. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castps_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_castps_ph(a: __m256) -> __m256h { + transmute(a) +} + +/// Cast vector of type `__m512` to type `__m512h`. This intrinsic is only used for compilation and +/// does not generate any instructions, thus it has zero latency. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_castps_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_castps_ph(a: __m512) -> __m512h { + transmute(a) +} + +/// Cast vector of type `__m128h` to type `__m128`. This intrinsic is only used for compilation and +/// does not generate any instructions, thus it has zero latency. 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_castph_ps) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_castph_ps(a: __m128h) -> __m128 { + transmute(a) +} + +/// Cast vector of type `__m256h` to type `__m256`. This intrinsic is only used for compilation and +/// does not generate any instructions, thus it has zero latency. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castph_ps) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_castph_ps(a: __m256h) -> __m256 { + transmute(a) +} + +/// Cast vector of type `__m512h` to type `__m512`. This intrinsic is only used for compilation and +/// does not generate any instructions, thus it has zero latency. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_castph_ps) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_castph_ps(a: __m512h) -> __m512 { + transmute(a) +} + +/// Cast vector of type `__m128i` to type `__m128h`. This intrinsic is only used for compilation and +/// does not generate any instructions, thus it has zero latency. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_castsi128_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_castsi128_ph(a: __m128i) -> __m128h { + transmute(a) +} + +/// Cast vector of type `__m256i` to type `__m256h`. This intrinsic is only used for compilation and +/// does not generate any instructions, thus it has zero latency. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castsi256_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_castsi256_ph(a: __m256i) -> __m256h { + transmute(a) +} + +/// Cast vector of type `__m512i` to type `__m512h`. This intrinsic is only used for compilation and +/// does not generate any instructions, thus it has zero latency. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_castsi512_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_castsi512_ph(a: __m512i) -> __m512h { + transmute(a) +} + +/// Cast vector of type `__m128h` to type `__m128i`. This intrinsic is only used for compilation and +/// does not generate any instructions, thus it has zero latency. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_castph_si128) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_castph_si128(a: __m128h) -> __m128i { + transmute(a) +} + +/// Cast vector of type `__m256h` to type `__m256i`. This intrinsic is only used for compilation and +/// does not generate any instructions, thus it has zero latency. 
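Because these casts are pure bit reinterpretations, `_mm_castsi128_ph` is also a convenient way to materialize `f16` vectors from raw IEEE binary16 bit patterns. A hedged sketch (the helper name is ours):

```rust
// 0x3C00 is the binary16 encoding of 1.0, so this yields [1.0f16; 8].
#[target_feature(enable = "avx512fp16,avx512vl")]
unsafe fn ones_from_bits() -> __m128h {
    _mm_castsi128_ph(_mm_set1_epi16(0x3C00)) // type change only, no instruction emitted
}
```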
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castph_si256) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_castph_si256(a: __m256h) -> __m256i { + transmute(a) +} + +/// Cast vector of type `__m512h` to type `__m512i`. This intrinsic is only used for compilation and +/// does not generate any instructions, thus it has zero latency. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_castph_si512) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_castph_si512(a: __m512h) -> __m512i { + transmute(a) +} + +/// Cast vector of type `__m256h` to type `__m128h`. This intrinsic is only used for compilation and +/// does not generate any instructions, thus it has zero latency. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castph256_ph128) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_castph256_ph128(a: __m256h) -> __m128h { + simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +/// Cast vector of type `__m512h` to type `__m128h`. This intrinsic is only used for compilation and +/// does not generate any instructions, thus it has zero latency. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_castph512_ph128) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_castph512_ph128(a: __m512h) -> __m128h { + simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +/// Cast vector of type `__m512h` to type `__m256h`. This intrinsic is only used for compilation and +/// does not generate any instructions, thus it has zero latency. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_castph512_ph256) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_castph512_ph256(a: __m512h) -> __m256h { + simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) +} + +/// Cast vector of type `__m128h` to type `__m256h`. The upper 8 elements of the result are undefined. +/// In practice, the upper elements are zeroed. This intrinsic can generate the `vzeroupper` instruction, +/// but most of the time it does not generate any instructions. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castph128_ph256) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_castph128_ph256(a: __m128h) -> __m256h { + simd_shuffle!( + a, + _mm_undefined_ph(), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 8, 8, 8, 8, 8, 8] + ) +} + +/// Cast vector of type `__m128h` to type `__m512h`. The upper 24 elements of the result are undefined. +/// In practice, the upper elements are zeroed. This intrinsic can generate the `vzeroupper` instruction, +/// but most of the time it does not generate any instructions. 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_castph128_ph512) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_castph128_ph512(a: __m128h) -> __m512h { + simd_shuffle!( + a, + _mm_undefined_ph(), + [ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8 + ] + ) +} + +/// Cast vector of type `__m256h` to type `__m512h`. The upper 16 elements of the result are undefined. +/// In practice, the upper elements are zeroed. This intrinsic can generate the `vzeroupper` instruction, +/// but most of the time it does not generate any instructions. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_castph256_ph512) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_castph256_ph512(a: __m256h) -> __m512h { + simd_shuffle!( + a, + _mm256_undefined_ph(), + [ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16 + ] + ) +} + +/// Cast vector of type `__m128h` to type `__m256h`. The upper 8 elements of the result are zeroed. +/// This intrinsic can generate the `vzeroupper` instruction, but most of the time it does not generate +/// any instructions. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_zextph128_ph256) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_zextph128_ph256(a: __m128h) -> __m256h { + simd_shuffle!( + a, + _mm_setzero_ph(), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 8, 8, 8, 8, 8, 8] + ) +} + +/// Cast vector of type `__m128h` to type `__m512h`. The upper 24 elements of the result are zeroed. +/// This intrinsic can generate the `vzeroupper` instruction, but most of the time it does not generate +/// any instructions. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_zextph128_ph512) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_zextph128_ph512(a: __m128h) -> __m512h { + simd_shuffle!( + a, + _mm_setzero_ph(), + [ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8 + ] + ) +} + +/// Cast vector of type `__m256h` to type `__m512h`. The upper 16 elements of the result are zeroed. +/// This intrinsic can generate the `vzeroupper` instruction, but most of the time it does not generate +/// any instructions. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_zextph256_ph512) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_zextph256_ph512(a: __m256h) -> __m512h { + simd_shuffle!( + a, + _mm256_setzero_ph(), + [ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16 + ] + ) +} + +/// Compare the lower half-precision (16-bit) floating-point elements in a and b based on the comparison +/// operand specified by imm8, and return the boolean result (0 or 1).
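The practical difference between the `cast` and `zext` widening intrinsics above: `cast` leaves the upper lanes unspecified, `zext` guarantees zeros there. A sketch of the safer default (the helper name is ours):

```rust
#[target_feature(enable = "avx512fp16,avx512vl")]
unsafe fn widen(a: __m128h) -> __m256h {
    // Use zext whenever the upper half may be observed (stored, reduced, ...);
    // the plain cast is only appropriate if those lanes are overwritten before use.
    _mm256_zextph128_ph256(a)
}
```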
+/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comi_round_sh) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[rustc_legacy_const_generics(2, 3)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_comi_round_sh<const IMM8: i32, const SAE: i32>(a: __m128h, b: __m128h) -> i32 { + static_assert_sae!(SAE); + vcomish(a, b, IMM8, SAE) +} + +/// Compare the lower half-precision (16-bit) floating-point elements in a and b based on the comparison +/// operand specified by imm8, and return the boolean result (0 or 1). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comi_sh) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_comi_sh<const IMM8: i32>(a: __m128h, b: __m128h) -> i32 { + _mm_comi_round_sh::<IMM8, _MM_FROUND_CUR_DIRECTION>(a, b) +} + +/// Compare the lower half-precision (16-bit) floating-point elements in a and b for equality, and return +/// the boolean result (0 or 1). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comieq_sh) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_comieq_sh(a: __m128h, b: __m128h) -> i32 { + _mm_comi_sh::<_CMP_EQ_OS>(a, b) +} + +/// Compare the lower half-precision (16-bit) floating-point elements in a and b for greater-than-or-equal, +/// and return the boolean result (0 or 1). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comige_sh) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_comige_sh(a: __m128h, b: __m128h) -> i32 { + _mm_comi_sh::<_CMP_GE_OS>(a, b) +} + +/// Compare the lower half-precision (16-bit) floating-point elements in a and b for greater-than, and return +/// the boolean result (0 or 1). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comigt_sh) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_comigt_sh(a: __m128h, b: __m128h) -> i32 { + _mm_comi_sh::<_CMP_GT_OS>(a, b) +} + +/// Compare the lower half-precision (16-bit) floating-point elements in a and b for less-than-or-equal, and +/// return the boolean result (0 or 1). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comile_sh) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_comile_sh(a: __m128h, b: __m128h) -> i32 { + _mm_comi_sh::<_CMP_LE_OS>(a, b) +} + +/// Compare the lower half-precision (16-bit) floating-point elements in a and b for less-than, and return +/// the boolean result (0 or 1).
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comilt_sh) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_comilt_sh(a: __m128h, b: __m128h) -> i32 { + _mm_comi_sh::<_CMP_LT_OS>(a, b) +} + +/// Compare the lower half-precision (16-bit) floating-point elements in a and b for not-equal, and return +/// the boolean result (0 or 1). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comineq_sh) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_comineq_sh(a: __m128h, b: __m128h) -> i32 { + _mm_comi_sh::<_CMP_NEQ_OS>(a, b) +} + +/// Compare the lower half-precision (16-bit) floating-point elements in a and b for equality, and +/// return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ucomieq_sh) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_ucomieq_sh(a: __m128h, b: __m128h) -> i32 { + _mm_comi_sh::<_CMP_EQ_OQ>(a, b) +} + +/// Compare the lower half-precision (16-bit) floating-point elements in a and b for greater-than-or-equal, +/// and return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ucomige_sh) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_ucomige_sh(a: __m128h, b: __m128h) -> i32 { + _mm_comi_sh::<_CMP_GE_OQ>(a, b) +} + +/// Compare the lower half-precision (16-bit) floating-point elements in a and b for greater-than, and return +/// the boolean result (0 or 1). This instruction will not signal an exception for QNaNs. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ucomigt_sh) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_ucomigt_sh(a: __m128h, b: __m128h) -> i32 { + _mm_comi_sh::<_CMP_GT_OQ>(a, b) +} + +/// Compare the lower half-precision (16-bit) floating-point elements in a and b for less-than-or-equal, and +/// return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ucomile_sh) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_ucomile_sh(a: __m128h, b: __m128h) -> i32 { + _mm_comi_sh::<_CMP_LE_OQ>(a, b) +} + +/// Compare the lower half-precision (16-bit) floating-point elements in a and b for less-than, and return +/// the boolean result (0 or 1). This instruction will not signal an exception for QNaNs. 
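Both comparison families return an `i32` that is 0 or 1; they differ only in whether a QNaN operand signals an invalid-operation exception (`comi*`, the ordered-signaling `_OS` predicates) or stays quiet (`ucomi*`, the `_OQ` predicates). A sketch (the exception signalling itself is not observable from this code):

```rust
#[target_feature(enable = "avx512fp16")]
unsafe fn compare_demo() {
    let a = _mm_set_sh(1.0);
    let b = _mm_set_sh(2.0);
    assert_eq!(_mm_comilt_sh(a, b), 1); // 1.0 < 2.0
    assert_eq!(_mm_comieq_sh(a, b), 0);

    // Quiet predicate: a NaN operand compares unequal without signalling.
    assert_eq!(_mm_ucomieq_sh(a, _mm_set_sh(f16::NAN)), 0);
}
```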
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ucomilt_sh) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_ucomilt_sh(a: __m128h, b: __m128h) -> i32 { + _mm_comi_sh::<_CMP_LT_OQ>(a, b) +} + +/// Compare the lower half-precision (16-bit) floating-point elements in a and b for not-equal, and return +/// the boolean result (0 or 1). This instruction will not signal an exception for QNaNs. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ucomineq_sh) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_ucomineq_sh(a: __m128h, b: __m128h) -> i32 { + _mm_comi_sh::<_CMP_NEQ_OQ>(a, b) +} + +/// Load 128-bits (composed of 8 packed half-precision (16-bit) floating-point elements) from memory into +/// a new vector. The address must be aligned to 16 bytes or a general-protection exception may be generated. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_load_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_load_ph(mem_addr: *const f16) -> __m128h { + *mem_addr.cast() +} + +/// Load 256-bits (composed of 16 packed half-precision (16-bit) floating-point elements) from memory into +/// a new vector. The address must be aligned to 32 bytes or a general-protection exception may be generated. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_load_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_load_ph(mem_addr: *const f16) -> __m256h { + *mem_addr.cast() +} + +/// Load 512-bits (composed of 32 packed half-precision (16-bit) floating-point elements) from memory into +/// a new vector. The address must be aligned to 64 bytes or a general-protection exception may be generated. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_load_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_load_ph(mem_addr: *const f16) -> __m512h { + *mem_addr.cast() +} + +/// Load a half-precision (16-bit) floating-point element from memory into the lower element of a new vector, +/// and zero the upper elements +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_load_sh) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_load_sh(mem_addr: *const f16) -> __m128h { + _mm_set_sh(*mem_addr) +} + +/// Load a half-precision (16-bit) floating-point element from memory into the lower element of a new vector +/// using writemask k (the element is copied from src when mask bit 0 is not set), and zero the upper elements. 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_load_sh) +#[inline] +#[target_feature(enable = "avx512fp16,sse,avx512f")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mask_load_sh(src: __m128h, k: __mmask8, mem_addr: *const f16) -> __m128h { + let mut dst = src; + asm!( + vpl!("vmovsh {dst}{{{k}}}"), + dst = inout(xmm_reg) dst, + k = in(kreg) k, + p = in(reg) mem_addr, + options(pure, readonly, nostack, preserves_flags) + ); + dst +} + +/// Load a half-precision (16-bit) floating-point element from memory into the lower element of a new vector +/// using zeromask k (the element is zeroed out when mask bit 0 is not set), and zero the upper elements. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_load_sh) +#[inline] +#[target_feature(enable = "avx512fp16,sse,avx512f")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_maskz_load_sh(k: __mmask8, mem_addr: *const f16) -> __m128h { + let mut dst: __m128h; + asm!( + vpl!("vmovsh {dst}{{{k}}}{{z}}"), + dst = out(xmm_reg) dst, + k = in(kreg) k, + p = in(reg) mem_addr, + options(pure, readonly, nostack, preserves_flags) + ); + dst +} + +/// Load 128-bits (composed of 8 packed half-precision (16-bit) floating-point elements) from memory into +/// a new vector. The address does not need to be aligned to any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadu_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_loadu_ph(mem_addr: *const f16) -> __m128h { + ptr::read_unaligned(mem_addr.cast()) +} + +/// Load 256-bits (composed of 16 packed half-precision (16-bit) floating-point elements) from memory into +/// a new vector. The address does not need to be aligned to any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_loadu_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_loadu_ph(mem_addr: *const f16) -> __m256h { + ptr::read_unaligned(mem_addr.cast()) +} + +/// Load 512-bits (composed of 32 packed half-precision (16-bit) floating-point elements) from memory into +/// a new vector. The address does not need to be aligned to any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_loadu_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_loadu_ph(mem_addr: *const f16) -> __m512h { + ptr::read_unaligned(mem_addr.cast()) +} + +/// Move the lower half-precision (16-bit) floating-point element from b to the lower element of dst +/// using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper +/// 7 packed elements from a to the upper elements of dst.
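The aligned loads really do require their stated alignment; a `#[repr(align(...))]` wrapper is the usual way to guarantee it, while `_mm_loadu_ph` accepts any `*const f16`. A minimal sketch (the wrapper type is ours):

```rust
#[repr(align(16))]
struct Aligned8([f16; 8]); // 16-byte alignment satisfies _mm_load_ph

#[target_feature(enable = "avx512fp16,avx512vl")]
unsafe fn load_both(aligned: &Aligned8, any: &[f16; 8]) -> (__m128h, __m128h) {
    (
        _mm_load_ph(aligned.0.as_ptr()), // ok: alignment guaranteed by the type
        _mm_loadu_ph(any.as_ptr()),      // ok at any alignment
    )
}
```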
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_move_sh) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mask_move_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h { + let mut mov: f16 = simd_extract!(src, 0); + if (k & 1) != 0 { + mov = simd_extract!(b, 0); + } + simd_insert!(a, 0, mov) +} + +/// Move the lower half-precision (16-bit) floating-point element from b to the lower element of dst +/// using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed +/// elements from a to the upper elements of dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_move_sh) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_maskz_move_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { + let mut mov: f16 = 0.; + if (k & 1) != 0 { + mov = simd_extract!(b, 0); + } + simd_insert!(a, 0, mov) +} + +/// Move the lower half-precision (16-bit) floating-point element from b to the lower element of dst, +/// and copy the upper 7 packed elements from a to the upper elements of dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_move_sh) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_move_sh(a: __m128h, b: __m128h) -> __m128h { + let mov: f16 = simd_extract!(b, 0); + simd_insert!(a, 0, mov) +} + +/// Store 128-bits (composed of 8 packed half-precision (16-bit) floating-point elements) from a into memory. +/// The address must be aligned to 16 bytes or a general-protection exception may be generated. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_store_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_store_ph(mem_addr: *mut f16, a: __m128h) { + *mem_addr.cast() = a; +} + +/// Store 256-bits (composed of 16 packed half-precision (16-bit) floating-point elements) from a into memory. +/// The address must be aligned to 32 bytes or a general-protection exception may be generated. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_store_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_store_ph(mem_addr: *mut f16, a: __m256h) { + *mem_addr.cast() = a; +} + +/// Store 512-bits (composed of 32 packed half-precision (16-bit) floating-point elements) from a into memory. +/// The address must be aligned to 64 bytes or a general-protection exception may be generated. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_store_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_store_ph(mem_addr: *mut f16, a: __m512h) { + *mem_addr.cast() = a; +} + +/// Store the lower half-precision (16-bit) floating-point element from a into memory. 
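The scalar `move` family above only ever computes lane 0; lanes 1..8 always come from `a`, and the mask decides between `b` and `src` for lane 0 alone. A sketch of that behaviour:

```rust
#[target_feature(enable = "avx512fp16,avx512vl")]
unsafe fn move_sh_demo() {
    let (src, a, b) = (_mm_set1_ph(9.0), _mm_set1_ph(1.0), _mm_set1_ph(2.0));
    let mut out = [0.0f16; 8];

    _mm_storeu_ph(out.as_mut_ptr(), _mm_mask_move_sh(src, 0b1, a, b));
    assert_eq!((out[0], out[1]), (2.0, 1.0)); // bit 0 set: lane 0 from b, rest from a

    _mm_storeu_ph(out.as_mut_ptr(), _mm_mask_move_sh(src, 0b0, a, b));
    assert_eq!(out[0], 9.0); // bit 0 clear: lane 0 falls back to src
}
```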
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_store_sh) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_store_sh(mem_addr: *mut f16, a: __m128h) { + *mem_addr = simd_extract!(a, 0); +} + +/// Store the lower half-precision (16-bit) floating-point element from a into memory using writemask k +/// (the element is not stored when mask bit 0 is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_store_sh) +#[inline] +#[target_feature(enable = "avx512fp16,sse,avx512f")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mask_store_sh(mem_addr: *mut f16, k: __mmask8, a: __m128h) { + asm!( + vps!("vmovsh", "{{{k}}}, {src}"), + p = in(reg) mem_addr, + k = in(kreg) k, + src = in(xmm_reg) a, + options(nostack, preserves_flags) + ); +} + +/// Store 128-bits (composed of 8 packed half-precision (16-bit) floating-point elements) from a into memory. +/// The address does not need to be aligned to any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_storeu_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_storeu_ph(mem_addr: *mut f16, a: __m128h) { + ptr::write_unaligned(mem_addr.cast(), a); +} + +/// Store 256-bits (composed of 16 packed half-precision (16-bit) floating-point elements) from a into memory. +/// The address does not need to be aligned to any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_storeu_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_storeu_ph(mem_addr: *mut f16, a: __m256h) { + ptr::write_unaligned(mem_addr.cast(), a); +} + +/// Store 512-bits (composed of 32 packed half-precision (16-bit) floating-point elements) from a into memory. +/// The address does not need to be aligned to any particular boundary. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_storeu_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_storeu_ph(mem_addr: *mut f16, a: __m512h) { + ptr::write_unaligned(mem_addr.cast(), a); +} + +/// Add packed half-precision (16-bit) floating-point elements in a and b, and store the results in dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_add_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vaddph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_add_ph(a: __m128h, b: __m128h) -> __m128h { + simd_add(a, b) +} + +/// Add packed half-precision (16-bit) floating-point elements in a and b, and store the results in dst using +/// writemask k (elements are copied from src when the corresponding mask bit is not set).
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_add_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vaddph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mask_add_ph(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h { + let r = _mm_add_ph(a, b); + simd_select_bitmask(k, r, src) +} + +/// Add packed half-precision (16-bit) floating-point elements in a and b, and store the results in dst using +/// zeromask k (elements are zeroed out when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_add_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vaddph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_maskz_add_ph(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { + let r = _mm_add_ph(a, b); + simd_select_bitmask(k, r, _mm_setzero_ph()) +} + +/// Add packed half-precision (16-bit) floating-point elements in a and b, and store the results in dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_add_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vaddph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_add_ph(a: __m256h, b: __m256h) -> __m256h { + simd_add(a, b) +} + +/// Add packed half-precision (16-bit) floating-point elements in a and b, and store the results in dst using +/// writemask k (elements are copied from src when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_add_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vaddph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_mask_add_ph(src: __m256h, k: __mmask16, a: __m256h, b: __m256h) -> __m256h { + let r = _mm256_add_ph(a, b); + simd_select_bitmask(k, r, src) +} + +/// Add packed half-precision (16-bit) floating-point elements in a and b, and store the results in dst using +/// zeromask k (elements are zeroed out when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_add_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vaddph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_maskz_add_ph(k: __mmask16, a: __m256h, b: __m256h) -> __m256h { + let r = _mm256_add_ph(a, b); + simd_select_bitmask(k, r, _mm256_setzero_ph()) +} + +/// Add packed half-precision (16-bit) floating-point elements in a and b, and store the results in dst. 
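The mask and zero-mask variants differ only in what lands in the lanes whose mask bit is clear: the corresponding lane of `src` for `_mm_mask_add_ph`, `0.0` for `_mm_maskz_add_ph`. A small sketch:

```rust
#[target_feature(enable = "avx512fp16,avx512vl")]
unsafe fn masked_add_demo() {
    let (src, a, b) = (_mm_set1_ph(-1.0), _mm_set1_ph(1.0), _mm_set1_ph(2.0));
    let k: __mmask8 = 0b0000_1111; // compute only the low 4 lanes
    let mut out = [0.0f16; 8];

    _mm_storeu_ph(out.as_mut_ptr(), _mm_mask_add_ph(src, k, a, b));
    assert_eq!((out[0], out[7]), (3.0, -1.0)); // computed lane vs. src fallback

    _mm_storeu_ph(out.as_mut_ptr(), _mm_maskz_add_ph(k, a, b));
    assert_eq!(out[7], 0.0); // zero-mask: cleared instead
}
```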
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_add_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vaddph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_add_ph(a: __m512h, b: __m512h) -> __m512h { + simd_add(a, b) +} + +/// Add packed half-precision (16-bit) floating-point elements in a and b, and store the results in dst using +/// writemask k (elements are copied from src when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_add_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vaddph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_mask_add_ph(src: __m512h, k: __mmask32, a: __m512h, b: __m512h) -> __m512h { + let r = _mm512_add_ph(a, b); + simd_select_bitmask(k, r, src) +} + +/// Add packed half-precision (16-bit) floating-point elements in a and b, and store the results in dst using +/// zeromask k (elements are zeroed out when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_add_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vaddph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_maskz_add_ph(k: __mmask32, a: __m512h, b: __m512h) -> __m512h { + let r = _mm512_add_ph(a, b); + simd_select_bitmask(k, r, _mm512_setzero_ph()) +} + +/// Add packed half-precision (16-bit) floating-point elements in a and b, and store the results in dst. +/// Rounding is done according to the rounding parameter, which can be one of: +/// +/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions +/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions +/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions +/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions +/// _MM_FROUND_CUR_DIRECTION +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_add_round_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vaddph, ROUNDING = 8))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_add_round_ph<const ROUNDING: i32>(a: __m512h, b: __m512h) -> __m512h { + static_assert_rounding!(ROUNDING); + vaddph(a, b, ROUNDING) +} + +/// Add packed half-precision (16-bit) floating-point elements in a and b, and store the results in dst using +/// writemask k (elements are copied from src when the corresponding mask bit is not set).
+/// Rounding is done according to the rounding parameter, which can be one of: +/// +/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions +/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions +/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions +/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions +/// _MM_FROUND_CUR_DIRECTION +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_add_round_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vaddph, ROUNDING = 8))] +#[rustc_legacy_const_generics(4)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_mask_add_round_ph<const ROUNDING: i32>( + src: __m512h, + k: __mmask32, + a: __m512h, + b: __m512h, +) -> __m512h { + static_assert_rounding!(ROUNDING); + let r = _mm512_add_round_ph::<ROUNDING>(a, b); + simd_select_bitmask(k, r, src) +} + +/// Add packed half-precision (16-bit) floating-point elements in a and b, and store the results in dst using +/// zeromask k (elements are zeroed out when the corresponding mask bit is not set). +/// Rounding is done according to the rounding parameter, which can be one of: +/// +/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions +/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions +/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions +/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions +/// _MM_FROUND_CUR_DIRECTION +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_add_round_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vaddph, ROUNDING = 8))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_maskz_add_round_ph<const ROUNDING: i32>( + k: __mmask32, + a: __m512h, + b: __m512h, +) -> __m512h { + static_assert_rounding!(ROUNDING); + let r = _mm512_add_round_ph::<ROUNDING>(a, b); + simd_select_bitmask(k, r, _mm512_setzero_ph()) +} + +/// Add the lower half-precision (16-bit) floating-point elements in a and b, store the result in the +/// lower element of dst, and copy the upper 7 packed elements from a to the upper elements of dst.
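The rounding control is a const generic, so it is supplied with turbofish syntax and validated at compile time by `static_assert_rounding!`; any combination outside the list above fails to compile. A sketch:

```rust
#[target_feature(enable = "avx512fp16")]
unsafe fn add_round_toward_zero(a: __m512h, b: __m512h) -> __m512h {
    // Truncating add with FP exceptions suppressed.
    _mm512_add_round_ph::<{ _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC }>(a, b)
}
```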
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_add_round_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vaddsh, ROUNDING = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_add_round_sh<const ROUNDING: i32>(a: __m128h, b: __m128h) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    _mm_mask_add_round_sh::<ROUNDING>(_mm_undefined_ph(), 0xff, a, b)
+}
+
+/// Add the lower half-precision (16-bit) floating-point elements in a and b, store the result in the
+/// lower element of dst, and copy the upper 7 packed elements from a to the upper elements of dst using
+/// writemask k (the element is copied from src when mask bit 0 is not set).
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_add_round_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vaddsh, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_add_round_sh<const ROUNDING: i32>(
+    src: __m128h,
+    k: __mmask8,
+    a: __m128h,
+    b: __m128h,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    vaddsh(a, b, src, k, ROUNDING)
+}
+
+/// Add the lower half-precision (16-bit) floating-point elements in a and b, store the result in the
+/// lower element of dst, and copy the upper 7 packed elements from a to the upper elements of dst using
+/// zeromask k (the element is zeroed out when mask bit 0 is not set).
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_add_round_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vaddsh, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_add_round_sh<const ROUNDING: i32>(
+    k: __mmask8,
+    a: __m128h,
+    b: __m128h,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    _mm_mask_add_round_sh::<ROUNDING>(_mm_setzero_ph(), k, a, b)
+}
+
+/// Add the lower half-precision (16-bit) floating-point elements in a and b, store the result in the
+/// lower element of dst, and copy the upper 7 packed elements from a to the upper elements of dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_add_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vaddsh))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_add_sh(a: __m128h, b: __m128h) -> __m128h {
+    _mm_add_round_sh::<_MM_FROUND_CUR_DIRECTION>(a, b)
+}
+
+/// Add the lower half-precision (16-bit) floating-point elements in a and b, store the result in the
+/// lower element of dst, and copy the upper 7 packed elements from a to the upper elements of dst using
+/// writemask k (the element is copied from src when mask bit 0 is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_add_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vaddsh))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_add_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
+    _mm_mask_add_round_sh::<_MM_FROUND_CUR_DIRECTION>(src, k, a, b)
+}
+
+/// Add the lower half-precision (16-bit) floating-point elements in a and b, store the result in the
+/// lower element of dst, and copy the upper 7 packed elements from a to the upper elements of dst using
+/// zeromask k (the element is zeroed out when mask bit 0 is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_add_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vaddsh))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_add_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
+    _mm_maskz_add_round_sh::<_MM_FROUND_CUR_DIRECTION>(k, a, b)
+}
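+
+// Editorial sketch (not part of the original patch): lane selection by the
+// writemask/zeromask variants documented above. Assumes the `_mm_mask_add_ph`
+// and `_mm_maskz_add_ph` intrinsics defined earlier in this file; the module
+// name and mask value are illustrative only.
+#[cfg(test)]
+mod mask_add_ph_sketch {
+    use crate::core_arch::x86::*;
+    use stdarch_test::simd_test;
+
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn sketch_mask_add_lane_selection() {
+        let src = _mm_set1_ph(42.0);
+        let a = _mm_set1_ph(1.0);
+        let b = _mm_set1_ph(2.0);
+        // Bits 0 and 2 of k are set, so lanes 0 and 2 take a + b; the other
+        // lanes are copied from src (writemask) or zeroed (zeromask).
+        let r = _mm_mask_add_ph(src, 0b0000_0101, a, b);
+        let e = _mm_setr_ph(3.0, 42.0, 3.0, 42.0, 42.0, 42.0, 42.0, 42.0);
+        assert_eq_m128h(r, e);
+        let r = _mm_maskz_add_ph(0b0000_0101, a, b);
+        let e = _mm_setr_ph(3.0, 0.0, 3.0, 0.0, 0.0, 0.0, 0.0, 0.0);
+        assert_eq_m128h(r, e);
+    }
+}
+
+/// Subtract packed half-precision (16-bit) floating-point elements in b from a, and store the results in dst.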
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sub_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vsubph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_sub_ph(a: __m128h, b: __m128h) -> __m128h { + simd_sub(a, b) +} + +/// Subtract packed half-precision (16-bit) floating-point elements in b from a, and store the results in dst using +/// writemask k (elements are copied from src when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_sub_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vsubph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mask_sub_ph(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h { + let r = _mm_sub_ph(a, b); + simd_select_bitmask(k, r, src) +} + +/// Subtract packed half-precision (16-bit) floating-point elements in b from a, and store the results in dst using +/// zeromask k (elements are zeroed out when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_sub_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vsubph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_maskz_sub_ph(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { + let r = _mm_sub_ph(a, b); + simd_select_bitmask(k, r, _mm_setzero_ph()) +} + +/// Subtract packed half-precision (16-bit) floating-point elements in b from a, and store the results in dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_sub_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vsubph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_sub_ph(a: __m256h, b: __m256h) -> __m256h { + simd_sub(a, b) +} + +/// Subtract packed half-precision (16-bit) floating-point elements in b from a, and store the results in dst using +/// writemask k (elements are copied from src when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_sub_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vsubph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_mask_sub_ph(src: __m256h, k: __mmask16, a: __m256h, b: __m256h) -> __m256h { + let r = _mm256_sub_ph(a, b); + simd_select_bitmask(k, r, src) +} + +/// Subtract packed half-precision (16-bit) floating-point elements in b from a, and store the results in dst using +/// zeromask k (elements are zeroed out when the corresponding mask bit is not set). 
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_sub_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vsubph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_maskz_sub_ph(k: __mmask16, a: __m256h, b: __m256h) -> __m256h {
+    let r = _mm256_sub_ph(a, b);
+    simd_select_bitmask(k, r, _mm256_setzero_ph())
+}
+
+/// Subtract packed half-precision (16-bit) floating-point elements in b from a, and store the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_sub_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vsubph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_sub_ph(a: __m512h, b: __m512h) -> __m512h {
+    simd_sub(a, b)
+}
+
+/// Subtract packed half-precision (16-bit) floating-point elements in b from a, and store the results in dst using
+/// writemask k (elements are copied from src when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_sub_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vsubph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_sub_ph(src: __m512h, k: __mmask32, a: __m512h, b: __m512h) -> __m512h {
+    let r = _mm512_sub_ph(a, b);
+    simd_select_bitmask(k, r, src)
+}
+
+/// Subtract packed half-precision (16-bit) floating-point elements in b from a, and store the results in dst using
+/// zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_sub_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vsubph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_sub_ph(k: __mmask32, a: __m512h, b: __m512h) -> __m512h {
+    let r = _mm512_sub_ph(a, b);
+    simd_select_bitmask(k, r, _mm512_setzero_ph())
+}
+
+/// Subtract packed half-precision (16-bit) floating-point elements in b from a, and store the results in dst.
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_sub_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vsubph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_sub_round_ph<const ROUNDING: i32>(a: __m512h, b: __m512h) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    vsubph(a, b, ROUNDING)
+}
+
+/// Subtract packed half-precision (16-bit) floating-point elements in b from a, and store the results in dst using
+/// writemask k (elements are copied from src when the corresponding mask bit is not set).
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_sub_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vsubph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_sub_round_ph<const ROUNDING: i32>(
+    src: __m512h,
+    k: __mmask32,
+    a: __m512h,
+    b: __m512h,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    let r = _mm512_sub_round_ph::<ROUNDING>(a, b);
+    simd_select_bitmask(k, r, src)
+}
+
+/// Subtract packed half-precision (16-bit) floating-point elements in b from a, and store the results in dst using
+/// zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_sub_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vsubph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_sub_round_ph<const ROUNDING: i32>(
+    k: __mmask32,
+    a: __m512h,
+    b: __m512h,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    let r = _mm512_sub_round_ph::<ROUNDING>(a, b);
+    simd_select_bitmask(k, r, _mm512_setzero_ph())
+}
+
+/// Subtract the lower half-precision (16-bit) floating-point elements in b from a, store the result in the
+/// lower element of dst, and copy the upper 7 packed elements from a to the upper elements of dst.
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sub_round_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vsubsh, ROUNDING = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_sub_round_sh<const ROUNDING: i32>(a: __m128h, b: __m128h) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    _mm_mask_sub_round_sh::<ROUNDING>(_mm_undefined_ph(), 0xff, a, b)
+}
+
+/// Subtract the lower half-precision (16-bit) floating-point elements in b from a, store the result in the
+/// lower element of dst, and copy the upper 7 packed elements from a to the upper elements of dst using
+/// writemask k (the element is copied from src when mask bit 0 is not set).
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_sub_round_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vsubsh, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_sub_round_sh<const ROUNDING: i32>(
+    src: __m128h,
+    k: __mmask8,
+    a: __m128h,
+    b: __m128h,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    vsubsh(a, b, src, k, ROUNDING)
+}
+
+/// Subtract the lower half-precision (16-bit) floating-point elements in b from a, store the result in the
+/// lower element of dst, and copy the upper 7 packed elements from a to the upper elements of dst using
+/// zeromask k (the element is zeroed out when mask bit 0 is not set).
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_sub_round_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vsubsh, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_sub_round_sh<const ROUNDING: i32>(
+    k: __mmask8,
+    a: __m128h,
+    b: __m128h,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    _mm_mask_sub_round_sh::<ROUNDING>(_mm_setzero_ph(), k, a, b)
+}
+
+/// Subtract the lower half-precision (16-bit) floating-point elements in b from a, store the result in the
+/// lower element of dst, and copy the upper 7 packed elements from a to the upper elements of dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sub_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vsubsh))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_sub_sh(a: __m128h, b: __m128h) -> __m128h {
+    _mm_sub_round_sh::<_MM_FROUND_CUR_DIRECTION>(a, b)
+}
+
+/// Subtract the lower half-precision (16-bit) floating-point elements in b from a, store the result in the
+/// lower element of dst, and copy the upper 7 packed elements from a to the upper elements of dst using
+/// writemask k (the element is copied from src when mask bit 0 is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_sub_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vsubsh))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_sub_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
+    _mm_mask_sub_round_sh::<_MM_FROUND_CUR_DIRECTION>(src, k, a, b)
+}
+
+/// Subtract the lower half-precision (16-bit) floating-point elements in b from a, store the result in the
+/// lower element of dst, and copy the upper 7 packed elements from a to the upper elements of dst using
+/// zeromask k (the element is zeroed out when mask bit 0 is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_sub_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vsubsh))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_sub_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
+    _mm_maskz_sub_round_sh::<_MM_FROUND_CUR_DIRECTION>(k, a, b)
+}
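+
+// Editorial sketch (not part of the original patch): the scalar `_sh` forms
+// above operate on lane 0 only and pass lanes 1..=7 through from `a`, as the
+// doc comments describe. Module name and values are illustrative only.
+#[cfg(test)]
+mod sub_sh_sketch {
+    use crate::core_arch::x86::*;
+    use stdarch_test::simd_test;
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn sketch_sub_sh_upper_lanes() {
+        let a = _mm_setr_ph(5.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0);
+        let b = _mm_set_sh(2.0);
+        // Lane 0 becomes a0 - b0; lanes 1..=7 are copied unchanged from a.
+        let r = _mm_sub_sh(a, b);
+        let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0);
+        assert_eq_m128h(r, e);
+    }
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, and store the results in dst.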
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mul_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vmulph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mul_ph(a: __m128h, b: __m128h) -> __m128h { + simd_mul(a, b) +} + +/// Multiply packed half-precision (16-bit) floating-point elements in a and b, and store the results in dst using +/// writemask k (elements are copied from src when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_mul_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vmulph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mask_mul_ph(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h { + let r = _mm_mul_ph(a, b); + simd_select_bitmask(k, r, src) +} + +/// Multiply packed half-precision (16-bit) floating-point elements in a and b, and store the results in dst using +/// zeromask k (elements are zeroed out when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_mul_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vmulph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_maskz_mul_ph(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { + let r = _mm_mul_ph(a, b); + simd_select_bitmask(k, r, _mm_setzero_ph()) +} + +/// Multiply packed half-precision (16-bit) floating-point elements in a and b, and store the results in dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mul_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vmulph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_mul_ph(a: __m256h, b: __m256h) -> __m256h { + simd_mul(a, b) +} + +/// Multiply packed half-precision (16-bit) floating-point elements in a and b, and store the results in dst using +/// writemask k (elements are copied from src when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_mul_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vmulph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_mask_mul_ph(src: __m256h, k: __mmask16, a: __m256h, b: __m256h) -> __m256h { + let r = _mm256_mul_ph(a, b); + simd_select_bitmask(k, r, src) +} + +/// Multiply packed half-precision (16-bit) floating-point elements in a and b, and store the results in dst using +/// zeromask k (elements are zeroed out when the corresponding mask bit is not set). 
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_mul_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vmulph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_maskz_mul_ph(k: __mmask16, a: __m256h, b: __m256h) -> __m256h {
+    let r = _mm256_mul_ph(a, b);
+    simd_select_bitmask(k, r, _mm256_setzero_ph())
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, and store the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mul_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vmulph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mul_ph(a: __m512h, b: __m512h) -> __m512h {
+    simd_mul(a, b)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, and store the results in dst using
+/// writemask k (elements are copied from src when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_mul_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vmulph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_mul_ph(src: __m512h, k: __mmask32, a: __m512h, b: __m512h) -> __m512h {
+    let r = _mm512_mul_ph(a, b);
+    simd_select_bitmask(k, r, src)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, and store the results in dst using
+/// zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_mul_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vmulph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_mul_ph(k: __mmask32, a: __m512h, b: __m512h) -> __m512h {
+    let r = _mm512_mul_ph(a, b);
+    simd_select_bitmask(k, r, _mm512_setzero_ph())
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, and store the results in dst.
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mul_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vmulph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mul_round_ph<const ROUNDING: i32>(a: __m512h, b: __m512h) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    vmulph(a, b, ROUNDING)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, and store the results in dst using
+/// writemask k (elements are copied from src when the corresponding mask bit is not set).
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_mul_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vmulph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_mul_round_ph<const ROUNDING: i32>(
+    src: __m512h,
+    k: __mmask32,
+    a: __m512h,
+    b: __m512h,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    let r = _mm512_mul_round_ph::<ROUNDING>(a, b);
+    simd_select_bitmask(k, r, src)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, and store the results in dst using
+/// zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_mul_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vmulph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_mul_round_ph<const ROUNDING: i32>(
+    k: __mmask32,
+    a: __m512h,
+    b: __m512h,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    let r = _mm512_mul_round_ph::<ROUNDING>(a, b);
+    simd_select_bitmask(k, r, _mm512_setzero_ph())
+}
+
+/// Multiply the lower half-precision (16-bit) floating-point elements in a and b, store the result in the
+/// lower element of dst, and copy the upper 7 packed elements from a to the upper elements of dst.
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mul_round_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vmulsh, ROUNDING = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mul_round_sh<const ROUNDING: i32>(a: __m128h, b: __m128h) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    _mm_mask_mul_round_sh::<ROUNDING>(_mm_undefined_ph(), 0xff, a, b)
+}
+
+/// Multiply the lower half-precision (16-bit) floating-point elements in a and b, store the result in the
+/// lower element of dst, and copy the upper 7 packed elements from a to the upper elements of dst using
+/// writemask k (the element is copied from src when mask bit 0 is not set).
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_mul_round_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vmulsh, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_mul_round_sh<const ROUNDING: i32>(
+    src: __m128h,
+    k: __mmask8,
+    a: __m128h,
+    b: __m128h,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    vmulsh(a, b, src, k, ROUNDING)
+}
+
+/// Multiply the lower half-precision (16-bit) floating-point elements in a and b, store the result in the
+/// lower element of dst, and copy the upper 7 packed elements from a to the upper elements of dst using
+/// zeromask k (the element is zeroed out when mask bit 0 is not set).
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_mul_round_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vmulsh, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_mul_round_sh<const ROUNDING: i32>(
+    k: __mmask8,
+    a: __m128h,
+    b: __m128h,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    _mm_mask_mul_round_sh::<ROUNDING>(_mm_setzero_ph(), k, a, b)
+}
+
+/// Multiply the lower half-precision (16-bit) floating-point elements in a and b, store the result in the
+/// lower element of dst, and copy the upper 7 packed elements from a to the upper elements of dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mul_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vmulsh))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mul_sh(a: __m128h, b: __m128h) -> __m128h {
+    _mm_mul_round_sh::<_MM_FROUND_CUR_DIRECTION>(a, b)
+}
+
+/// Multiply the lower half-precision (16-bit) floating-point elements in a and b, store the result in the
+/// lower element of dst, and copy the upper 7 packed elements from a to the upper elements of dst using
+/// writemask k (the element is copied from src when mask bit 0 is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_mul_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vmulsh))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_mul_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
+    _mm_mask_mul_round_sh::<_MM_FROUND_CUR_DIRECTION>(src, k, a, b)
+}
+
+/// Multiply the lower half-precision (16-bit) floating-point elements in a and b, store the result in the
+/// lower element of dst, and copy the upper 7 packed elements from a to the upper elements of dst using
+/// zeromask k (the element is zeroed out when mask bit 0 is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_mul_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vmulsh))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_mul_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
+    _mm_maskz_mul_round_sh::<_MM_FROUND_CUR_DIRECTION>(k, a, b)
+}
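+
+// Editorial sketch (not part of the original patch): a zeromask combined with
+// an explicit rounding mode, as in `_mm512_maskz_mul_round_ph` above. Module
+// name, mask, and values are illustrative only.
+#[cfg(test)]
+mod maskz_mul_round_ph_sketch {
+    use crate::core_arch::x86::*;
+    use stdarch_test::simd_test;
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn sketch_maskz_mul_round() {
+        let a = _mm512_set1_ph(2.0);
+        let b = _mm512_set1_ph(3.0);
+        // Only the low 16 lanes are computed (toward zero, exceptions
+        // suppressed); the high 16 lanes are zeroed by the mask. The product
+        // is exact, so the rounding mode does not alter it.
+        let r = _mm512_maskz_mul_round_ph::<{ _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC }>(
+            0x0000_ffff,
+            a,
+            b,
+        );
+        let e = _mm512_setr_ph(
+            6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0,
+            0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+        );
+        assert_eq_m512h(r, e);
+    }
+}
+
+/// Divide packed half-precision (16-bit) floating-point elements in a by b, and store the results in dst.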
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_div_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vdivph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_div_ph(a: __m128h, b: __m128h) -> __m128h { + simd_div(a, b) +} + +/// Divide packed half-precision (16-bit) floating-point elements in a by b, and store the results in dst using +/// writemask k (elements are copied from src when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_div_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vdivph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mask_div_ph(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h { + let r = _mm_div_ph(a, b); + simd_select_bitmask(k, r, src) +} + +/// Divide packed half-precision (16-bit) floating-point elements in a by b, and store the results in dst using +/// zeromask k (elements are zeroed out when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_div_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vdivph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_maskz_div_ph(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { + let r = _mm_div_ph(a, b); + simd_select_bitmask(k, r, _mm_setzero_ph()) +} + +/// Divide packed half-precision (16-bit) floating-point elements in a by b, and store the results in dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_div_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vdivph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_div_ph(a: __m256h, b: __m256h) -> __m256h { + simd_div(a, b) +} + +/// Divide packed half-precision (16-bit) floating-point elements in a by b, and store the results in dst using +/// writemask k (elements are copied from src when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_div_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vdivph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_mask_div_ph(src: __m256h, k: __mmask16, a: __m256h, b: __m256h) -> __m256h { + let r = _mm256_div_ph(a, b); + simd_select_bitmask(k, r, src) +} + +/// Divide packed half-precision (16-bit) floating-point elements in a by b, and store the results in dst using +/// zeromask k (elements are zeroed out when the corresponding mask bit is not set). 
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_div_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vdivph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_maskz_div_ph(k: __mmask16, a: __m256h, b: __m256h) -> __m256h {
+    let r = _mm256_div_ph(a, b);
+    simd_select_bitmask(k, r, _mm256_setzero_ph())
+}
+
+/// Divide packed half-precision (16-bit) floating-point elements in a by b, and store the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_div_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vdivph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_div_ph(a: __m512h, b: __m512h) -> __m512h {
+    simd_div(a, b)
+}
+
+/// Divide packed half-precision (16-bit) floating-point elements in a by b, and store the results in dst using
+/// writemask k (elements are copied from src when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_div_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vdivph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_div_ph(src: __m512h, k: __mmask32, a: __m512h, b: __m512h) -> __m512h {
+    let r = _mm512_div_ph(a, b);
+    simd_select_bitmask(k, r, src)
+}
+
+/// Divide packed half-precision (16-bit) floating-point elements in a by b, and store the results in dst using
+/// zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_div_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vdivph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_div_ph(k: __mmask32, a: __m512h, b: __m512h) -> __m512h {
+    let r = _mm512_div_ph(a, b);
+    simd_select_bitmask(k, r, _mm512_setzero_ph())
+}
+
+/// Divide packed half-precision (16-bit) floating-point elements in a by b, and store the results in dst.
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_div_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vdivph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_div_round_ph<const ROUNDING: i32>(a: __m512h, b: __m512h) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    vdivph(a, b, ROUNDING)
+}
+
+/// Divide packed half-precision (16-bit) floating-point elements in a by b, and store the results in dst using
+/// writemask k (elements are copied from src when the corresponding mask bit is not set).
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_div_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vdivph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_div_round_ph<const ROUNDING: i32>(
+    src: __m512h,
+    k: __mmask32,
+    a: __m512h,
+    b: __m512h,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    let r = _mm512_div_round_ph::<ROUNDING>(a, b);
+    simd_select_bitmask(k, r, src)
+}
+
+/// Divide packed half-precision (16-bit) floating-point elements in a by b, and store the results in dst using
+/// zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_div_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vdivph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_div_round_ph<const ROUNDING: i32>(
+    k: __mmask32,
+    a: __m512h,
+    b: __m512h,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    let r = _mm512_div_round_ph::<ROUNDING>(a, b);
+    simd_select_bitmask(k, r, _mm512_setzero_ph())
+}
+
+/// Divide the lower half-precision (16-bit) floating-point elements in a by b, store the result in the
+/// lower element of dst, and copy the upper 7 packed elements from a to the upper elements of dst.
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_div_round_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vdivsh, ROUNDING = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_div_round_sh<const ROUNDING: i32>(a: __m128h, b: __m128h) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    _mm_mask_div_round_sh::<ROUNDING>(_mm_undefined_ph(), 0xff, a, b)
+}
+
+/// Divide the lower half-precision (16-bit) floating-point elements in a by b, store the result in the
+/// lower element of dst, and copy the upper 7 packed elements from a to the upper elements of dst using
+/// writemask k (the element is copied from src when mask bit 0 is not set).
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_div_round_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vdivsh, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_div_round_sh<const ROUNDING: i32>(
+    src: __m128h,
+    k: __mmask8,
+    a: __m128h,
+    b: __m128h,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    vdivsh(a, b, src, k, ROUNDING)
+}
+
+/// Divide the lower half-precision (16-bit) floating-point elements in a by b, store the result in the
+/// lower element of dst, and copy the upper 7 packed elements from a to the upper elements of dst using
+/// zeromask k (the element is zeroed out when mask bit 0 is not set).
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_div_round_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vdivsh, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_div_round_sh<const ROUNDING: i32>(
+    k: __mmask8,
+    a: __m128h,
+    b: __m128h,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    _mm_mask_div_round_sh::<ROUNDING>(_mm_setzero_ph(), k, a, b)
+}
+
+/// Divide the lower half-precision (16-bit) floating-point elements in a by b, store the result in the
+/// lower element of dst, and copy the upper 7 packed elements from a to the upper elements of dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_div_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vdivsh))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_div_sh(a: __m128h, b: __m128h) -> __m128h {
+    _mm_div_round_sh::<_MM_FROUND_CUR_DIRECTION>(a, b)
+}
+
+/// Divide the lower half-precision (16-bit) floating-point elements in a by b, store the result in the
+/// lower element of dst, and copy the upper 7 packed elements from a to the upper elements of dst using
+/// writemask k (the element is copied from src when mask bit 0 is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_div_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vdivsh))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_div_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
+    _mm_mask_div_round_sh::<_MM_FROUND_CUR_DIRECTION>(src, k, a, b)
+}
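+
+// Editorial sketch (not part of the original patch): for the masked scalar
+// forms above only bit 0 of k matters, and lanes 1..=7 always come from `a`.
+// Module name and values are illustrative only.
+#[cfg(test)]
+mod mask_div_sh_sketch {
+    use crate::core_arch::x86::*;
+    use stdarch_test::simd_test;
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn sketch_mask_div_sh_bit0() {
+        let src = _mm_set_sh(42.0);
+        let a = _mm_setr_ph(6.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0);
+        let b = _mm_set_sh(2.0);
+        // k bit 0 set: lane 0 = a0 / b0.
+        let r = _mm_mask_div_sh(src, 1, a, b);
+        assert_eq_m128h(r, _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0));
+        // k bit 0 clear: lane 0 is taken from src instead.
+        let r = _mm_mask_div_sh(src, 0, a, b);
+        assert_eq_m128h(r, _mm_setr_ph(42.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0));
+    }
+}
+
+/// Divide the lower half-precision (16-bit) floating-point elements in a by b, store the result in the
+/// lower element of dst, and copy the upper 7 packed elements from a to the upper elements of dst using
+/// zeromask k (the element is zeroed out when mask bit 0 is not set).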
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_div_sh) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vdivsh))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_maskz_div_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { + _mm_maskz_div_round_sh::<_MM_FROUND_CUR_DIRECTION>(k, a, b) +} + +#[allow(improper_ctypes)] +extern "C" { + #[link_name = "llvm.x86.avx512fp16.vcomi.sh"] + fn vcomish(a: __m128h, b: __m128h, imm8: i32, sae: i32) -> i32; + + #[link_name = "llvm.x86.avx512fp16.add.ph.512"] + fn vaddph(a: __m512h, b: __m512h, rounding: i32) -> __m512h; + #[link_name = "llvm.x86.avx512fp16.sub.ph.512"] + fn vsubph(a: __m512h, b: __m512h, rounding: i32) -> __m512h; + #[link_name = "llvm.x86.avx512fp16.mul.ph.512"] + fn vmulph(a: __m512h, b: __m512h, rounding: i32) -> __m512h; + #[link_name = "llvm.x86.avx512fp16.div.ph.512"] + fn vdivph(a: __m512h, b: __m512h, rounding: i32) -> __m512h; + + #[link_name = "llvm.x86.avx512fp16.mask.add.sh.round"] + fn vaddsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.sub.sh.round"] + fn vsubsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.mul.sh.round"] + fn vmulsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.div.sh.round"] + fn vdivsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; + +} + +#[cfg(test)] +mod tests { + use crate::core_arch::x86::*; + use crate::mem::transmute; + use crate::ptr::{addr_of, addr_of_mut}; + use stdarch_test::simd_test; + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_set_ph() { + let r = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let e = _mm_setr_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_set_ph() { + let r = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let e = _mm256_setr_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_set_ph() { + let r = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let e = _mm512_setr_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_set_sh() { + let r = _mm_set_sh(1.0); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_set1_ph() { + let r = _mm_set1_ph(1.0); + let e = _mm_set_ph(1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_set1_ph() { + let r = _mm256_set1_ph(1.0); + let e = _mm256_set_ph( + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable 
= "avx512fp16")] + unsafe fn test_mm512_set1_ph() { + let r = _mm512_set1_ph(1.0); + let e = _mm512_set_ph( + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_setr_ph() { + let r = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let e = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_setr_ph() { + let r = _mm256_setr_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let e = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_setr_ph() { + let r = _mm512_setr_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let e = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_setzero_ph() { + let r = _mm_setzero_ph(); + let e = _mm_set1_ph(0.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_setzero_ph() { + let r = _mm256_setzero_ph(); + let e = _mm256_set1_ph(0.0); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_setzero_ph() { + let r = _mm512_setzero_ph(); + let e = _mm512_set1_ph(0.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_castsi128_ph() { + let a = _mm_set1_epi16(0x3c00); + let r = _mm_castsi128_ph(a); + let e = _mm_set1_ph(1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castsi256_ph() { + let a = _mm256_set1_epi16(0x3c00); + let r = _mm256_castsi256_ph(a); + let e = _mm256_set1_ph(1.0); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castsi512_ph() { + let a = _mm512_set1_epi16(0x3c00); + let r = _mm512_castsi512_ph(a); + let e = _mm512_set1_ph(1.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_castph_si128() { + let a = _mm_set1_ph(1.0); + let r = _mm_castph_si128(a); + let e = _mm_set1_epi16(0x3c00); + assert_eq_m128i(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castph_si256() { + let a = _mm256_set1_ph(1.0); + let r = _mm256_castph_si256(a); + let e = _mm256_set1_epi16(0x3c00); + assert_eq_m256i(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph_si512() { + let a = _mm512_set1_ph(1.0); + let r = _mm512_castph_si512(a); + let e = _mm512_set1_epi16(0x3c00); + assert_eq_m512i(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_castps_ph() { + let a = _mm_castsi128_ps(_mm_set1_epi16(0x3c00)); + let r = _mm_castps_ph(a); + let e = _mm_set1_ph(1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castps_ph() { + let a = _mm256_castsi256_ps(_mm256_set1_epi16(0x3c00)); + let r = _mm256_castps_ph(a); + 
let e = _mm256_set1_ph(1.0); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castps_ph() { + let a = _mm512_castsi512_ps(_mm512_set1_epi16(0x3c00)); + let r = _mm512_castps_ph(a); + let e = _mm512_set1_ph(1.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_castph_ps() { + let a = _mm_castsi128_ph(_mm_set1_epi32(0x3f800000)); + let r = _mm_castph_ps(a); + let e = _mm_set1_ps(1.0); + assert_eq_m128(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castph_ps() { + let a = _mm256_castsi256_ph(_mm256_set1_epi32(0x3f800000)); + let r = _mm256_castph_ps(a); + let e = _mm256_set1_ps(1.0); + assert_eq_m256(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph_ps() { + let a = _mm512_castsi512_ph(_mm512_set1_epi32(0x3f800000)); + let r = _mm512_castph_ps(a); + let e = _mm512_set1_ps(1.0); + assert_eq_m512(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_castpd_ph() { + let a = _mm_castsi128_pd(_mm_set1_epi16(0x3c00)); + let r = _mm_castpd_ph(a); + let e = _mm_set1_ph(1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castpd_ph() { + let a = _mm256_castsi256_pd(_mm256_set1_epi16(0x3c00)); + let r = _mm256_castpd_ph(a); + let e = _mm256_set1_ph(1.0); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castpd_ph() { + let a = _mm512_castsi512_pd(_mm512_set1_epi16(0x3c00)); + let r = _mm512_castpd_ph(a); + let e = _mm512_set1_ph(1.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_castph_pd() { + let a = _mm_castsi128_ph(_mm_set1_epi64x(0x3ff0000000000000)); + let r = _mm_castph_pd(a); + let e = _mm_set1_pd(1.0); + assert_eq_m128d(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castph_pd() { + let a = _mm256_castsi256_ph(_mm256_set1_epi64x(0x3ff0000000000000)); + let r = _mm256_castph_pd(a); + let e = _mm256_set1_pd(1.0); + assert_eq_m256d(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph_pd() { + let a = _mm512_castsi512_ph(_mm512_set1_epi64(0x3ff0000000000000)); + let r = _mm512_castph_pd(a); + let e = _mm512_set1_pd(1.0); + assert_eq_m512d(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castph256_ph128() { + let a = _mm256_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., + ); + let r = _mm256_castph256_ph128(a); + let e = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph512_ph128() { + let a = _mm512_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., + ); + let r = _mm512_castph512_ph128(a); + let e = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph512_ph256() { + let a = _mm512_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., + ); + let r = _mm512_castph512_ph256(a); + let e = _mm256_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castph128_ph256() { + let a = 
_mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + let r = _mm256_castph128_ph256(a); + assert_eq_m128h(_mm256_castph256_ph128(r), a); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph128_ph512() { + let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + let r = _mm512_castph128_ph512(a); + assert_eq_m128h(_mm512_castph512_ph128(r), a); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph256_ph512() { + let a = _mm256_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., + ); + let r = _mm512_castph256_ph512(a); + assert_eq_m256h(_mm512_castph512_ph256(r), a); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_zextph128_ph256() { + let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + let r = _mm256_zextph128_ph256(a); + let e = _mm256_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 0., 0., 0., 0., 0., 0., 0., 0., + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_zextph128_ph512() { + let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + let r = _mm512_zextph128_ph512(a); + let e = _mm512_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_zextph256_ph512() { + let a = _mm256_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., + ); + let r = _mm512_zextph256_ph512(a); + let e = _mm512_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comi_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_comi_round_sh::<_CMP_EQ_OQ, _MM_FROUND_NO_EXC>(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comi_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_comi_sh::<_CMP_EQ_OQ>(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comieq_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_comieq_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comige_sh() { + let a = _mm_set_sh(2.0); + let b = _mm_set_sh(1.0); + let r = _mm_comige_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comigt_sh() { + let a = _mm_set_sh(2.0); + let b = _mm_set_sh(1.0); + let r = _mm_comigt_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comile_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_comile_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comilt_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_comilt_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comineq_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_comineq_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_ucomieq_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_ucomieq_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_ucomige_sh() { + let a = _mm_set_sh(2.0); + let b = 
_mm_set_sh(1.0); + let r = _mm_ucomige_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_ucomigt_sh() { + let a = _mm_set_sh(2.0); + let b = _mm_set_sh(1.0); + let r = _mm_ucomigt_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_ucomile_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_ucomile_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_ucomilt_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_ucomilt_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_ucomineq_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_ucomineq_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_load_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_load_ph(addr_of!(a).cast()); + assert_eq_m128h(a, b); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_load_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_load_ph(addr_of!(a).cast()); + assert_eq_m256h(a, b); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_load_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_load_ph(addr_of!(a).cast()); + assert_eq_m512h(a, b); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_load_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_load_sh(addr_of!(a).cast()); + assert_eq_m128h(a, b); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_load_sh() { + let a = _mm_set_sh(1.0); + let src = _mm_set_sh(2.); + let b = _mm_mask_load_sh(src, 1, addr_of!(a).cast()); + assert_eq_m128h(a, b); + let b = _mm_mask_load_sh(src, 0, addr_of!(a).cast()); + assert_eq_m128h(src, b); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_load_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_maskz_load_sh(1, addr_of!(a).cast()); + assert_eq_m128h(a, b); + let b = _mm_maskz_load_sh(0, addr_of!(a).cast()); + assert_eq_m128h(_mm_setzero_ph(), b); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_loadu_ph() { + let array = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]; + let r = _mm_loadu_ph(array.as_ptr()); + let e = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_loadu_ph() { + let array = [ + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ]; + let r = _mm256_loadu_ph(array.as_ptr()); + let e = _mm256_setr_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_loadu_ph() { + let array = [ + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ]; + let r = _mm512_loadu_ph(array.as_ptr()); + let e = _mm512_setr_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 
21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_move_sh() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_sh(9.0); + let r = _mm_move_sh(a, b); + let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 9.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_move_sh() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_sh(9.0); + let src = _mm_set_sh(10.0); + let r = _mm_mask_move_sh(src, 0, a, b); + let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 10.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_move_sh() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_sh(9.0); + let r = _mm_maskz_move_sh(0, a, b); + let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 0.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_store_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let mut b = _mm_setzero_ph(); + _mm_store_ph(addr_of_mut!(b).cast(), a); + assert_eq_m128h(a, b); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_store_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let mut b = _mm256_setzero_ph(); + _mm256_store_ph(addr_of_mut!(b).cast(), a); + assert_eq_m256h(a, b); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_store_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let mut b = _mm512_setzero_ph(); + _mm512_store_ph(addr_of_mut!(b).cast(), a); + assert_eq_m512h(a, b); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_store_sh() { + let a = _mm_set_sh(1.0); + let mut b = _mm_setzero_ph(); + _mm_store_sh(addr_of_mut!(b).cast(), a); + assert_eq_m128h(a, b); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_store_sh() { + let a = _mm_set_sh(1.0); + let mut b = _mm_setzero_ph(); + _mm_mask_store_sh(addr_of_mut!(b).cast(), 0, a); + assert_eq_m128h(_mm_setzero_ph(), b); + _mm_mask_store_sh(addr_of_mut!(b).cast(), 1, a); + assert_eq_m128h(a, b); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_storeu_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let mut array = [0.0; 8]; + _mm_storeu_ph(array.as_mut_ptr(), a); + assert_eq_m128h(a, _mm_loadu_ph(array.as_ptr())); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_storeu_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let mut array = [0.0; 16]; + _mm256_storeu_ph(array.as_mut_ptr(), a); + assert_eq_m256h(a, _mm256_loadu_ph(array.as_ptr())); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_storeu_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let mut array = [0.0; 32]; + _mm512_storeu_ph(array.as_mut_ptr(), a); + assert_eq_m512h(a, _mm512_loadu_ph(array.as_ptr())); + } + + #[simd_test(enable = 
"avx512fp16,avx512vl")] + unsafe fn test_mm_add_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let r = _mm_add_ph(a, b); + let e = _mm_set1_ph(9.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_add_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm_mask_add_ph(src, 0b01010101, a, b); + let e = _mm_set_ph(10., 9., 12., 9., 14., 9., 16., 9.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_add_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let r = _mm_maskz_add_ph(0b01010101, a, b); + let e = _mm_set_ph(0., 9., 0., 9., 0., 9., 0., 9.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_add_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let r = _mm256_add_ph(a, b); + let e = _mm256_set1_ph(17.0); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_add_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let src = _mm256_set_ph( + 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., + ); + let r = _mm256_mask_add_ph(src, 0b0101010101010101, a, b); + let e = _mm256_set_ph( + 18., 17., 20., 17., 22., 17., 24., 17., 26., 17., 28., 17., 30., 17., 32., 17., + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_add_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let r = _mm256_maskz_add_ph(0b0101010101010101, a, b); + let e = _mm256_set_ph( + 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_add_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_add_ph(a, b); + let e = _mm512_set1_ph(33.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_add_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 
26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let src = _mm512_set_ph( + 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., + 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + ); + let r = _mm512_mask_add_ph(src, 0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 34., 33., 36., 33., 38., 33., 40., 33., 42., 33., 44., 33., 46., 33., 48., 33., 50., + 33., 52., 33., 54., 33., 56., 33., 58., 33., 60., 33., 62., 33., 64., 33., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_add_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_maskz_add_ph(0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., + 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_add_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_add_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set1_ph(33.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_add_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let src = _mm512_set_ph( + 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., + 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + ); + let r = _mm512_mask_add_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 34., 33., 36., 33., 38., 33., 40., 33., 42., 33., 44., 33., 46., 33., 48., 33., 50., + 33., 52., 33., 54., 33., 56., 33., 58., 33., 60., 33., 62., 33., 64., 33., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_add_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 
30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_maskz_add_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., + 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_add_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_set_sh(3.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_add_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let src = _mm_set_sh(4.0); + let r = _mm_mask_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, + ); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 1, a, b, + ); + let e = _mm_set_sh(3.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_add_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = + _mm_maskz_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = + _mm_maskz_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_set_sh(3.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_add_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_add_sh(a, b); + let e = _mm_set_sh(3.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_add_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let src = _mm_set_sh(4.0); + let r = _mm_mask_add_sh(src, 0, a, b); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_add_sh(src, 1, a, b); + let e = _mm_set_sh(3.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_add_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_maskz_add_sh(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = _mm_maskz_add_sh(1, a, b); + let e = _mm_set_sh(3.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_sub_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let r = _mm_sub_ph(a, b); + let e = _mm_set_ph(-7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_sub_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm_mask_sub_ph(src, 0b01010101, a, b); + let e = _mm_set_ph(10., -5., 12., -1., 14., 3., 16., 7.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_sub_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + 
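// Mask bit i governs element i, counting from the least-significant bit, + // while _mm_set_ph lists elements from highest to lowest; hence the + // zeroed lanes land in the alternating positions of `e` below. +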
let r = _mm_maskz_sub_ph(0b01010101, a, b); + let e = _mm_set_ph(0., -5., 0., -1., 0., 3., 0., 7.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_sub_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let r = _mm256_sub_ph(a, b); + let e = _mm256_set_ph( + -15.0, -13.0, -11.0, -9.0, -7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, + 15.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_sub_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let src = _mm256_set_ph( + 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., + ); + let r = _mm256_mask_sub_ph(src, 0b0101010101010101, a, b); + let e = _mm256_set_ph( + 18., -13., 20., -9., 22., -5., 24., -1., 26., 3., 28., 7., 30., 11., 32., 15., + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_sub_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let r = _mm256_maskz_sub_ph(0b0101010101010101, a, b); + let e = _mm256_set_ph( + 0., -13., 0., -9., 0., -5., 0., -1., 0., 3., 0., 7., 0., 11., 0., 15., + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_sub_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_sub_ph(a, b); + let e = _mm512_set_ph( + -31.0, -29.0, -27.0, -25.0, -23.0, -21.0, -19.0, -17.0, -15.0, -13.0, -11.0, -9.0, + -7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, + 23.0, 25.0, 27.0, 29.0, 31.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_sub_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let src = _mm512_set_ph( + 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., + 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + ); + let r = _mm512_mask_sub_ph(src, 0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 34., -29., 36., -25., 38., -21., 40., -17., 42., -13., 44., -9., 46., -5., 48., -1., + 50., 3., 52., 7., 54., 11., 56., 15., 58., 19., 
60., 23., 62., 27., 64., 31., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_sub_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_maskz_sub_ph(0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 0., -29., 0., -25., 0., -21., 0., -17., 0., -13., 0., -9., 0., -5., 0., -1., 0., 3., + 0., 7., 0., 11., 0., 15., 0., 19., 0., 23., 0., 27., 0., 31., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_sub_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_sub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set_ph( + -31.0, -29.0, -27.0, -25.0, -23.0, -21.0, -19.0, -17.0, -15.0, -13.0, -11.0, -9.0, + -7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, + 23.0, 25.0, 27.0, 29.0, 31.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_sub_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let src = _mm512_set_ph( + 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., + 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + ); + let r = _mm512_mask_sub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 34., -29., 36., -25., 38., -21., 40., -17., 42., -13., 44., -9., 46., -5., 48., -1., + 50., 3., 52., 7., 54., 11., 56., 15., 58., 19., 60., 23., 62., 27., 64., 31., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_sub_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_maskz_sub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 0., -29., 0., -25., 0., -21., 
0., -17., 0., -13., 0., -9., 0., -5., 0., -1., 0., 3., + 0., 7., 0., 11., 0., 15., 0., 19., 0., 23., 0., 27., 0., 31., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_sub_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_set_sh(-1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_sub_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let src = _mm_set_sh(4.0); + let r = _mm_mask_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, + ); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 1, a, b, + ); + let e = _mm_set_sh(-1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_sub_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = + _mm_maskz_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = + _mm_maskz_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_set_sh(-1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_sub_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_sub_sh(a, b); + let e = _mm_set_sh(-1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_sub_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let src = _mm_set_sh(4.0); + let r = _mm_mask_sub_sh(src, 0, a, b); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_sub_sh(src, 1, a, b); + let e = _mm_set_sh(-1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_sub_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_maskz_sub_sh(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = _mm_maskz_sub_sh(1, a, b); + let e = _mm_set_sh(-1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mul_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let r = _mm_mul_ph(a, b); + let e = _mm_set_ph(8.0, 14.0, 18.0, 20.0, 20.0, 18.0, 14.0, 8.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_mul_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm_mask_mul_ph(src, 0b01010101, a, b); + let e = _mm_set_ph(10., 14., 12., 20., 14., 18., 16., 8.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_mul_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let r = _mm_maskz_mul_ph(0b01010101, a, b); + let e = _mm_set_ph(0., 14., 0., 20., 0., 18., 0., 8.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mul_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 
15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let r = _mm256_mul_ph(a, b); + let e = _mm256_set_ph( + 16.0, 30.0, 42.0, 52.0, 60.0, 66.0, 70.0, 72.0, 72.0, 70.0, 66.0, 60.0, 52.0, 42.0, + 30.0, 16.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_mul_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let src = _mm256_set_ph( + 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., + ); + let r = _mm256_mask_mul_ph(src, 0b0101010101010101, a, b); + let e = _mm256_set_ph( + 18., 30., 20., 52., 22., 66., 24., 72., 26., 70., 28., 60., 30., 42., 32., 16., + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_mul_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let r = _mm256_maskz_mul_ph(0b0101010101010101, a, b); + let e = _mm256_set_ph( + 0., 30., 0., 52., 0., 66., 0., 72., 0., 70., 0., 60., 0., 42., 0., 16., + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mul_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_mul_ph(a, b); + let e = _mm512_set_ph( + 32.0, 62.0, 90.0, 116.0, 140.0, 162.0, 182.0, 200.0, 216.0, 230.0, 242.0, 252.0, 260.0, + 266.0, 270.0, 272.0, 272.0, 270.0, 266.0, 260.0, 252.0, 242.0, 230.0, 216.0, 200.0, + 182.0, 162.0, 140.0, 116.0, 90.0, 62.0, 32.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_mul_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let src = _mm512_set_ph( + 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., + 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + ); + let r = _mm512_mask_mul_ph(src, 0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 34., 62., 36., 116., 38., 162., 40., 200., 42., 230., 44., 252., 46., 266., 48., 272., + 50., 270., 52., 260., 54., 242., 56., 216., 58., 182., 60., 140., 62., 90., 64., 32., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_mul_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 
29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_maskz_mul_ph(0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 0., 62., 0., 116., 0., 162., 0., 200., 0., 230., 0., 252., 0., 266., 0., 272., 0., + 270., 0., 260., 0., 242., 0., 216., 0., 182., 0., 140., 0., 90., 0., 32., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mul_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_mul_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set_ph( + 32.0, 62.0, 90.0, 116.0, 140.0, 162.0, 182.0, 200.0, 216.0, 230.0, 242.0, 252.0, 260.0, + 266.0, 270.0, 272.0, 272.0, 270.0, 266.0, 260.0, 252.0, 242.0, 230.0, 216.0, 200.0, + 182.0, 162.0, 140.0, 116.0, 90.0, 62.0, 32.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_mul_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let src = _mm512_set_ph( + 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., + 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + ); + let r = _mm512_mask_mul_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 34., 62., 36., 116., 38., 162., 40., 200., 42., 230., 44., 252., 46., 266., 48., 272., + 50., 270., 52., 260., 54., 242., 56., 216., 58., 182., 60., 140., 62., 90., 64., 32., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_mul_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_maskz_mul_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 0., 62., 0., 116., 0., 162., 0., 200., 0., 230., 0., 252., 0., 266., 0., 272., 0., + 270., 0., 260., 0., 242., 0., 216., 0., 182., 0., 140., 0., 90., 0., 32., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mul_round_sh() { + let a = _mm_set_sh(1.0); + let b = 
_mm_set_sh(2.0); + let r = _mm_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_set_sh(2.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_mul_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let src = _mm_set_sh(4.0); + let r = _mm_mask_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, + ); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 1, a, b, + ); + let e = _mm_set_sh(2.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_mul_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = + _mm_maskz_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = + _mm_maskz_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_set_sh(2.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mul_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_mul_sh(a, b); + let e = _mm_set_sh(2.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_mul_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let src = _mm_set_sh(4.0); + let r = _mm_mask_mul_sh(src, 0, a, b); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_mul_sh(src, 1, a, b); + let e = _mm_set_sh(2.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_mul_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_maskz_mul_sh(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = _mm_maskz_mul_sh(1, a, b); + let e = _mm_set_sh(2.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_div_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let r = _mm_div_ph(a, b); + let e = _mm_set1_ph(0.5); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_div_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let src = _mm_set_ph(4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0); + let r = _mm_mask_div_ph(src, 0b01010101, a, b); + let e = _mm_set_ph(4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_div_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let r = _mm_maskz_div_ph(0b01010101, a, b); + let e = _mm_set_ph(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_div_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let r = _mm256_div_ph(a, b); + let e = _mm256_set1_ph(0.5); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_div_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let src = _mm256_set_ph( + 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, + 19.0, + ); + let r = _mm256_mask_div_ph(src, 0b0101010101010101, a, b); + let e = _mm256_set_ph( + 4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5, 12.0, 0.5, 14.0, 0.5, 16.0, 0.5, 18.0, 0.5, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = 
"avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_div_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let r = _mm256_maskz_div_ph(0b0101010101010101, a, b); + let e = _mm256_set_ph( + 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_div_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let r = _mm512_div_ph(a, b); + let e = _mm512_set1_ph(0.5); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_div_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let src = _mm512_set_ph( + 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, + 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, + 33.0, 34.0, 35.0, + ); + let r = _mm512_mask_div_ph(src, 0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5, 12.0, 0.5, 14.0, 0.5, 16.0, 0.5, 18.0, 0.5, + 20.0, 0.5, 22.0, 0.5, 24.0, 0.5, 26.0, 0.5, 28.0, 0.5, 30.0, 0.5, 32.0, 0.5, 34.0, 0.5, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_div_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let r = _mm512_maskz_div_ph(0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, + 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_div_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let r = _mm512_div_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set1_ph(0.5); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_div_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let src = _mm512_set_ph( + 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, + 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, + 33.0, 34.0, 35.0, + ); + let r = _mm512_mask_div_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5, 12.0, 0.5, 14.0, 0.5, 16.0, 0.5, 18.0, 0.5, + 20.0, 0.5, 22.0, 0.5, 24.0, 0.5, 26.0, 0.5, 28.0, 0.5, 30.0, 0.5, 32.0, 0.5, 34.0, 0.5, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_div_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let r = _mm512_maskz_div_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, + 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_div_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_set_sh(0.5); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn 
test_mm_mask_div_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let src = _mm_set_sh(4.0); + let r = _mm_mask_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, + ); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 1, a, b, + ); + let e = _mm_set_sh(0.5); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_div_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = + _mm_maskz_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = + _mm_maskz_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_set_sh(0.5); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_div_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_div_sh(a, b); + let e = _mm_set_sh(0.5); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_div_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let src = _mm_set_sh(4.0); + let r = _mm_mask_div_sh(src, 0, a, b); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_div_sh(src, 1, a, b); + let e = _mm_set_sh(0.5); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_div_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_maskz_div_sh(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = _mm_maskz_div_sh(1, a, b); + let e = _mm_set_sh(0.5); + assert_eq_m128h(r, e); + } +} diff --git a/crates/core_arch/src/x86/mod.rs b/crates/core_arch/src/x86/mod.rs index d3d4381cc7..6f10d828dd 100644 --- a/crates/core_arch/src/x86/mod.rs +++ b/crates/core_arch/src/x86/mod.rs @@ -1004,3 +1004,7 @@ pub use self::avx512bf16::*; mod avxneconvert; #[unstable(feature = "stdarch_x86_avx512", issue = "111137")] pub use self::avxneconvert::*; + +mod avx512fp16; +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub use self::avx512fp16::*; From bf92f837fcdda6dac7d12332bcfa758c1f227f56 Mon Sep 17 00:00:00 2001 From: sayantn Date: Mon, 8 Jul 2024 20:00:07 +0530 Subject: [PATCH 03/11] AVX512_FP16 Part 2: Complex Multiplication --- crates/core_arch/missing-x86.md | 76 - crates/core_arch/src/x86/avx512fp16.rs | 4591 ++++++++++++++++++------ 2 files changed, 3431 insertions(+), 1236 deletions(-) diff --git a/crates/core_arch/missing-x86.md b/crates/core_arch/missing-x86.md index 7bc2456ddd..c66e1e728c 100644 --- a/crates/core_arch/missing-x86.md +++ b/crates/core_arch/missing-x86.md @@ -58,8 +58,6 @@ * [ ] [`_mm512_abs_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_abs_ph) * [ ] [`_mm512_cmp_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmp_ph_mask) * [ ] [`_mm512_cmp_round_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmp_round_ph_mask) - * [ ] [`_mm512_cmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmul_pch) - * [ ] [`_mm512_cmul_round_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmul_round_pch) * [ ] [`_mm512_conj_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_conj_pch) * [ ] 
[`_mm512_cvt_roundepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundepi16_ph) * [ ] [`_mm512_cvt_roundepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundepi32_ph) @@ -108,8 +106,6 @@ * [ ] [`_mm512_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtxps_ph) * [ ] [`_mm512_fcmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fcmadd_pch) * [ ] [`_mm512_fcmadd_round_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fcmadd_round_pch) - * [ ] [`_mm512_fcmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fcmul_pch) - * [ ] [`_mm512_fcmul_round_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fcmul_round_pch) * [ ] [`_mm512_fmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmadd_pch) * [ ] [`_mm512_fmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmadd_ph) * [ ] [`_mm512_fmadd_round_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmadd_round_pch) @@ -120,8 +116,6 @@ * [ ] [`_mm512_fmsub_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmsub_round_ph) * [ ] [`_mm512_fmsubadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmsubadd_ph) * [ ] [`_mm512_fmsubadd_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmsubadd_round_ph) - * [ ] [`_mm512_fmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmul_pch) - * [ ] [`_mm512_fmul_round_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmul_round_pch) * [ ] [`_mm512_fnmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fnmadd_ph) * [ ] [`_mm512_fnmadd_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fnmadd_round_ph) * [ ] [`_mm512_fnmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fnmsub_ph) @@ -150,8 +144,6 @@ * [ ] [`_mm512_mask_blend_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_blend_ph) * [ ] [`_mm512_mask_cmp_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmp_ph_mask) * [ ] [`_mm512_mask_cmp_round_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmp_round_ph_mask) - * [ ] [`_mm512_mask_cmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmul_pch) - * [ ] [`_mm512_mask_cmul_round_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmul_round_pch) * [ ] [`_mm512_mask_conj_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_conj_pch) * [ ] [`_mm512_mask_cvt_roundepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundepi16_ph) * [ ] [`_mm512_mask_cvt_roundepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundepi32_ph) @@ -199,8 +191,6 @@ * [ ] [`_mm512_mask_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtxps_ph) * [ ] [`_mm512_mask_fcmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fcmadd_pch) * [ ] 
[`_mm512_mask_fcmadd_round_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fcmadd_round_pch) - * [ ] [`_mm512_mask_fcmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fcmul_pch) - * [ ] [`_mm512_mask_fcmul_round_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fcmul_round_pch) * [ ] [`_mm512_mask_fmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmadd_pch) * [ ] [`_mm512_mask_fmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmadd_ph) * [ ] [`_mm512_mask_fmadd_round_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmadd_round_pch) @@ -211,8 +201,6 @@ * [ ] [`_mm512_mask_fmsub_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmsub_round_ph) * [ ] [`_mm512_mask_fmsubadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmsubadd_ph) * [ ] [`_mm512_mask_fmsubadd_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmsubadd_round_ph) - * [ ] [`_mm512_mask_fmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmul_pch) - * [ ] [`_mm512_mask_fmul_round_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmul_round_pch) * [ ] [`_mm512_mask_fnmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fnmadd_ph) * [ ] [`_mm512_mask_fnmadd_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fnmadd_round_ph) * [ ] [`_mm512_mask_fnmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fnmsub_ph) @@ -226,8 +214,6 @@ * [ ] [`_mm512_mask_max_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_max_round_ph) * [ ] [`_mm512_mask_min_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_min_ph) * [ ] [`_mm512_mask_min_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_min_round_ph) - * [ ] [`_mm512_mask_mul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_mul_pch) - * [ ] [`_mm512_mask_mul_round_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_mul_round_pch) * [ ] [`_mm512_mask_rcp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_rcp_ph) * [ ] [`_mm512_mask_reduce_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_reduce_ph) * [ ] [`_mm512_mask_reduce_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_reduce_round_ph) @@ -238,8 +224,6 @@ * [ ] [`_mm512_mask_scalef_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_scalef_round_ph) * [ ] [`_mm512_mask_sqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_sqrt_ph) * [ ] [`_mm512_mask_sqrt_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_sqrt_round_ph) - * [ ] [`_mm512_maskz_cmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cmul_pch) - * [ ] [`_mm512_maskz_cmul_round_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cmul_round_pch) * [ ] 
[`_mm512_maskz_conj_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_conj_pch) * [ ] [`_mm512_maskz_cvt_roundepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundepi16_ph) * [ ] [`_mm512_maskz_cvt_roundepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundepi32_ph) @@ -287,8 +271,6 @@ * [ ] [`_mm512_maskz_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtxps_ph) * [ ] [`_mm512_maskz_fcmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fcmadd_pch) * [ ] [`_mm512_maskz_fcmadd_round_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fcmadd_round_pch) - * [ ] [`_mm512_maskz_fcmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fcmul_pch) - * [ ] [`_mm512_maskz_fcmul_round_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fcmul_round_pch) * [ ] [`_mm512_maskz_fmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmadd_pch) * [ ] [`_mm512_maskz_fmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmadd_ph) * [ ] [`_mm512_maskz_fmadd_round_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmadd_round_pch) @@ -299,8 +281,6 @@ * [ ] [`_mm512_maskz_fmsub_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmsub_round_ph) * [ ] [`_mm512_maskz_fmsubadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmsubadd_ph) * [ ] [`_mm512_maskz_fmsubadd_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmsubadd_round_ph) - * [ ] [`_mm512_maskz_fmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmul_pch) - * [ ] [`_mm512_maskz_fmul_round_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmul_round_pch) * [ ] [`_mm512_maskz_fnmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fnmadd_ph) * [ ] [`_mm512_maskz_fnmadd_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fnmadd_round_ph) * [ ] [`_mm512_maskz_fnmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fnmsub_ph) @@ -313,8 +293,6 @@ * [ ] [`_mm512_maskz_max_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_max_round_ph) * [ ] [`_mm512_maskz_min_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_min_ph) * [ ] [`_mm512_maskz_min_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_min_round_ph) - * [ ] [`_mm512_maskz_mul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_mul_pch) - * [ ] [`_mm512_maskz_mul_round_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_mul_round_pch) * [ ] [`_mm512_maskz_rcp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_rcp_ph) * [ ] [`_mm512_maskz_reduce_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_reduce_ph) * [ ] [`_mm512_maskz_reduce_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_reduce_round_ph) @@ 
-329,8 +307,6 @@ * [ ] [`_mm512_max_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_max_round_ph) * [ ] [`_mm512_min_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_min_ph) * [ ] [`_mm512_min_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_min_round_ph) - * [ ] [`_mm512_mul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mul_pch) - * [ ] [`_mm512_mul_round_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mul_round_pch) * [ ] [`_mm512_permutex2var_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_permutex2var_ph) * [ ] [`_mm512_permutexvar_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_permutexvar_ph) * [ ] [`_mm512_rcp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_rcp_ph) @@ -348,10 +324,6 @@ * [ ] [`_mm512_set1_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_set1_pch) * [ ] [`_mm512_sqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_sqrt_ph) * [ ] [`_mm512_sqrt_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_sqrt_round_ph) - * [ ] [`_mm_cmp_round_sh_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmp_round_sh_mask) - * [ ] [`_mm_cmp_sh_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmp_sh_mask) - * [ ] [`_mm_cmul_round_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmul_round_sch) - * [ ] [`_mm_cmul_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmul_sch) * [ ] [`_mm_cvt_roundi32_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundi32_sh) * [ ] [`_mm_cvt_roundi64_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundi64_sh) * [ ] [`_mm_cvt_roundsd_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundsd_sh) @@ -389,16 +361,12 @@ * [ ] [`_mm_cvtu64_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtu64_sh) * [ ] [`_mm_fcmadd_round_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fcmadd_round_sch) * [ ] [`_mm_fcmadd_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fcmadd_sch) - * [ ] [`_mm_fcmul_round_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fcmul_round_sch) - * [ ] [`_mm_fcmul_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fcmul_sch) * [ ] [`_mm_fmadd_round_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmadd_round_sch) * [ ] [`_mm_fmadd_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmadd_round_sh) * [ ] [`_mm_fmadd_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmadd_sch) * [ ] [`_mm_fmadd_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmadd_sh) * [ ] [`_mm_fmsub_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmsub_round_sh) * [ ] [`_mm_fmsub_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmsub_sh) - * [ ] [`_mm_fmul_round_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmul_round_sch) - * [ ] 
[`_mm_fmul_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmul_sch) * [ ] [`_mm_fnmadd_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fnmadd_round_sh) * [ ] [`_mm_fnmadd_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fnmadd_sh) * [ ] [`_mm_fnmsub_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fnmsub_round_sh) @@ -420,10 +388,6 @@ * [ ] [`_mm_mask3_fnmadd_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fnmadd_sh) * [ ] [`_mm_mask3_fnmsub_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fnmsub_round_sh) * [ ] [`_mm_mask3_fnmsub_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fnmsub_sh) - * [ ] [`_mm_mask_cmp_round_sh_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmp_round_sh_mask) - * [ ] [`_mm_mask_cmp_sh_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmp_sh_mask) - * [ ] [`_mm_mask_cmul_round_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmul_round_sch) - * [ ] [`_mm_mask_cmul_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmul_sch) * [ ] [`_mm_mask_cvt_roundsd_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvt_roundsd_sh) * [ ] [`_mm_mask_cvt_roundsh_sd`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvt_roundsh_sd) * [ ] [`_mm_mask_cvt_roundsh_ss`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvt_roundsh_ss) @@ -434,16 +398,12 @@ * [ ] [`_mm_mask_cvtss_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtss_sh) * [ ] [`_mm_mask_fcmadd_round_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fcmadd_round_sch) * [ ] [`_mm_mask_fcmadd_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fcmadd_sch) - * [ ] [`_mm_mask_fcmul_round_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fcmul_round_sch) - * [ ] [`_mm_mask_fcmul_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fcmul_sch) * [ ] [`_mm_mask_fmadd_round_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmadd_round_sch) * [ ] [`_mm_mask_fmadd_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmadd_round_sh) * [ ] [`_mm_mask_fmadd_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmadd_sch) * [ ] [`_mm_mask_fmadd_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmadd_sh) * [ ] [`_mm_mask_fmsub_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmsub_round_sh) * [ ] [`_mm_mask_fmsub_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmsub_sh) - * [ ] [`_mm_mask_fmul_round_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmul_round_sch) - * [ ] [`_mm_mask_fmul_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmul_sch) * [ ] [`_mm_mask_fnmadd_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fnmadd_round_sh) * [ ] [`_mm_mask_fnmadd_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fnmadd_sh) * [ ] 
[`_mm_mask_fnmsub_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fnmsub_round_sh) @@ -453,8 +413,6 @@ * [ ] [`_mm_mask_getexp_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_getexp_sh) * [ ] [`_mm_mask_getmant_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_getmant_round_sh) * [ ] [`_mm_mask_getmant_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_getmant_sh) - * [ ] [`_mm_mask_mul_round_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_mul_round_sch) - * [ ] [`_mm_mask_mul_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_mul_sch) * [ ] [`_mm_mask_rcp_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_rcp_sh) * [ ] [`_mm_mask_reduce_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_reduce_round_sh) * [ ] [`_mm_mask_reduce_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_reduce_sh) @@ -465,8 +423,6 @@ * [ ] [`_mm_mask_scalef_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_scalef_sh) * [ ] [`_mm_mask_sqrt_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_sqrt_round_sh) * [ ] [`_mm_mask_sqrt_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_sqrt_sh) - * [ ] [`_mm_maskz_cmul_round_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cmul_round_sch) - * [ ] [`_mm_maskz_cmul_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cmul_sch) * [ ] [`_mm_maskz_cvt_roundsd_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvt_roundsd_sh) * [ ] [`_mm_maskz_cvt_roundsh_sd`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvt_roundsh_sd) * [ ] [`_mm_maskz_cvt_roundsh_ss`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvt_roundsh_ss) @@ -477,16 +433,12 @@ * [ ] [`_mm_maskz_cvtss_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtss_sh) * [ ] [`_mm_maskz_fcmadd_round_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fcmadd_round_sch) * [ ] [`_mm_maskz_fcmadd_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fcmadd_sch) - * [ ] [`_mm_maskz_fcmul_round_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fcmul_round_sch) - * [ ] [`_mm_maskz_fcmul_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fcmul_sch) * [ ] [`_mm_maskz_fmadd_round_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmadd_round_sch) * [ ] [`_mm_maskz_fmadd_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmadd_round_sh) * [ ] [`_mm_maskz_fmadd_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmadd_sch) * [ ] [`_mm_maskz_fmadd_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmadd_sh) * [ ] [`_mm_maskz_fmsub_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmsub_round_sh) * [ ] [`_mm_maskz_fmsub_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmsub_sh) - * [ ] 
[`_mm_maskz_fmul_round_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmul_round_sch) - * [ ] [`_mm_maskz_fmul_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmul_sch) * [ ] [`_mm_maskz_fnmadd_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fnmadd_round_sh) * [ ] [`_mm_maskz_fnmadd_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fnmadd_sh) * [ ] [`_mm_maskz_fnmsub_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fnmsub_round_sh) @@ -495,8 +447,6 @@ * [ ] [`_mm_maskz_getexp_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_getexp_sh) * [ ] [`_mm_maskz_getmant_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_getmant_round_sh) * [ ] [`_mm_maskz_getmant_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_getmant_sh) - * [ ] [`_mm_maskz_mul_round_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_mul_round_sch) - * [ ] [`_mm_maskz_mul_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_mul_sch) * [ ] [`_mm_maskz_rcp_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_rcp_sh) * [ ] [`_mm_maskz_reduce_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_reduce_round_sh) * [ ] [`_mm_maskz_reduce_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_reduce_sh) @@ -507,8 +457,6 @@ * [ ] [`_mm_maskz_scalef_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_scalef_sh) * [ ] [`_mm_maskz_sqrt_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_sqrt_round_sh) * [ ] [`_mm_maskz_sqrt_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_sqrt_sh) - * [ ] [`_mm_mul_round_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mul_round_sch) - * [ ] [`_mm_mul_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mul_sch) * [ ] [`_mm_rcp_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_rcp_sh) * [ ] [`_mm_reduce_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_reduce_round_sh) * [ ] [`_mm_reduce_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_reduce_sh) @@ -527,7 +475,6 @@ * [ ] [`_mm256_abs_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_abs_ph) * [ ] [`_mm256_cmp_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmp_ph_mask) - * [ ] [`_mm256_cmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmul_pch) * [ ] [`_mm256_conj_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_conj_pch) * [ ] [`_mm256_cvtepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtepi16_ph) * [ ] [`_mm256_cvtepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtepi32_ph) @@ -552,13 +499,11 @@ * [ ] [`_mm256_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtxph_ps) * [ ] [`_mm256_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtxps_ph) * [ ] 
[`_mm256_fcmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fcmadd_pch) - * [ ] [`_mm256_fcmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fcmul_pch) * [ ] [`_mm256_fmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fmadd_pch) * [ ] [`_mm256_fmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fmadd_ph) * [ ] [`_mm256_fmaddsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fmaddsub_ph) * [ ] [`_mm256_fmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fmsub_ph) * [ ] [`_mm256_fmsubadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fmsubadd_ph) - * [ ] [`_mm256_fmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fmul_pch) * [ ] [`_mm256_fnmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fnmadd_ph) * [ ] [`_mm256_fnmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fnmsub_ph) * [ ] [`_mm256_fpclass_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fpclass_ph_mask) @@ -574,7 +519,6 @@ * [ ] [`_mm256_mask3_fnmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask3_fnmsub_ph) * [ ] [`_mm256_mask_blend_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_blend_ph) * [ ] [`_mm256_mask_cmp_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmp_ph_mask) - * [ ] [`_mm256_mask_cmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmul_pch) * [ ] [`_mm256_mask_conj_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_conj_pch) * [ ] [`_mm256_mask_cvtepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepi16_ph) * [ ] [`_mm256_mask_cvtepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepi32_ph) @@ -599,13 +543,11 @@ * [ ] [`_mm256_mask_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtxph_ps) * [ ] [`_mm256_mask_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtxps_ph) * [ ] [`_mm256_mask_fcmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fcmadd_pch) - * [ ] [`_mm256_mask_fcmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fcmul_pch) * [ ] [`_mm256_mask_fmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fmadd_pch) * [ ] [`_mm256_mask_fmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fmadd_ph) * [ ] [`_mm256_mask_fmaddsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fmaddsub_ph) * [ ] [`_mm256_mask_fmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fmsub_ph) * [ ] [`_mm256_mask_fmsubadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fmsubadd_ph) - * [ ] [`_mm256_mask_fmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fmul_pch) * [ ] [`_mm256_mask_fnmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fnmadd_ph) * [ ] 
[`_mm256_mask_fnmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fnmsub_ph) * [ ] [`_mm256_mask_fpclass_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fpclass_ph_mask) @@ -613,14 +555,12 @@ * [ ] [`_mm256_mask_getmant_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_getmant_ph) * [ ] [`_mm256_mask_max_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_max_ph) * [ ] [`_mm256_mask_min_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_min_ph) - * [ ] [`_mm256_mask_mul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_mul_pch) * [ ] [`_mm256_mask_rcp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_rcp_ph) * [ ] [`_mm256_mask_reduce_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_reduce_ph) * [ ] [`_mm256_mask_roundscale_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_roundscale_ph) * [ ] [`_mm256_mask_rsqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_rsqrt_ph) * [ ] [`_mm256_mask_scalef_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_scalef_ph) * [ ] [`_mm256_mask_sqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_sqrt_ph) - * [ ] [`_mm256_maskz_cmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cmul_pch) * [ ] [`_mm256_maskz_conj_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_conj_pch) * [ ] [`_mm256_maskz_cvtepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepi16_ph) * [ ] [`_mm256_maskz_cvtepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepi32_ph) @@ -645,20 +585,17 @@ * [ ] [`_mm256_maskz_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtxph_ps) * [ ] [`_mm256_maskz_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtxps_ph) * [ ] [`_mm256_maskz_fcmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fcmadd_pch) - * [ ] [`_mm256_maskz_fcmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fcmul_pch) * [ ] [`_mm256_maskz_fmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fmadd_pch) * [ ] [`_mm256_maskz_fmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fmadd_ph) * [ ] [`_mm256_maskz_fmaddsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fmaddsub_ph) * [ ] [`_mm256_maskz_fmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fmsub_ph) * [ ] [`_mm256_maskz_fmsubadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fmsubadd_ph) - * [ ] [`_mm256_maskz_fmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fmul_pch) * [ ] [`_mm256_maskz_fnmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fnmadd_ph) * [ ] [`_mm256_maskz_fnmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fnmsub_ph) * [ ] 
[`_mm256_maskz_getexp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_getexp_ph) * [ ] [`_mm256_maskz_getmant_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_getmant_ph) * [ ] [`_mm256_maskz_max_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_max_ph) * [ ] [`_mm256_maskz_min_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_min_ph) - * [ ] [`_mm256_maskz_mul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_mul_pch) * [ ] [`_mm256_maskz_rcp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_rcp_ph) * [ ] [`_mm256_maskz_reduce_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_reduce_ph) * [ ] [`_mm256_maskz_roundscale_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_roundscale_ph) @@ -667,7 +604,6 @@ * [ ] [`_mm256_maskz_sqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_sqrt_ph) * [ ] [`_mm256_max_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_max_ph) * [ ] [`_mm256_min_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_min_ph) - * [ ] [`_mm256_mul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mul_pch) * [ ] [`_mm256_permutex2var_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permutex2var_ph) * [ ] [`_mm256_permutexvar_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permutexvar_ph) * [ ] [`_mm256_rcp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_rcp_ph) @@ -682,7 +618,6 @@ * [ ] [`_mm256_sqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_sqrt_ph) * [ ] [`_mm_abs_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_abs_ph) * [ ] [`_mm_cmp_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmp_ph_mask) - * [ ] [`_mm_cmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmul_pch) * [ ] [`_mm_conj_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_conj_pch) * [ ] [`_mm_cvtepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepi16_ph) * [ ] [`_mm_cvtepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepi32_ph) @@ -707,13 +642,11 @@ * [ ] [`_mm_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtxph_ps) * [ ] [`_mm_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtxps_ph) * [ ] [`_mm_fcmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fcmadd_pch) - * [ ] [`_mm_fcmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fcmul_pch) * [ ] [`_mm_fmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmadd_pch) * [ ] [`_mm_fmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmadd_ph) * [ ] [`_mm_fmaddsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmaddsub_ph) * [ ] [`_mm_fmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmsub_ph) * [ ] [`_mm_fmsubadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmsubadd_ph) - * [ ] 
[`_mm_fmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmul_pch) * [ ] [`_mm_fnmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fnmadd_ph) * [ ] [`_mm_fnmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fnmsub_ph) * [ ] [`_mm_fpclass_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fpclass_ph_mask) @@ -729,7 +662,6 @@ * [ ] [`_mm_mask3_fnmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fnmsub_ph) * [ ] [`_mm_mask_blend_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_blend_ph) * [ ] [`_mm_mask_cmp_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmp_ph_mask) - * [ ] [`_mm_mask_cmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmul_pch) * [ ] [`_mm_mask_conj_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_conj_pch) * [ ] [`_mm_mask_cvtepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepi16_ph) * [ ] [`_mm_mask_cvtepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepi32_ph) @@ -754,13 +686,11 @@ * [ ] [`_mm_mask_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtxph_ps) * [ ] [`_mm_mask_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtxps_ph) * [ ] [`_mm_mask_fcmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fcmadd_pch) - * [ ] [`_mm_mask_fcmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fcmul_pch) * [ ] [`_mm_mask_fmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmadd_pch) * [ ] [`_mm_mask_fmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmadd_ph) * [ ] [`_mm_mask_fmaddsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmaddsub_ph) * [ ] [`_mm_mask_fmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmsub_ph) * [ ] [`_mm_mask_fmsubadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmsubadd_ph) - * [ ] [`_mm_mask_fmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmul_pch) * [ ] [`_mm_mask_fnmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fnmadd_ph) * [ ] [`_mm_mask_fnmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fnmsub_ph) * [ ] [`_mm_mask_fpclass_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fpclass_ph_mask) @@ -772,14 +702,12 @@ * [ ] [`_mm_mask_min_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_min_ph) * [ ] [`_mm_mask_min_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_min_round_sh) * [ ] [`_mm_mask_min_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_min_sh) - * [ ] [`_mm_mask_mul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_mul_pch) * [ ] [`_mm_mask_rcp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_rcp_ph) * [ ] [`_mm_mask_reduce_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_reduce_ph) * [ ] 
[`_mm_mask_roundscale_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_roundscale_ph) * [ ] [`_mm_mask_rsqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_rsqrt_ph) * [ ] [`_mm_mask_scalef_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_scalef_ph) * [ ] [`_mm_mask_sqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_sqrt_ph) - * [ ] [`_mm_maskz_cmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cmul_pch) * [ ] [`_mm_maskz_conj_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_conj_pch) * [ ] [`_mm_maskz_cvtepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepi16_ph) * [ ] [`_mm_maskz_cvtepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepi32_ph) @@ -804,13 +732,11 @@ * [ ] [`_mm_maskz_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtxph_ps) * [ ] [`_mm_maskz_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtxps_ph) * [ ] [`_mm_maskz_fcmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fcmadd_pch) - * [ ] [`_mm_maskz_fcmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fcmul_pch) * [ ] [`_mm_maskz_fmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmadd_pch) * [ ] [`_mm_maskz_fmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmadd_ph) * [ ] [`_mm_maskz_fmaddsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmaddsub_ph) * [ ] [`_mm_maskz_fmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmsub_ph) * [ ] [`_mm_maskz_fmsubadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmsubadd_ph) - * [ ] [`_mm_maskz_fmul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmul_pch) * [ ] [`_mm_maskz_fnmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fnmadd_ph) * [ ] [`_mm_maskz_fnmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fnmsub_ph) * [ ] [`_mm_maskz_getexp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_getexp_ph) @@ -821,7 +747,6 @@ * [ ] [`_mm_maskz_min_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_min_ph) * [ ] [`_mm_maskz_min_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_min_round_sh) * [ ] [`_mm_maskz_min_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_min_sh) - * [ ] [`_mm_maskz_mul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_mul_pch) * [ ] [`_mm_maskz_rcp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_rcp_ph) * [ ] [`_mm_maskz_reduce_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_reduce_ph) * [ ] [`_mm_maskz_roundscale_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_roundscale_ph) @@ -834,7 +759,6 @@ * [ ] [`_mm_min_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_ph) * [ ] 
[`_mm_min_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_round_sh)
  * [ ] [`_mm_min_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_sh)
- * [ ] [`_mm_mul_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mul_pch)
  * [ ] [`_mm_permutex2var_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_permutex2var_ph)
  * [ ] [`_mm_permutexvar_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_permutexvar_ph)
  * [ ] [`_mm_rcp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_rcp_ph)
diff --git a/crates/core_arch/src/x86/avx512fp16.rs b/crates/core_arch/src/x86/avx512fp16.rs
index c6eeff1904..a2a31d87e9 100644
--- a/crates/core_arch/src/x86/avx512fp16.rs
+++ b/crates/core_arch/src/x86/avx512fp16.rs
@@ -615,6 +615,69 @@ pub unsafe fn _mm512_zextph128_ph512(a: __m128h) -> __m512h {
     )
 }
 
+/// Compare the lower half-precision (16-bit) floating-point elements in a and b based on the comparison
+/// operand specified by imm8, and store the result in mask vector k. Exceptions can be suppressed by
+/// passing _MM_FROUND_NO_EXC in the sae parameter.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmp_round_sh_mask)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[rustc_legacy_const_generics(2, 3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cmp_round_sh_mask<const IMM8: i32, const SAE: i32>(
+    a: __m128h,
+    b: __m128h,
+) -> __mmask8 {
+    static_assert_sae!(SAE);
+    _mm_mask_cmp_round_sh_mask::<IMM8, SAE>(0xff, a, b)
+}
+
+/// Compare the lower half-precision (16-bit) floating-point elements in a and b based on the comparison
+/// operand specified by imm8, and store the result in mask vector k using zeromask k1. Exceptions can be
+/// suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmp_round_sh_mask)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[rustc_legacy_const_generics(3, 4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_cmp_round_sh_mask<const IMM8: i32, const SAE: i32>(
+    k1: __mmask8,
+    a: __m128h,
+    b: __m128h,
+) -> __mmask8 {
+    static_assert_sae!(SAE);
+    vcmpsh(a, b, IMM8, k1, SAE)
+}
+
+/// Compare the lower half-precision (16-bit) floating-point elements in a and b based on the comparison
+/// operand specified by imm8, and store the result in mask vector k.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmp_sh_mask)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cmp_sh_mask<const IMM8: i32>(a: __m128h, b: __m128h) -> __mmask8 {
+    _mm_cmp_round_sh_mask::<IMM8, _MM_FROUND_CUR_DIRECTION>(a, b)
+}
+
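// Editorial sketch, not part of the original patch: how the scalar f16 compare masks
// above are meant to be used. `_CMP_EQ_OQ` and `_MM_FROUND_NO_EXC` are existing
// constants in core_arch; `_mm_set_sh` is added elsewhere in this patch series.
#[target_feature(enable = "avx512fp16")]
unsafe fn cmp_sh_demo() {
    let a = _mm_set_sh(1.0);
    let b = _mm_set_sh(1.0);
    // Only bit 0 of the returned mask is meaningful for the scalar form.
    let k = _mm_cmp_sh_mask::<_CMP_EQ_OQ>(a, b);
    assert_eq!(k & 1, 1);
    // The `_round_` variant can additionally suppress floating-point exceptions (SAE).
    let k = _mm_cmp_round_sh_mask::<_CMP_EQ_OQ, _MM_FROUND_NO_EXC>(a, b);
    assert_eq!(k & 1, 1);
}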
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmp_sh_mask) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mask_cmp_sh_mask( + k1: __mmask8, + a: __m128h, + b: __m128h, +) -> __mmask8 { + _mm_mask_cmp_round_sh_mask::(k1, a, b) +} + /// Cast vector of type `__m256h` to type `__m512h`. The upper 16 elements of the result are zeroed. /// This intrinsic can generate the `vzeroupper` instruction, but most of the time it does not generate /// any instructions. @@ -1236,7 +1299,7 @@ pub unsafe fn _mm512_maskz_add_round_ph( /// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions /// _MM_FROUND_CUR_DIRECTION /// -/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_add_round_ph) +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_add_round_sh) #[inline] #[target_feature(enable = "avx512fp16")] #[cfg_attr(test, assert_instr(vaddsh, ROUNDING = 8))] @@ -2227,1778 +2290,3986 @@ pub unsafe fn _mm_maskz_div_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { _mm_maskz_div_round_sh::<_MM_FROUND_CUR_DIRECTION>(k, a, b) } -#[allow(improper_ctypes)] -extern "C" { - #[link_name = "llvm.x86.avx512fp16.vcomi.sh"] - fn vcomish(a: __m128h, b: __m128h, imm8: i32, sae: i32) -> i32; +/// Multiply packed complex numbers in a and b, and store the results in dst. Each complex number is +/// composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex +/// number `complex = vec.fp16[0] + i * vec.fp16[1]`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mul_pch) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vfmulcph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mul_pch(a: __m128h, b: __m128h) -> __m128h { + _mm_mask_mul_pch(_mm_undefined_ph(), 0xff, a, b) +} - #[link_name = "llvm.x86.avx512fp16.add.ph.512"] - fn vaddph(a: __m512h, b: __m512h, rounding: i32) -> __m512h; - #[link_name = "llvm.x86.avx512fp16.sub.ph.512"] - fn vsubph(a: __m512h, b: __m512h, rounding: i32) -> __m512h; - #[link_name = "llvm.x86.avx512fp16.mul.ph.512"] - fn vmulph(a: __m512h, b: __m512h, rounding: i32) -> __m512h; - #[link_name = "llvm.x86.avx512fp16.div.ph.512"] - fn vdivph(a: __m512h, b: __m512h, rounding: i32) -> __m512h; +/// Multiply packed complex numbers in a and b, and store the results in dst using writemask k (the element +/// is copied from src when mask bit 0 is not set). Each complex number is composed of two adjacent +/// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`. 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_mul_pch) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vfmulcph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mask_mul_pch(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h { + transmute(vfmulcph_128(transmute(a), transmute(b), transmute(src), k)) +} - #[link_name = "llvm.x86.avx512fp16.mask.add.sh.round"] - fn vaddsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; - #[link_name = "llvm.x86.avx512fp16.mask.sub.sh.round"] - fn vsubsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; - #[link_name = "llvm.x86.avx512fp16.mask.mul.sh.round"] - fn vmulsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; - #[link_name = "llvm.x86.avx512fp16.mask.div.sh.round"] - fn vdivsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; +/// Multiply packed complex numbers in a and b, and store the results in dst using zeromask k (the element +/// is zeroed out when mask bit 0 is not set). Each complex number is composed of two adjacent +/// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_mul_pch) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vfmulcph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_maskz_mul_pch(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { + _mm_mask_mul_pch(_mm_setzero_ph(), k, a, b) +} +/// Multiply packed complex numbers in a and b, and store the results in dst. Each complex number is +/// composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex +/// number `complex = vec.fp16[0] + i * vec.fp16[1]`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mul_pch) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vfmulcph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_mul_pch(a: __m256h, b: __m256h) -> __m256h { + _mm256_mask_mul_pch(_mm256_undefined_ph(), 0xff, a, b) } -#[cfg(test)] -mod tests { - use crate::core_arch::x86::*; - use crate::mem::transmute; - use crate::ptr::{addr_of, addr_of_mut}; - use stdarch_test::simd_test; +/// Multiply packed complex numbers in a and b, and store the results in dst using writemask k (the element +/// is copied from src when mask bit 0 is not set). Each complex number is composed of two adjacent +/// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`. 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_mul_pch) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vfmulcph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_mask_mul_pch(src: __m256h, k: __mmask8, a: __m256h, b: __m256h) -> __m256h { + transmute(vfmulcph_256(transmute(a), transmute(b), transmute(src), k)) +} - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_set_ph() { - let r = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let e = _mm_setr_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - assert_eq_m128h(r, e); - } +/// Multiply packed complex numbers in a and b, and store the results in dst using zeromask k (the element +/// is zeroed out when mask bit 0 is not set). Each complex number is composed of two adjacent +/// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_mul_pch) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vfmulcph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_maskz_mul_pch(k: __mmask8, a: __m256h, b: __m256h) -> __m256h { + _mm256_mask_mul_pch(_mm256_setzero_ph(), k, a, b) +} - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_set_ph() { - let r = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let e = _mm256_setr_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, - ); - assert_eq_m256h(r, e); - } +/// Multiply packed complex numbers in a and b, and store the results in dst. Each complex number is +/// composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex +/// number `complex = vec.fp16[0] + i * vec.fp16[1]`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mul_pch) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vfmulcph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_mul_pch(a: __m512h, b: __m512h) -> __m512h { + _mm512_mask_mul_pch(_mm512_undefined_ph(), 0xffff, a, b) +} - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_set_ph() { - let r = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let e = _mm512_setr_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - assert_eq_m512h(r, e); - } +/// Multiply packed complex numbers in a and b, and store the results in dst using writemask k (the element +/// is copied from src when mask bit 0 is not set). Each complex number is composed of two adjacent +/// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`. 
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_mul_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmulcph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_mul_pch(src: __m512h, k: __mmask16, a: __m512h, b: __m512h) -> __m512h {
+    _mm512_mask_mul_round_pch::<_MM_FROUND_CUR_DIRECTION>(src, k, a, b)
+}
-    #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_set_sh() {
-        let r = _mm_set_sh(1.0);
-        let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0);
-        assert_eq_m128h(r, e);
-    }
+/// Multiply packed complex numbers in a and b, and store the results in dst using zeromask k (the element
+/// is zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent
+/// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_mul_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmulcph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_mul_pch(k: __mmask16, a: __m512h, b: __m512h) -> __m512h {
+    _mm512_mask_mul_pch(_mm512_setzero_ph(), k, a, b)
+}
-    #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_set1_ph() {
-        let r = _mm_set1_ph(1.0);
-        let e = _mm_set_ph(1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0);
-        assert_eq_m128h(r, e);
-    }
+/// Multiply the packed complex numbers in a and b, and store the results in dst. Each complex number is
+/// composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex
+/// number `complex = vec.fp16[0] + i * vec.fp16[1]`.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mul_round_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmulcph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mul_round_pch<const ROUNDING: i32>(a: __m512h, b: __m512h) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    _mm512_mask_mul_round_pch::<ROUNDING>(_mm512_undefined_ph(), 0xffff, a, b)
+}
-    #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm256_set1_ph() {
-        let r = _mm256_set1_ph(1.0);
-        let e = _mm256_set_ph(
-            1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
-        );
-        assert_eq_m256h(r, e);
-    }
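// Editorial sketch, not part of the original patch: the `mul_pch` family multiplies
// interleaved (re, im) f16 pairs, so with a = 1+2i and b = 3+4i each product pair
// becomes -5+10i. `_mm_setr_ph` and `assert_eq_m128h` come from this patch series;
// the same lane layout applies to the 256/512-bit and `_round_` variants above.
#[target_feature(enable = "avx512fp16,avx512vl")]
unsafe fn mul_pch_demo() {
    let a = _mm_setr_ph(1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0); // four copies of 1+2i
    let b = _mm_setr_ph(3.0, 4.0, 3.0, 4.0, 3.0, 4.0, 3.0, 4.0); // four copies of 3+4i
    let r = _mm_mul_pch(a, b);
    // (1+2i)*(3+4i) = (1*3 - 2*4) + (1*4 + 2*3)i = -5 + 10i
    let e = _mm_setr_ph(-5.0, 10.0, -5.0, 10.0, -5.0, 10.0, -5.0, 10.0);
    assert_eq_m128h(r, e);
}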
+/// Multiply the packed complex numbers in a and b, and store the results in dst using writemask k (the element
+/// is copied from src when the corresponding mask bit is not set). Each complex number is composed of two adjacent
+/// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_mul_round_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmulcph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_mul_round_pch<const ROUNDING: i32>(
+    src: __m512h,
+    k: __mmask16,
+    a: __m512h,
+    b: __m512h,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    transmute(vfmulcph_512(
+        transmute(a),
+        transmute(b),
+        transmute(src),
+        k,
+        ROUNDING,
+    ))
+}
-    #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_set1_ph() {
-        let r = _mm512_set1_ph(1.0);
-        let e = _mm512_set_ph(
-            1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
-            1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
-        );
-        assert_eq_m512h(r, e);
-    }
+/// Multiply the packed complex numbers in a and b, and store the results in dst using zeromask k (the element
+/// is zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent
+/// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_mul_round_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmulcph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_mul_round_pch<const ROUNDING: i32>(
+    k: __mmask16,
+    a: __m512h,
+    b: __m512h,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    _mm512_mask_mul_round_pch::<ROUNDING>(_mm512_setzero_ph(), k, a, b)
+}
-    #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_setr_ph() {
-        let r = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0);
-        let e = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0);
-        assert_eq_m128h(r, e);
-    }
+/// Multiply the lower complex numbers in a and b, and store the result in the lower elements of dst,
+/// and copy the upper 6 packed elements from a to the upper elements of dst. Each complex number is
+/// composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex
+/// number `complex = vec.fp16[0] + i * vec.fp16[1]`.
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mul_sch) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vfmulcsh))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mul_sch(a: __m128h, b: __m128h) -> __m128h { + _mm_mask_mul_sch(_mm_undefined_ph(), 0xff, a, b) +} - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_setr_ph() { - let r = _mm256_setr_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let e = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, - ); - assert_eq_m256h(r, e); - } +/// Multiply the lower complex numbers in a and b, and store the result in the lower elements of dst using +/// writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper 6 packed +/// elements from a to the upper elements of dst. Each complex number is composed of two adjacent +/// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_mul_sch) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vfmulcsh))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mask_mul_sch(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h { + _mm_mask_mul_round_sch::<_MM_FROUND_CUR_DIRECTION>(src, k, a, b) +} - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_setr_ph() { - let r = _mm512_setr_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let e = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, +/// Multiply the lower complex numbers in a and b, and store the result in the lower elements of dst using +/// zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 6 packed elements +/// from a to the upper elements of dst. Each complex number is composed of two adjacent half-precision +/// (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_mul_sch) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vfmulcsh))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_maskz_mul_sch(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { + _mm_mask_mul_sch(_mm_setzero_ph(), k, a, b) +} + +/// Multiply the lower complex numbers in a and b, and store the result in the lower elements of dst, +/// and copy the upper 6 packed elements from a to the upper elements of dst. Each complex number is +/// composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex +/// number `complex = vec.fp16[0] + i * vec.fp16[1]`. 
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mul_round_sch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmulcsh, ROUNDING = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mul_round_sch<const ROUNDING: i32>(a: __m128h, b: __m128h) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    _mm_mask_mul_round_sch::<ROUNDING>(_mm_undefined_ph(), 0xff, a, b)
+}
+
+/// Multiply the lower complex numbers in a and b, and store the result in the lower elements of dst using
+/// writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper 6 packed
+/// elements from a to the upper elements of dst. Each complex number is composed of two adjacent half-precision
+/// (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_mul_round_sch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmulcsh, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_mul_round_sch<const ROUNDING: i32>(
+    src: __m128h,
+    k: __mmask8,
+    a: __m128h,
+    b: __m128h,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    transmute(vfmulcsh(
+        transmute(a),
+        transmute(b),
+        transmute(src),
+        k,
+        ROUNDING,
+    ))
+}
+
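// Editorial sketch, not part of the original patch: for the scalar `sch` forms only
// mask bit 0 matters; when it is clear the masked variant keeps `src`'s lower complex
// number, and lanes 2..8 of the result always come from `a`. `_mm_setr_ph` and
// `assert_eq_m128h` come from this patch series.
#[target_feature(enable = "avx512fp16")]
unsafe fn mask_mul_sch_demo() {
    let a = _mm_setr_ph(1.0, 2.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0); // lower complex: 1+2i
    let b = _mm_setr_ph(3.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0); // lower complex: 3+4i
    let src = _mm_setr_ph(7.0, 8.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0);
    // Mask bit 0 set: lower pair is the complex product -5+10i; upper lanes copied from a.
    let r = _mm_mask_mul_sch(src, 1, a, b);
    assert_eq_m128h(r, _mm_setr_ph(-5.0, 10.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0));
    // Mask bit 0 clear: lower pair falls back to src's 7+8i instead.
    let r = _mm_mask_mul_sch(src, 0, a, b);
    assert_eq_m128h(r, _mm_setr_ph(7.0, 8.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0));
}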
+/// Multiply the lower complex numbers in a and b, and store the result in the lower elements of dst using
+/// zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 6 packed elements
+/// from a to the upper elements of dst. Each complex number is composed of two adjacent half-precision
+/// (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_mul_round_sch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmulcsh, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_mul_round_sch<const ROUNDING: i32>(
+    k: __mmask8,
+    a: __m128h,
+    b: __m128h,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    _mm_mask_mul_round_sch::<ROUNDING>(_mm_setzero_ph(), k, a, b)
+}
+
+/// Multiply packed complex numbers in a and b, and store the results in dst. Each complex number is
+/// composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex
+/// number `complex = vec.fp16[0] + i * vec.fp16[1]`.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmul_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfmulcph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_fmul_pch(a: __m128h, b: __m128h) -> __m128h {
+    _mm_mul_pch(a, b)
+}
+
+/// Multiply packed complex numbers in a and b, and store the results in dst using writemask k (the element
+/// is copied from src when the corresponding mask bit is not set). Each complex number is composed of two adjacent
+/// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmul_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfmulcph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_fmul_pch(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
+    _mm_mask_mul_pch(src, k, a, b)
+}
+
+/// Multiply packed complex numbers in a and b, and store the results in dst using zeromask k (the element
+/// is zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent
+/// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmul_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfmulcph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_fmul_pch(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
+    _mm_maskz_mul_pch(k, a, b)
+}
+
+/// Multiply packed complex numbers in a and b, and store the results in dst. Each complex number is
+/// composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex
+/// number `complex = vec.fp16[0] + i * vec.fp16[1]`.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fmul_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfmulcph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_fmul_pch(a: __m256h, b: __m256h) -> __m256h {
+    _mm256_mul_pch(a, b)
+}
+
+/// Multiply packed complex numbers in a and b, and store the results in dst using writemask k (the element
+/// is copied from src when the corresponding mask bit is not set). Each complex number is composed of two adjacent
+/// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fmul_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfmulcph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_mask_fmul_pch(src: __m256h, k: __mmask8, a: __m256h, b: __m256h) -> __m256h {
+    _mm256_mask_mul_pch(src, k, a, b)
+}
+
+/// Multiply packed complex numbers in a and b, and store the results in dst using zeromask k (the element
+/// is zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent
+/// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fmul_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfmulcph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_maskz_fmul_pch(k: __mmask8, a: __m256h, b: __m256h) -> __m256h {
+    _mm256_maskz_mul_pch(k, a, b)
+}
+
+/// Multiply packed complex numbers in a and b, and store the results in dst. Each complex number is composed
+/// of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmul_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmulcph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_fmul_pch(a: __m512h, b: __m512h) -> __m512h {
+    _mm512_mul_pch(a, b)
+}
+
+/// Multiply packed complex numbers in a and b, and store the results in dst using writemask k (the element
+/// is copied from src when the corresponding mask bit is not set). Each complex number is composed of two adjacent
+/// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmul_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmulcph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_fmul_pch(src: __m512h, k: __mmask16, a: __m512h, b: __m512h) -> __m512h {
+    _mm512_mask_mul_pch(src, k, a, b)
+}
+
+/// Multiply packed complex numbers in a and b, and store the results in dst using zeromask k (the element
+/// is zeroed out when the corresponding mask bit is not set).
+/// Each complex number is composed of two adjacent half-precision
+/// (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmul_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmulcph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_fmul_pch(k: __mmask16, a: __m512h, b: __m512h) -> __m512h {
+    _mm512_maskz_mul_pch(k, a, b)
+}
+
+/// Multiply packed complex numbers in a and b, and store the results in dst. Each complex number is composed
+/// of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmul_round_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmulcph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_fmul_round_pch<const ROUNDING: i32>(a: __m512h, b: __m512h) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    _mm512_mul_round_pch::<ROUNDING>(a, b)
+}
+
+/// Multiply packed complex numbers in a and b, and store the results in dst using writemask k (the element
+/// is copied from src when the corresponding mask bit is not set). Each complex number is composed of two adjacent
+/// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmul_round_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmulcph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_fmul_round_pch<const ROUNDING: i32>(
+    src: __m512h,
+    k: __mmask16,
+    a: __m512h,
+    b: __m512h,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    _mm512_mask_mul_round_pch::<ROUNDING>(src, k, a, b)
+}
+
+/// Multiply packed complex numbers in a and b, and store the results in dst using zeromask k (the element
+/// is zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent
+/// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmul_round_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmulcph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_fmul_round_pch<const ROUNDING: i32>(
+    k: __mmask16,
+    a: __m512h,
+    b: __m512h,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    _mm512_maskz_mul_round_pch::<ROUNDING>(k, a, b)
+}
+
+/// Multiply the lower complex numbers in a and b, and store the results in dst. Each complex number is
+/// composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex
+/// number `complex = vec.fp16[0] + i * vec.fp16[1]`.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmul_sch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmulcsh))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_fmul_sch(a: __m128h, b: __m128h) -> __m128h {
+    _mm_mul_sch(a, b)
+}
+
+/// Multiply the lower complex numbers in a and b, and store the results in dst using writemask k (the element
+/// is copied from src when mask bit 0 is not set). Each complex number is composed of two adjacent half-precision
+/// (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmul_sch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmulcsh))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_fmul_sch(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
+    _mm_mask_mul_sch(src, k, a, b)
+}
+
+/// Multiply the lower complex numbers in a and b, and store the results in dst using zeromask k (the element
+/// is zeroed out when mask bit 0 is not set). Each complex number is composed of two adjacent half-precision
+/// (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmul_sch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmulcsh))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_fmul_sch(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
+    _mm_maskz_mul_sch(k, a, b)
+}
+
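A hypothetical note on the element layout these intrinsics assume: with the interleaved `(re, im)` scheme described above, a 128-bit `__m128h` holds four complex values in lanes (0,1), (2,3), (4,5), (6,7). A nightly-only sketch that broadcasts one complex number across those lanes, mirroring the `_mm_set1_pch` helper the patch defines in its tests further down:

```rust
use core::arch::x86_64::*;

// Needs the unstable `f16` type and AVX512FP16 support; illustration only.
#[target_feature(enable = "avx512fp16")]
unsafe fn set1_pch(re: f16, im: f16) -> __m128h {
    // Even lanes carry real parts, odd lanes carry imaginary parts.
    _mm_setr_ph(re, im, re, im, re, im, re, im)
}
```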
+/// Multiply the lower complex numbers in a and b, and store the results in dst. Each complex number is composed
+/// of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmul_round_sch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmulcsh, ROUNDING = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_fmul_round_sch<const ROUNDING: i32>(a: __m128h, b: __m128h) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    _mm_mul_round_sch::<ROUNDING>(a, b)
+}
+
+/// Multiply the lower complex numbers in a and b, and store the results in dst using writemask k (the element
+/// is copied from src when mask bit 0 is not set). Each complex number is composed of two adjacent half-precision
+/// (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmul_round_sch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmulcsh, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_fmul_round_sch<const ROUNDING: i32>(
+    src: __m128h,
+    k: __mmask8,
+    a: __m128h,
+    b: __m128h,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    _mm_mask_mul_round_sch::<ROUNDING>(src, k, a, b)
+}
+
+/// Multiply the lower complex numbers in a and b, and store the results in dst using zeromask k (the element
+/// is zeroed out when mask bit 0 is not set). Each complex number is composed of two adjacent half-precision
+/// (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmul_round_sch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmulcsh, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_fmul_round_sch<const ROUNDING: i32>(
+    k: __mmask8,
+    a: __m128h,
+    b: __m128h,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    _mm_maskz_mul_round_sch::<ROUNDING>(k, a, b)
+}
+
+/// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, and
+/// store the results in dst. Each complex number is composed of two adjacent half-precision (16-bit)
+/// floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`,
+/// or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmul_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfcmulcph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cmul_pch(a: __m128h, b: __m128h) -> __m128h {
+    _mm_mask_cmul_pch(_mm_undefined_ph(), 0xff, a, b)
+}
+
+/// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, and
+/// store the results in dst using writemask k (the element is copied from src when the corresponding mask bit is not set).
+/// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which
+/// defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmul_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfcmulcph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_cmul_pch(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
+    transmute(vfcmulcph_128(transmute(a), transmute(b), transmute(src), k))
+}
+
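For reference, the `cmul`/`fcmul` family multiplies each complex element of `a` by the conjugate of the corresponding element of `b`; a minimal scalar sketch of that math (plain Rust, `f32` standing in for `f16`):

```rust
/// a * conj(b) for one complex element; tuples are (real, imaginary).
fn complex_mul_conj(a: (f32, f32), b: (f32, f32)) -> (f32, f32) {
    // (a.re + i*a.im) * (b.re - i*b.im)
    //   = (a.re*b.re + a.im*b.im) + i*(a.im*b.re - a.re*b.im)
    (a.0 * b.0 + a.1 * b.1, a.1 * b.0 - a.0 * b.1)
}
```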
+/// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, and
+/// store the results in dst using zeromask k (the element is zeroed out when the corresponding mask bit is not set).
+/// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which
+/// defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cmul_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfcmulcph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_cmul_pch(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
+    _mm_mask_cmul_pch(_mm_setzero_ph(), k, a, b)
+}
+
+/// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, and
+/// store the results in dst. Each complex number is composed of two adjacent half-precision (16-bit)
+/// floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`,
+/// or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmul_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfcmulcph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_cmul_pch(a: __m256h, b: __m256h) -> __m256h {
+    _mm256_mask_cmul_pch(_mm256_undefined_ph(), 0xff, a, b)
+}
+
+/// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, and
+/// store the results in dst using writemask k (the element is copied from src when the corresponding mask bit is not set).
+/// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which
+/// defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmul_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfcmulcph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_mask_cmul_pch(src: __m256h, k: __mmask8, a: __m256h, b: __m256h) -> __m256h {
+    transmute(vfcmulcph_256(transmute(a), transmute(b), transmute(src), k))
+}
+
+/// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, and
+/// store the results in dst using zeromask k (the element is zeroed out when the corresponding mask bit is not set).
+/// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which
+/// defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cmul_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfcmulcph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_maskz_cmul_pch(k: __mmask8, a: __m256h, b: __m256h) -> __m256h {
+    _mm256_mask_cmul_pch(_mm256_setzero_ph(), k, a, b)
+}
+
+/// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, and
+/// store the results in dst. Each complex number is composed of two adjacent half-precision (16-bit)
+/// floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`,
+/// or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmul_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfcmulcph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_cmul_pch(a: __m512h, b: __m512h) -> __m512h {
+    _mm512_mask_cmul_pch(_mm512_undefined_ph(), 0xffff, a, b)
+}
+
+/// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, and
+/// store the results in dst using writemask k (the element is copied from src when the corresponding mask bit is not set).
+/// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which
+/// defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmul_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfcmulcph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_cmul_pch(src: __m512h, k: __mmask16, a: __m512h, b: __m512h) -> __m512h {
+    _mm512_mask_cmul_round_pch::<_MM_FROUND_CUR_DIRECTION>(src, k, a, b)
+}
+
+/// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, and
+/// store the results in dst using zeromask k (the element is zeroed out when the corresponding mask bit is not set).
+/// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which
+/// defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cmul_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfcmulcph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cmul_pch(k: __mmask16, a: __m512h, b: __m512h) -> __m512h {
+    _mm512_mask_cmul_pch(_mm512_setzero_ph(), k, a, b)
+}
+
+/// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, and
+/// store the results in dst. Each complex number is composed of two adjacent half-precision (16-bit)
+/// floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`,
+/// or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmul_round_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfcmulcph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_cmul_round_pch<const ROUNDING: i32>(a: __m512h, b: __m512h) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    _mm512_mask_cmul_round_pch::<ROUNDING>(_mm512_undefined_ph(), 0xffff, a, b)
+}
+
+/// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, and
+/// store the results in dst using writemask k (the element is copied from src when the corresponding mask bit is not set).
+/// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which
+/// defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmul_round_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfcmulcph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_cmul_round_pch<const ROUNDING: i32>(
+    src: __m512h,
+    k: __mmask16,
+    a: __m512h,
+    b: __m512h,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    transmute(vfcmulcph_512(
+        transmute(a),
+        transmute(b),
+        transmute(src),
+        k,
+        ROUNDING,
+    ))
+}
+
+/// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, and
+/// store the results in dst using zeromask k (the element is zeroed out when the corresponding mask bit is not set).
+/// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which
+/// defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cmul_round_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfcmulcph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cmul_round_pch<const ROUNDING: i32>(
+    k: __mmask16,
+    a: __m512h,
+    b: __m512h,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    _mm512_mask_cmul_round_pch::<ROUNDING>(_mm512_setzero_ph(), k, a, b)
+}
+
+/// Multiply the lower complex numbers in a by the complex conjugates of the lower complex numbers in b,
+/// and store the results in dst. Each complex number is composed of two adjacent half-precision (16-bit)
+/// floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmul_sch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfcmulcsh))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cmul_sch(a: __m128h, b: __m128h) -> __m128h {
+    _mm_mask_cmul_sch(_mm_undefined_ph(), 0xff, a, b)
+}
+
+/// Multiply the lower complex numbers in a by the complex conjugates of the lower complex numbers in b,
+/// and store the results in dst using writemask k (the element is copied from src when mask bit 0 is not set).
+/// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which
+/// defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmul_sch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfcmulcsh))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_cmul_sch(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
+    _mm_mask_cmul_round_sch::<_MM_FROUND_CUR_DIRECTION>(src, k, a, b)
+}
+
+/// Multiply the lower complex numbers in a by the complex conjugates of the lower complex numbers in b,
+/// and store the results in dst using zeromask k (the element is zeroed out when mask bit 0 is not set).
+/// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which
+/// defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cmul_sch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfcmulcsh))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_cmul_sch(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
+    _mm_mask_cmul_sch(_mm_setzero_ph(), k, a, b)
+}
+
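The writemask/zeromask distinction above is uniform across the family: with a writemask, an unselected result element falls back to `src`; with a zeromask it becomes zero. A scalar sketch of that selection rule (plain Rust; each mask bit covers one `(re, im)` pair, and `f32` stands in for `f16`):

```rust
/// Bit set -> computed value; bit clear -> the `src` element (writemask).
fn mask_select(k: u8, elem: usize, computed: (f32, f32), src: (f32, f32)) -> (f32, f32) {
    if (k >> elem) & 1 == 1 { computed } else { src }
}

/// Bit set -> computed value; bit clear -> zero (zeromask).
fn maskz_select(k: u8, elem: usize, computed: (f32, f32)) -> (f32, f32) {
    mask_select(k, elem, computed, (0.0, 0.0))
}
```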
+/// Multiply the lower complex numbers in a by the complex conjugates of the lower complex numbers in b,
+/// and store the results in dst. Each complex number is composed of two adjacent half-precision (16-bit)
+/// floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmul_round_sch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfcmulcsh, ROUNDING = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cmul_round_sch<const ROUNDING: i32>(a: __m128h, b: __m128h) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    _mm_mask_cmul_round_sch::<ROUNDING>(_mm_undefined_ph(), 0xff, a, b)
+}
+
+/// Multiply the lower complex numbers in a by the complex conjugates of the lower complex numbers in b,
+/// and store the results in dst using writemask k (the element is copied from src when mask bit 0 is not set).
+/// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which
+/// defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmul_round_sch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfcmulcsh, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_cmul_round_sch<const ROUNDING: i32>(
+    src: __m128h,
+    k: __mmask8,
+    a: __m128h,
+    b: __m128h,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    transmute(vfcmulcsh(
+        transmute(a),
+        transmute(b),
+        transmute(src),
+        k,
+        ROUNDING,
+    ))
+}
+
+/// Multiply the lower complex numbers in a by the complex conjugates of the lower complex numbers in b,
+/// and store the results in dst using zeromask k (the element is zeroed out when mask bit 0 is not set).
+/// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which
+/// defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cmul_round_sch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfcmulcsh, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_cmul_round_sch<const ROUNDING: i32>(
+    k: __mmask8,
+    a: __m128h,
+    b: __m128h,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    _mm_mask_cmul_round_sch::<ROUNDING>(_mm_setzero_ph(), k, a, b)
+}
+
+/// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, and
+/// store the results in dst. Each complex number is composed of two adjacent half-precision (16-bit)
+/// floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`,
+/// or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fcmul_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfcmulcph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_fcmul_pch(a: __m128h, b: __m128h) -> __m128h {
+    _mm_cmul_pch(a, b)
+}
+
+/// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, and
+/// store the results in dst using writemask k (the element is copied from src when the corresponding mask bit is not set).
+/// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which
+/// defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fcmul_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfcmulcph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_fcmul_pch(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
+    _mm_mask_cmul_pch(src, k, a, b)
+}
+
+/// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, and
+/// store the results in dst using zeromask k (the element is zeroed out when the corresponding mask bit is not set).
+/// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which
+/// defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fcmul_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfcmulcph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_fcmul_pch(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
+    _mm_maskz_cmul_pch(k, a, b)
+}
+
+/// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, and
+/// store the results in dst. Each complex number is composed of two adjacent half-precision (16-bit)
+/// floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`,
+/// or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fcmul_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfcmulcph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_fcmul_pch(a: __m256h, b: __m256h) -> __m256h {
+    _mm256_cmul_pch(a, b)
+}
+
+/// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, and
+/// store the results in dst using writemask k (the element is copied from src when the corresponding mask bit is not set).
+/// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which
+/// defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fcmul_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfcmulcph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_mask_fcmul_pch(src: __m256h, k: __mmask8, a: __m256h, b: __m256h) -> __m256h {
+    _mm256_mask_cmul_pch(src, k, a, b)
+}
+
+/// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, and
+/// store the results in dst using zeromask k (the element is zeroed out when the corresponding mask bit is not set).
+/// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which
+/// defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fcmul_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfcmulcph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_maskz_fcmul_pch(k: __mmask8, a: __m256h, b: __m256h) -> __m256h {
+    _mm256_maskz_cmul_pch(k, a, b)
+}
+
+/// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, and
+/// store the results in dst. Each complex number is composed of two adjacent half-precision (16-bit)
+/// floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`,
+/// or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fcmul_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfcmulcph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_fcmul_pch(a: __m512h, b: __m512h) -> __m512h {
+    _mm512_cmul_pch(a, b)
+}
+
+/// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, and
+/// store the results in dst using writemask k (the element is copied from src when the corresponding mask bit is not set).
+/// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which
+/// defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fcmul_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfcmulcph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_fcmul_pch(src: __m512h, k: __mmask16, a: __m512h, b: __m512h) -> __m512h {
+    _mm512_mask_cmul_pch(src, k, a, b)
+}
+
+/// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, and
+/// store the results in dst using zeromask k (the element is zeroed out when the corresponding mask bit is not set).
+/// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which
+/// defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fcmul_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfcmulcph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_fcmul_pch(k: __mmask16, a: __m512h, b: __m512h) -> __m512h {
+    _mm512_maskz_cmul_pch(k, a, b)
+}
+
+/// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, and
+/// store the results in dst.
+/// Each complex number is composed of two adjacent half-precision (16-bit)
+/// floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fcmul_round_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfcmulcph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_fcmul_round_pch<const ROUNDING: i32>(a: __m512h, b: __m512h) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    _mm512_cmul_round_pch::<ROUNDING>(a, b)
+}
+
+/// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, and
+/// store the results in dst using writemask k (the element is copied from src when the corresponding mask bit is not set).
+/// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which
+/// defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fcmul_round_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfcmulcph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_fcmul_round_pch<const ROUNDING: i32>(
+    src: __m512h,
+    k: __mmask16,
+    a: __m512h,
+    b: __m512h,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    _mm512_mask_cmul_round_pch::<ROUNDING>(src, k, a, b)
+}
+
+/// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, and
+/// store the results in dst using zeromask k (the element is zeroed out when the corresponding mask bit is not set).
+/// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which
+/// defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fcmul_round_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfcmulcph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_fcmul_round_pch<const ROUNDING: i32>(
+    k: __mmask16,
+    a: __m512h,
+    b: __m512h,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    _mm512_maskz_cmul_round_pch::<ROUNDING>(k, a, b)
+}
+
+/// Multiply the lower complex numbers in a by the complex conjugates of the lower complex numbers in b,
+/// and store the results in dst. Each complex number is composed of two adjacent half-precision (16-bit)
+/// floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`,
+/// or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fcmul_sch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfcmulcsh))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_fcmul_sch(a: __m128h, b: __m128h) -> __m128h {
+    _mm_cmul_sch(a, b)
+}
+
+/// Multiply the lower complex numbers in a by the complex conjugates of the lower complex numbers in b,
+/// and store the results in dst using writemask k (the element is copied from src when mask bit 0 is not set).
+/// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which
+/// defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fcmul_sch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfcmulcsh))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_fcmul_sch(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
+    _mm_mask_cmul_sch(src, k, a, b)
+}
+
+/// Multiply the lower complex numbers in a by the complex conjugates of the lower complex numbers in b,
+/// and store the results in dst using zeromask k (the element is zeroed out when mask bit 0 is not set).
+/// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which
+/// defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fcmul_sch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfcmulcsh))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_fcmul_sch(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
+    _mm_maskz_cmul_sch(k, a, b)
+}
+
+/// Multiply the lower complex numbers in a by the complex conjugates of the lower complex numbers in b,
+/// and store the results in dst. Each complex number is composed of two adjacent half-precision (16-bit)
+/// floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fcmul_round_sch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfcmulcsh, ROUNDING = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_fcmul_round_sch<const ROUNDING: i32>(a: __m128h, b: __m128h) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    _mm_cmul_round_sch::<ROUNDING>(a, b)
+}
+
+/// Multiply the lower complex numbers in a by the complex conjugates of the lower complex numbers in b,
+/// and store the results in dst using writemask k (the element is copied from src when mask bit 0 is not set).
+/// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which
+/// defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fcmul_round_sch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfcmulcsh, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_fcmul_round_sch<const ROUNDING: i32>(
+    src: __m128h,
+    k: __mmask8,
+    a: __m128h,
+    b: __m128h,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    _mm_mask_cmul_round_sch::<ROUNDING>(src, k, a, b)
+}
+
+/// Multiply the lower complex numbers in a by the complex conjugates of the lower complex numbers in b,
+/// and store the results in dst using zeromask k (the element is zeroed out when mask bit 0 is not set).
+/// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which
+/// defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fcmul_round_sch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfcmulcsh, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_fcmul_round_sch<const ROUNDING: i32>(
+    k: __mmask8,
+    a: __m128h,
+    b: __m128h,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    _mm_maskz_cmul_round_sch::<ROUNDING>(k, a, b)
+}
+
+#[allow(improper_ctypes)]
+extern "C" {
+    #[link_name = "llvm.x86.avx512fp16.mask.cmp.sh"]
+    fn vcmpsh(a: __m128h, b: __m128h, imm8: i32, mask: __mmask8, sae: i32) -> __mmask8;
+    #[link_name = "llvm.x86.avx512fp16.vcomi.sh"]
+    fn vcomish(a: __m128h, b: __m128h, imm8: i32, sae: i32) -> i32;
+
+    #[link_name = "llvm.x86.avx512fp16.add.ph.512"]
+    fn vaddph(a: __m512h, b: __m512h, rounding: i32) -> __m512h;
+    #[link_name = "llvm.x86.avx512fp16.sub.ph.512"]
+    fn vsubph(a: __m512h, b: __m512h, rounding: i32) -> __m512h;
+    #[link_name = "llvm.x86.avx512fp16.mul.ph.512"]
+    fn vmulph(a: __m512h, b: __m512h, rounding: i32) -> __m512h;
+    #[link_name = "llvm.x86.avx512fp16.div.ph.512"]
+    fn vdivph(a: __m512h, b: __m512h, rounding: i32) -> __m512h;
+
+    #[link_name = "llvm.x86.avx512fp16.mask.add.sh.round"]
+    fn vaddsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h;
+    #[link_name = "llvm.x86.avx512fp16.mask.sub.sh.round"]
+    fn vsubsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h;
+    #[link_name = "llvm.x86.avx512fp16.mask.mul.sh.round"]
+    fn vmulsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h;
+    #[link_name = "llvm.x86.avx512fp16.mask.div.sh.round"]
+    fn vdivsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h;
+
+    #[link_name = "llvm.x86.avx512fp16.mask.vfmul.cph.128"]
+    fn vfmulcph_128(a: __m128, b: __m128, src: __m128, k: __mmask8) -> __m128;
+    #[link_name = "llvm.x86.avx512fp16.mask.vfmul.cph.256"]
+    fn vfmulcph_256(a: __m256, b: __m256, src: __m256, k: __mmask8) -> __m256;
+    #[link_name = "llvm.x86.avx512fp16.mask.vfmul.cph.512"]
+    fn vfmulcph_512(a: __m512, b: __m512, src: __m512, k: __mmask16, rounding: i32) -> __m512;
+    #[link_name = "llvm.x86.avx512fp16.mask.vfmul.csh"]
+    fn vfmulcsh(a: __m128, b: __m128, src: __m128, k: __mmask8, rounding: i32) -> __m128;
+
+    #[link_name = "llvm.x86.avx512fp16.mask.vfcmul.cph.128"]
+    fn vfcmulcph_128(a: __m128, b: __m128, src: __m128, k: __mmask8) -> __m128;
+    #[link_name = "llvm.x86.avx512fp16.mask.vfcmul.cph.256"]
+    fn vfcmulcph_256(a: __m256, b: __m256, src: __m256, k: __mmask8) -> __m256;
+    #[link_name = "llvm.x86.avx512fp16.mask.vfcmul.cph.512"]
+    fn vfcmulcph_512(a: __m512, b: __m512, src: __m512, k: __mmask16, rounding: i32) -> __m512;
rounding: i32) -> __m512; + #[link_name = "llvm.x86.avx512fp16.mask.vfcmul.csh"] + fn vfcmulcsh(a: __m128, b: __m128, src: __m128, k: __mmask8, rounding: i32) -> __m128; + +} + +#[cfg(test)] +mod tests { + use crate::core_arch::x86::*; + use crate::mem::transmute; + use crate::ptr::{addr_of, addr_of_mut}; + use stdarch_test::simd_test; + + #[target_feature(enable = "avx512fp16")] + unsafe fn _mm_set1_pch(re: f16, im: f16) -> __m128h { + _mm_setr_ph(re, im, re, im, re, im, re, im) + } + + #[target_feature(enable = "avx512fp16")] + unsafe fn _mm256_set1_pch(re: f16, im: f16) -> __m256h { + _mm256_setr_ph( + re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, + ) + } + + #[target_feature(enable = "avx512fp16")] + unsafe fn _mm512_set1_pch(re: f16, im: f16) -> __m512h { + _mm512_setr_ph( + re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, + re, im, re, im, re, im, re, im, re, im, + ) + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_set_ph() { + let r = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let e = _mm_setr_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_set_ph() { + let r = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let e = _mm256_setr_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_set_ph() { + let r = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let e = _mm512_setr_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_set_sh() { + let r = _mm_set_sh(1.0); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_set1_ph() { + let r = _mm_set1_ph(1.0); + let e = _mm_set_ph(1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_set1_ph() { + let r = _mm256_set1_ph(1.0); + let e = _mm256_set_ph( + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_set1_ph() { + let r = _mm512_set1_ph(1.0); + let e = _mm512_set_ph( + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_setr_ph() { + let r = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let e = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_setr_ph() { + let r = _mm256_setr_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let e = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + 
assert_eq_m256h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm512_setr_ph() {
+        let r = _mm512_setr_ph(
+            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
+            31.0, 32.0,
+        );
+        let e = _mm512_set_ph(
+            32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0,
+            18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0,
             3.0, 2.0, 1.0,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_setzero_ph() {
-        let r = _mm_setzero_ph();
-        let e = _mm_set1_ph(0.0);
+    unsafe fn test_mm_setzero_ph() {
+        let r = _mm_setzero_ph();
+        let e = _mm_set1_ph(0.0);
+        assert_eq_m128h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm256_setzero_ph() {
+        let r = _mm256_setzero_ph();
+        let e = _mm256_set1_ph(0.0);
+        assert_eq_m256h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm512_setzero_ph() {
+        let r = _mm512_setzero_ph();
+        let e = _mm512_set1_ph(0.0);
+        assert_eq_m512h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm_castsi128_ph() {
+        let a = _mm_set1_epi16(0x3c00);
+        let r = _mm_castsi128_ph(a);
+        let e = _mm_set1_ph(1.0);
+        assert_eq_m128h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm256_castsi256_ph() {
+        let a = _mm256_set1_epi16(0x3c00);
+        let r = _mm256_castsi256_ph(a);
+        let e = _mm256_set1_ph(1.0);
+        assert_eq_m256h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm512_castsi512_ph() {
+        let a = _mm512_set1_epi16(0x3c00);
+        let r = _mm512_castsi512_ph(a);
+        let e = _mm512_set1_ph(1.0);
+        assert_eq_m512h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm_castph_si128() {
+        let a = _mm_set1_ph(1.0);
+        let r = _mm_castph_si128(a);
+        let e = _mm_set1_epi16(0x3c00);
+        assert_eq_m128i(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm256_castph_si256() {
+        let a = _mm256_set1_ph(1.0);
+        let r = _mm256_castph_si256(a);
+        let e = _mm256_set1_epi16(0x3c00);
+        assert_eq_m256i(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm512_castph_si512() {
+        let a = _mm512_set1_ph(1.0);
+        let r = _mm512_castph_si512(a);
+        let e = _mm512_set1_epi16(0x3c00);
+        assert_eq_m512i(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm_castps_ph() {
+        let a = _mm_castsi128_ps(_mm_set1_epi16(0x3c00));
+        let r = _mm_castps_ph(a);
+        let e = _mm_set1_ph(1.0);
+        assert_eq_m128h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm256_castps_ph() {
+        let a = _mm256_castsi256_ps(_mm256_set1_epi16(0x3c00));
+        let r = _mm256_castps_ph(a);
+        let e = _mm256_set1_ph(1.0);
+        assert_eq_m256h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm512_castps_ph() {
+        let a = _mm512_castsi512_ps(_mm512_set1_epi16(0x3c00));
+        let r = _mm512_castps_ph(a);
+        let e = _mm512_set1_ph(1.0);
+        assert_eq_m512h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm_castph_ps() {
+        let a = _mm_castsi128_ph(_mm_set1_epi32(0x3f800000));
+        let r = _mm_castph_ps(a);
+        let e = _mm_set1_ps(1.0);
+        assert_eq_m128(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm256_castph_ps() {
+        let a = _mm256_castsi256_ph(_mm256_set1_epi32(0x3f800000));
+        let r = _mm256_castph_ps(a);
+        let e = _mm256_set1_ps(1.0);
+        assert_eq_m256(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    
unsafe fn test_mm512_castph_ps() { + let a = _mm512_castsi512_ph(_mm512_set1_epi32(0x3f800000)); + let r = _mm512_castph_ps(a); + let e = _mm512_set1_ps(1.0); + assert_eq_m512(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_castpd_ph() { + let a = _mm_castsi128_pd(_mm_set1_epi16(0x3c00)); + let r = _mm_castpd_ph(a); + let e = _mm_set1_ph(1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castpd_ph() { + let a = _mm256_castsi256_pd(_mm256_set1_epi16(0x3c00)); + let r = _mm256_castpd_ph(a); + let e = _mm256_set1_ph(1.0); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castpd_ph() { + let a = _mm512_castsi512_pd(_mm512_set1_epi16(0x3c00)); + let r = _mm512_castpd_ph(a); + let e = _mm512_set1_ph(1.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_castph_pd() { + let a = _mm_castsi128_ph(_mm_set1_epi64x(0x3ff0000000000000)); + let r = _mm_castph_pd(a); + let e = _mm_set1_pd(1.0); + assert_eq_m128d(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castph_pd() { + let a = _mm256_castsi256_ph(_mm256_set1_epi64x(0x3ff0000000000000)); + let r = _mm256_castph_pd(a); + let e = _mm256_set1_pd(1.0); + assert_eq_m256d(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph_pd() { + let a = _mm512_castsi512_ph(_mm512_set1_epi64(0x3ff0000000000000)); + let r = _mm512_castph_pd(a); + let e = _mm512_set1_pd(1.0); + assert_eq_m512d(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castph256_ph128() { + let a = _mm256_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., + ); + let r = _mm256_castph256_ph128(a); + let e = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph512_ph128() { + let a = _mm512_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., + ); + let r = _mm512_castph512_ph128(a); + let e = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph512_ph256() { + let a = _mm512_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., + ); + let r = _mm512_castph512_ph256(a); + let e = _mm256_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castph128_ph256() { + let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + let r = _mm256_castph128_ph256(a); + assert_eq_m128h(_mm256_castph256_ph128(r), a); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph128_ph512() { + let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + let r = _mm512_castph128_ph512(a); + assert_eq_m128h(_mm512_castph512_ph128(r), a); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph256_ph512() { + let a = _mm256_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., + ); + let r = _mm512_castph256_ph512(a); + assert_eq_m256h(_mm512_castph512_ph256(r), a); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_zextph128_ph256() { + let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 
8.); + let r = _mm256_zextph128_ph256(a); + let e = _mm256_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 0., 0., 0., 0., 0., 0., 0., 0., + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_zextph128_ph512() { + let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + let r = _mm512_zextph128_ph512(a); + let e = _mm512_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_zextph256_ph512() { + let a = _mm256_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., + ); + let r = _mm512_zextph256_ph512(a); + let e = _mm512_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_cmp_round_sh_mask() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_cmp_round_sh_mask::<_CMP_EQ_OQ, _MM_FROUND_NO_EXC>(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_cmp_round_sh_mask() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_mask_cmp_round_sh_mask::<_CMP_EQ_OQ, _MM_FROUND_NO_EXC>(0, a, b); + assert_eq!(r, 0); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_cmp_sh_mask() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_cmp_sh_mask::<_CMP_EQ_OQ>(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_cmp_sh_mask() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_mask_cmp_sh_mask::<_CMP_EQ_OQ>(0, a, b); + assert_eq!(r, 0); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comi_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_comi_round_sh::<_CMP_EQ_OQ, _MM_FROUND_NO_EXC>(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comi_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_comi_sh::<_CMP_EQ_OQ>(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comieq_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_comieq_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comige_sh() { + let a = _mm_set_sh(2.0); + let b = _mm_set_sh(1.0); + let r = _mm_comige_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comigt_sh() { + let a = _mm_set_sh(2.0); + let b = _mm_set_sh(1.0); + let r = _mm_comigt_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comile_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_comile_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comilt_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_comilt_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comineq_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_comineq_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_ucomieq_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_ucomieq_sh(a, b); + assert_eq!(r, 1); + } + 
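+    // Editor's aside, an illustrative sketch that is not part of the original
+    // patch: the comi/ucomi tests above and below only use ordered, non-NaN
+    // inputs. Under an ordered-quiet predicate such as _CMP_EQ_OQ, a NaN
+    // operand makes the comparison unordered, so it yields 0 rather than 1.
+    // `f16::NAN` here assumes the nightly `f16` primitive this crate enables.
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_cmp_sh_mask_nan_sketch() {
+        let a = _mm_set_sh(f16::NAN);
+        let b = _mm_set_sh(1.0);
+        let r = _mm_cmp_sh_mask::<_CMP_EQ_OQ>(a, b);
+        assert_eq!(r, 0);
+    }
+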
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm_ucomige_sh() {
+        let a = _mm_set_sh(2.0);
+        let b = _mm_set_sh(1.0);
+        let r = _mm_ucomige_sh(a, b);
+        assert_eq!(r, 1);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm_ucomigt_sh() {
+        let a = _mm_set_sh(2.0);
+        let b = _mm_set_sh(1.0);
+        let r = _mm_ucomigt_sh(a, b);
+        assert_eq!(r, 1);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm_ucomile_sh() {
+        let a = _mm_set_sh(1.0);
+        let b = _mm_set_sh(2.0);
+        let r = _mm_ucomile_sh(a, b);
+        assert_eq!(r, 1);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm_ucomilt_sh() {
+        let a = _mm_set_sh(1.0);
+        let b = _mm_set_sh(2.0);
+        let r = _mm_ucomilt_sh(a, b);
+        assert_eq!(r, 1);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm_ucomineq_sh() {
+        let a = _mm_set_sh(1.0);
+        let b = _mm_set_sh(2.0);
+        let r = _mm_ucomineq_sh(a, b);
+        assert_eq!(r, 1);
+    }
+
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm_load_ph() {
+        let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0);
+        let b = _mm_load_ph(addr_of!(a).cast());
+        assert_eq_m128h(a, b);
+    }
+
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm256_load_ph() {
+        let a = _mm256_set_ph(
+            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+        );
+        let b = _mm256_load_ph(addr_of!(a).cast());
+        assert_eq_m256h(a, b);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm512_load_ph() {
+        let a = _mm512_set_ph(
+            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
+            31.0, 32.0,
+        );
+        let b = _mm512_load_ph(addr_of!(a).cast());
+        assert_eq_m512h(a, b);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm_load_sh() {
+        let a = _mm_set_sh(1.0);
+        let b = _mm_load_sh(addr_of!(a).cast());
+        assert_eq_m128h(a, b);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm_mask_load_sh() {
+        let a = _mm_set_sh(1.0);
+        let src = _mm_set_sh(2.);
+        let b = _mm_mask_load_sh(src, 1, addr_of!(a).cast());
+        assert_eq_m128h(a, b);
+        let b = _mm_mask_load_sh(src, 0, addr_of!(a).cast());
+        assert_eq_m128h(src, b);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm_maskz_load_sh() {
+        let a = _mm_set_sh(1.0);
+        let b = _mm_maskz_load_sh(1, addr_of!(a).cast());
+        assert_eq_m128h(a, b);
+        let b = _mm_maskz_load_sh(0, addr_of!(a).cast());
+        assert_eq_m128h(_mm_setzero_ph(), b);
+    }
+
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm_loadu_ph() {
+        let array = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
+        let r = _mm_loadu_ph(array.as_ptr());
+        let e = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_setzero_ph() {
-        let r = _mm256_setzero_ph();
-        let e = _mm256_set1_ph(0.0);
+    unsafe fn test_mm256_loadu_ph() {
+        let array = [
+            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+        ];
+        let r = _mm256_loadu_ph(array.as_ptr());
+        let e = _mm256_setr_ph(
+            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+        );
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_setzero_ph() {
-        let r = _mm512_setzero_ph();
-        let e = _mm512_set1_ph(0.0);
+    unsafe fn test_mm512_loadu_ph() {
+        let array = [
+            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0,
16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ]; + let r = _mm512_loadu_ph(array.as_ptr()); + let e = _mm512_setr_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_castsi128_ph() { - let a = _mm_set1_epi16(0x3c00); - let r = _mm_castsi128_ph(a); - let e = _mm_set1_ph(1.0); + unsafe fn test_mm_move_sh() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_sh(9.0); + let r = _mm_move_sh(a, b); + let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 9.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castsi256_ph() { - let a = _mm256_set1_epi16(0x3c00); - let r = _mm256_castsi256_ph(a); - let e = _mm256_set1_ph(1.0); - assert_eq_m256h(r, e); + unsafe fn test_mm_mask_move_sh() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_sh(9.0); + let src = _mm_set_sh(10.0); + let r = _mm_mask_move_sh(src, 0, a, b); + let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 10.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castsi512_ph() { - let a = _mm512_set1_epi16(0x3c00); - let r = _mm512_castsi512_ph(a); - let e = _mm512_set1_ph(1.0); - assert_eq_m512h(r, e); + unsafe fn test_mm_maskz_move_sh() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_sh(9.0); + let r = _mm_maskz_move_sh(0, a, b); + let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 0.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_store_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let mut b = _mm_setzero_ph(); + _mm_store_ph(addr_of_mut!(b).cast(), a); + assert_eq_m128h(a, b); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_store_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let mut b = _mm256_setzero_ph(); + _mm256_store_ph(addr_of_mut!(b).cast(), a); + assert_eq_m256h(a, b); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_castph_si128() { - let a = _mm_set1_ph(1.0); - let r = _mm_castph_si128(a); - let e = _mm_set1_epi16(0x3c00); - assert_eq_m128i(r, e); + unsafe fn test_mm512_store_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let mut b = _mm512_setzero_ph(); + _mm512_store_ph(addr_of_mut!(b).cast(), a); + assert_eq_m512h(a, b); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castph_si256() { - let a = _mm256_set1_ph(1.0); - let r = _mm256_castph_si256(a); - let e = _mm256_set1_epi16(0x3c00); - assert_eq_m256i(r, e); + unsafe fn test_mm_store_sh() { + let a = _mm_set_sh(1.0); + let mut b = _mm_setzero_ph(); + _mm_store_sh(addr_of_mut!(b).cast(), a); + assert_eq_m128h(a, b); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castph_si512() { - let a = _mm512_set1_ph(1.0); - let r = _mm512_castph_si512(a); - let e = _mm512_set1_epi16(0x3c00); - assert_eq_m512i(r, e); + unsafe fn test_mm_mask_store_sh() { + let a = _mm_set_sh(1.0); + let mut b = _mm_setzero_ph(); + 
_mm_mask_store_sh(addr_of_mut!(b).cast(), 0, a); + assert_eq_m128h(_mm_setzero_ph(), b); + _mm_mask_store_sh(addr_of_mut!(b).cast(), 1, a); + assert_eq_m128h(a, b); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_storeu_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let mut array = [0.0; 8]; + _mm_storeu_ph(array.as_mut_ptr(), a); + assert_eq_m128h(a, _mm_loadu_ph(array.as_ptr())); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_storeu_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let mut array = [0.0; 16]; + _mm256_storeu_ph(array.as_mut_ptr(), a); + assert_eq_m256h(a, _mm256_loadu_ph(array.as_ptr())); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_castps_ph() { - let a = _mm_castsi128_ps(_mm_set1_epi16(0x3c00)); - let r = _mm_castps_ph(a); - let e = _mm_set1_ph(1.0); + unsafe fn test_mm512_storeu_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let mut array = [0.0; 32]; + _mm512_storeu_ph(array.as_mut_ptr(), a); + assert_eq_m512h(a, _mm512_loadu_ph(array.as_ptr())); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_add_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let r = _mm_add_ph(a, b); + let e = _mm_set1_ph(9.0); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castps_ph() { - let a = _mm256_castsi256_ps(_mm256_set1_epi16(0x3c00)); - let r = _mm256_castps_ph(a); - let e = _mm256_set1_ph(1.0); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_add_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm_mask_add_ph(src, 0b01010101, a, b); + let e = _mm_set_ph(10., 9., 12., 9., 14., 9., 16., 9.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_add_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let r = _mm_maskz_add_ph(0b01010101, a, b); + let e = _mm_set_ph(0., 9., 0., 9., 0., 9., 0., 9.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_add_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let r = _mm256_add_ph(a, b); + let e = _mm256_set1_ph(17.0); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_add_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let src = _mm256_set_ph( + 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., + ); + let r = _mm256_mask_add_ph(src, 0b0101010101010101, a, b); + let e = _mm256_set_ph( + 18., 17., 20., 17., 22., 17., 24., 17., 26., 17., 
28., 17., 30., 17., 32., 17., + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_add_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let r = _mm256_maskz_add_ph(0b0101010101010101, a, b); + let e = _mm256_set_ph( + 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., + ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castps_ph() { - let a = _mm512_castsi512_ps(_mm512_set1_epi16(0x3c00)); - let r = _mm512_castps_ph(a); - let e = _mm512_set1_ph(1.0); + unsafe fn test_mm512_add_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_add_ph(a, b); + let e = _mm512_set1_ph(33.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_add_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let src = _mm512_set_ph( + 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., + 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + ); + let r = _mm512_mask_add_ph(src, 0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 34., 33., 36., 33., 38., 33., 40., 33., 42., 33., 44., 33., 46., 33., 48., 33., 50., + 33., 52., 33., 54., 33., 56., 33., 58., 33., 60., 33., 62., 33., 64., 33., + ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_castph_ps() { - let a = _mm_castsi128_ph(_mm_set1_epi32(0x3f800000)); - let r = _mm_castph_ps(a); - let e = _mm_set1_ps(1.0); - assert_eq_m128(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castph_ps() { - let a = _mm256_castsi256_ph(_mm256_set1_epi32(0x3f800000)); - let r = _mm256_castph_ps(a); - let e = _mm256_set1_ps(1.0); - assert_eq_m256(r, e); + unsafe fn test_mm512_maskz_add_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_maskz_add_ph(0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., + 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., + ); + assert_eq_m512h(r, e); } #[simd_test(enable = 
"avx512fp16")] - unsafe fn test_mm512_castph_ps() { - let a = _mm512_castsi512_ph(_mm512_set1_epi32(0x3f800000)); - let r = _mm512_castph_ps(a); - let e = _mm512_set1_ps(1.0); - assert_eq_m512(r, e); + unsafe fn test_mm512_add_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_add_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set1_ph(33.0); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_castpd_ph() { - let a = _mm_castsi128_pd(_mm_set1_epi16(0x3c00)); - let r = _mm_castpd_ph(a); - let e = _mm_set1_ph(1.0); - assert_eq_m128h(r, e); + unsafe fn test_mm512_mask_add_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let src = _mm512_set_ph( + 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., + 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + ); + let r = _mm512_mask_add_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 34., 33., 36., 33., 38., 33., 40., 33., 42., 33., 44., 33., 46., 33., 48., 33., 50., + 33., 52., 33., 54., 33., 56., 33., 58., 33., 60., 33., 62., 33., 64., 33., + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castpd_ph() { - let a = _mm256_castsi256_pd(_mm256_set1_epi16(0x3c00)); - let r = _mm256_castpd_ph(a); - let e = _mm256_set1_ph(1.0); - assert_eq_m256h(r, e); + unsafe fn test_mm512_maskz_add_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_maskz_add_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., + 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castpd_ph() { - let a = _mm512_castsi512_pd(_mm512_set1_epi16(0x3c00)); - let r = _mm512_castpd_ph(a); - let e = _mm512_set1_ph(1.0); - assert_eq_m512h(r, e); + unsafe fn test_mm_add_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_set_sh(3.0); + 
assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_castph_pd() { - let a = _mm_castsi128_ph(_mm_set1_epi64x(0x3ff0000000000000)); - let r = _mm_castph_pd(a); - let e = _mm_set1_pd(1.0); - assert_eq_m128d(r, e); + unsafe fn test_mm_mask_add_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let src = _mm_set_sh(4.0); + let r = _mm_mask_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, + ); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 1, a, b, + ); + let e = _mm_set_sh(3.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castph_pd() { - let a = _mm256_castsi256_ph(_mm256_set1_epi64x(0x3ff0000000000000)); - let r = _mm256_castph_pd(a); - let e = _mm256_set1_pd(1.0); - assert_eq_m256d(r, e); + unsafe fn test_mm_maskz_add_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = + _mm_maskz_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = + _mm_maskz_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_set_sh(3.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castph_pd() { - let a = _mm512_castsi512_ph(_mm512_set1_epi64(0x3ff0000000000000)); - let r = _mm512_castph_pd(a); - let e = _mm512_set1_pd(1.0); - assert_eq_m512d(r, e); + unsafe fn test_mm_add_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_add_sh(a, b); + let e = _mm_set_sh(3.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castph256_ph128() { - let a = _mm256_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., - ); - let r = _mm256_castph256_ph128(a); - let e = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + unsafe fn test_mm_mask_add_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let src = _mm_set_sh(4.0); + let r = _mm_mask_add_sh(src, 0, a, b); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_add_sh(src, 1, a, b); + let e = _mm_set_sh(3.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castph512_ph128() { - let a = _mm512_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., - 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., - ); - let r = _mm512_castph512_ph128(a); - let e = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + unsafe fn test_mm_maskz_add_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_maskz_add_sh(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = _mm_maskz_add_sh(1, a, b); + let e = _mm_set_sh(3.0); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castph512_ph256() { - let a = _mm512_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., - 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., - ); - let r = _mm512_castph512_ph256(a); - let e = _mm256_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., - ); - assert_eq_m256h(r, e); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_sub_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); 
+ let r = _mm_sub_ph(a, b); + let e = _mm_set_ph(-7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castph128_ph256() { - let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); - let r = _mm256_castph128_ph256(a); - assert_eq_m128h(_mm256_castph256_ph128(r), a); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_sub_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm_mask_sub_ph(src, 0b01010101, a, b); + let e = _mm_set_ph(10., -5., 12., -1., 14., 3., 16., 7.); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castph128_ph512() { - let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); - let r = _mm512_castph128_ph512(a); - assert_eq_m128h(_mm512_castph512_ph128(r), a); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_sub_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let r = _mm_maskz_sub_ph(0b01010101, a, b); + let e = _mm_set_ph(0., -5., 0., -1., 0., 3., 0., 7.); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castph256_ph512() { - let a = _mm256_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_sub_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - let r = _mm512_castph256_ph512(a); - assert_eq_m256h(_mm512_castph512_ph256(r), a); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let r = _mm256_sub_ph(a, b); + let e = _mm256_set_ph( + -15.0, -13.0, -11.0, -9.0, -7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, + 15.0, + ); + assert_eq_m256h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_zextph128_ph256() { - let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); - let r = _mm256_zextph128_ph256(a); - let e = _mm256_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 0., 0., 0., 0., 0., 0., 0., 0., + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_sub_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let src = _mm256_set_ph( + 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., + ); + let r = _mm256_mask_sub_ph(src, 0b0101010101010101, a, b); + let e = _mm256_set_ph( + 18., -13., 20., -9., 22., -5., 24., -1., 26., 3., 28., 7., 30., 11., 32., 15., ); assert_eq_m256h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_zextph128_ph512() { - let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); - let r = _mm512_zextph128_ph512(a); - let e = _mm512_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., - 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_sub_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - assert_eq_m512h(r, e); + let b = 
_mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let r = _mm256_maskz_sub_ph(0b0101010101010101, a, b); + let e = _mm256_set_ph( + 0., -13., 0., -9., 0., -5., 0., -1., 0., 3., 0., 7., 0., 11., 0., 15., + ); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_zextph256_ph512() { - let a = _mm256_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., + unsafe fn test_mm512_sub_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, ); - let r = _mm512_zextph256_ph512(a); - let e = _mm512_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 0., 0., 0., 0., - 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_sub_ph(a, b); + let e = _mm512_set_ph( + -31.0, -29.0, -27.0, -25.0, -23.0, -21.0, -19.0, -17.0, -15.0, -13.0, -11.0, -9.0, + -7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, + 23.0, 25.0, 27.0, 29.0, 31.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comi_round_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = _mm_comi_round_sh::<_CMP_EQ_OQ, _MM_FROUND_NO_EXC>(a, b); - assert_eq!(r, 1); + unsafe fn test_mm512_mask_sub_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let src = _mm512_set_ph( + 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., + 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + ); + let r = _mm512_mask_sub_ph(src, 0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 34., -29., 36., -25., 38., -21., 40., -17., 42., -13., 44., -9., 46., -5., 48., -1., + 50., 3., 52., 7., 54., 11., 56., 15., 58., 19., 60., 23., 62., 27., 64., 31., + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comi_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = _mm_comi_sh::<_CMP_EQ_OQ>(a, b); - assert_eq!(r, 1); + unsafe fn test_mm512_maskz_sub_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_maskz_sub_ph(0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 0., -29., 0., -25., 0., -21., 0., -17., 0., -13., 0., -9., 0., -5., 0., -1., 0., 3., + 0., 7., 0., 11., 0., 15., 0., 19., 0., 23., 0., 27., 0., 31., + ); + 
assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comieq_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = _mm_comieq_sh(a, b); - assert_eq!(r, 1); + unsafe fn test_mm512_sub_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_sub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set_ph( + -31.0, -29.0, -27.0, -25.0, -23.0, -21.0, -19.0, -17.0, -15.0, -13.0, -11.0, -9.0, + -7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, + 23.0, 25.0, 27.0, 29.0, 31.0, + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comige_sh() { - let a = _mm_set_sh(2.0); - let b = _mm_set_sh(1.0); - let r = _mm_comige_sh(a, b); - assert_eq!(r, 1); + unsafe fn test_mm512_mask_sub_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let src = _mm512_set_ph( + 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., + 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + ); + let r = _mm512_mask_sub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 34., -29., 36., -25., 38., -21., 40., -17., 42., -13., 44., -9., 46., -5., 48., -1., + 50., 3., 52., 7., 54., 11., 56., 15., 58., 19., 60., 23., 62., 27., 64., 31., + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comigt_sh() { - let a = _mm_set_sh(2.0); - let b = _mm_set_sh(1.0); - let r = _mm_comigt_sh(a, b); - assert_eq!(r, 1); + unsafe fn test_mm512_maskz_sub_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_maskz_sub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 0., -29., 0., -25., 0., -21., 0., -17., 0., -13., 0., -9., 0., -5., 0., -1., 0., 3., + 0., 7., 0., 11., 0., 15., 0., 19., 0., 23., 0., 27., 0., 31., + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comile_sh() { + unsafe fn test_mm_sub_round_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); - let r = _mm_comile_sh(a, b); - assert_eq!(r, 1); + let r = _mm_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC 
}>(a, b); + let e = _mm_set_sh(-1.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comilt_sh() { + unsafe fn test_mm_mask_sub_round_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); - let r = _mm_comilt_sh(a, b); - assert_eq!(r, 1); + let src = _mm_set_sh(4.0); + let r = _mm_mask_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, + ); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 1, a, b, + ); + let e = _mm_set_sh(-1.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comineq_sh() { + unsafe fn test_mm_maskz_sub_round_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); - let r = _mm_comineq_sh(a, b); - assert_eq!(r, 1); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_ucomieq_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = _mm_ucomieq_sh(a, b); - assert_eq!(r, 1); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_ucomige_sh() { - let a = _mm_set_sh(2.0); - let b = _mm_set_sh(1.0); - let r = _mm_ucomige_sh(a, b); - assert_eq!(r, 1); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_ucomigt_sh() { - let a = _mm_set_sh(2.0); - let b = _mm_set_sh(1.0); - let r = _mm_ucomigt_sh(a, b); - assert_eq!(r, 1); + let r = + _mm_maskz_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = + _mm_maskz_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_set_sh(-1.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_ucomile_sh() { + unsafe fn test_mm_sub_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); - let r = _mm_ucomile_sh(a, b); - assert_eq!(r, 1); + let r = _mm_sub_sh(a, b); + let e = _mm_set_sh(-1.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_ucomilt_sh() { + unsafe fn test_mm_mask_sub_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); - let r = _mm_ucomilt_sh(a, b); - assert_eq!(r, 1); + let src = _mm_set_sh(4.0); + let r = _mm_mask_sub_sh(src, 0, a, b); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_sub_sh(src, 1, a, b); + let e = _mm_set_sh(-1.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_ucomineq_sh() { + unsafe fn test_mm_maskz_sub_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); - let r = _mm_ucomineq_sh(a, b); - assert_eq!(r, 1); + let r = _mm_maskz_sub_sh(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = _mm_maskz_sub_sh(1, a, b); + let e = _mm_set_sh(-1.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_load_ph() { + unsafe fn test_mm_mul_ph() { let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_load_ph(addr_of!(a).cast()); - assert_eq_m128h(a, b); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let r = _mm_mul_ph(a, b); + let e = _mm_set_ph(8.0, 14.0, 18.0, 20.0, 20.0, 18.0, 14.0, 8.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_load_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let b = _mm256_load_ph(addr_of!(a).cast()); - assert_eq_m256h(a, b); - } - - #[simd_test(enable = "avx512fp16")] - unsafe 
fn test_mm512_load_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_load_ph(addr_of!(a).cast()); - assert_eq_m512h(a, b); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_load_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_load_sh(addr_of!(a).cast()); - assert_eq_m128h(a, b); + unsafe fn test_mm_mask_mul_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm_mask_mul_ph(src, 0b01010101, a, b); + let e = _mm_set_ph(10., 14., 12., 20., 14., 18., 16., 8.); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_load_sh() { - let a = _mm_set_sh(1.0); - let src = _mm_set_sh(2.); - let b = _mm_mask_load_sh(src, 1, addr_of!(a).cast()); - assert_eq_m128h(a, b); - let b = _mm_mask_load_sh(src, 0, addr_of!(a).cast()); - assert_eq_m128h(src, b); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_mul_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let r = _mm_maskz_mul_ph(0b01010101, a, b); + let e = _mm_set_ph(0., 14., 0., 20., 0., 18., 0., 8.); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_load_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_maskz_load_sh(1, addr_of!(a).cast()); - assert_eq_m128h(a, b); - let b = _mm_maskz_load_sh(0, addr_of!(a).cast()); - assert_eq_m128h(_mm_setzero_ph(), b); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mul_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let r = _mm256_mul_ph(a, b); + let e = _mm256_set_ph( + 16.0, 30.0, 42.0, 52.0, 60.0, 66.0, 70.0, 72.0, 72.0, 70.0, 66.0, 60.0, 52.0, 42.0, + 30.0, 16.0, + ); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_loadu_ph() { - let array = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]; - let r = _mm_loadu_ph(array.as_ptr()); - let e = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - assert_eq_m128h(r, e); + unsafe fn test_mm256_mask_mul_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let src = _mm256_set_ph( + 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., + ); + let r = _mm256_mask_mul_ph(src, 0b0101010101010101, a, b); + let e = _mm256_set_ph( + 18., 30., 20., 52., 22., 66., 24., 72., 26., 70., 28., 60., 30., 42., 32., 16., + ); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_loadu_ph() { - let array = [ - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ]; - let r = _mm256_loadu_ph(array.as_ptr()); - let e = _mm256_setr_ph( + unsafe fn test_mm256_maskz_mul_ph() { + let a = _mm256_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); + let b = _mm256_set_ph( + 
16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let r = _mm256_maskz_mul_ph(0b0101010101010101, a, b); + let e = _mm256_set_ph( + 0., 30., 0., 52., 0., 66., 0., 72., 0., 70., 0., 60., 0., 42., 0., 16., + ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_loadu_ph() { - let array = [ + unsafe fn test_mm512_mul_ph() { + let a = _mm512_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, - ]; - let r = _mm512_loadu_ph(array.as_ptr()); - let e = _mm512_setr_ph( + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_mul_ph(a, b); + let e = _mm512_set_ph( + 32.0, 62.0, 90.0, 116.0, 140.0, 162.0, 182.0, 200.0, 216.0, 230.0, 242.0, 252.0, 260.0, + 266.0, 270.0, 272.0, 272.0, 270.0, 266.0, 260.0, 252.0, 242.0, 230.0, 216.0, 200.0, + 182.0, 162.0, 140.0, 116.0, 90.0, 62.0, 32.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_mul_ph() { + let a = _mm512_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let src = _mm512_set_ph( + 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., + 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + ); + let r = _mm512_mask_mul_ph(src, 0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 34., 62., 36., 116., 38., 162., 40., 200., 42., 230., 44., 252., 46., 266., 48., 272., + 50., 270., 52., 260., 54., 242., 56., 216., 58., 182., 60., 140., 62., 90., 64., 32., + ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_move_sh() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_sh(9.0); - let r = _mm_move_sh(a, b); - let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 9.0); - assert_eq_m128h(r, e); + unsafe fn test_mm512_maskz_mul_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_maskz_mul_ph(0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 0., 62., 0., 116., 0., 162., 0., 200., 0., 230., 0., 252., 0., 266., 0., 272., 0., + 270., 0., 260., 0., 242., 0., 216., 0., 182., 0., 140., 0., 90., 0., 32., + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_move_sh() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_sh(9.0); - let src = _mm_set_sh(10.0); - let r = _mm_mask_move_sh(src, 0, a, b); - let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 
5.0, 6.0, 7.0, 10.0);
-        assert_eq_m128h(r, e);
+    unsafe fn test_mm512_mul_round_ph() {
+        let a = _mm512_set_ph(
+            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
+            31.0, 32.0,
+        );
+        let b = _mm512_set_ph(
+            32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0,
+            18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0,
+            3.0, 2.0, 1.0,
+        );
+        let r = _mm512_mul_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
+        let e = _mm512_set_ph(
+            32.0, 62.0, 90.0, 116.0, 140.0, 162.0, 182.0, 200.0, 216.0, 230.0, 242.0, 252.0, 260.0,
+            266.0, 270.0, 272.0, 272.0, 270.0, 266.0, 260.0, 252.0, 242.0, 230.0, 216.0, 200.0,
+            182.0, 162.0, 140.0, 116.0, 90.0, 62.0, 32.0,
+        );
+        assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_maskz_move_sh() {
-        let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0);
-        let b = _mm_set_sh(9.0);
-        let r = _mm_maskz_move_sh(0, a, b);
-        let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 0.0);
-        assert_eq_m128h(r, e);
-    }
-
-    #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_store_ph() {
-        let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0);
-        let mut b = _mm_setzero_ph();
-        _mm_store_ph(addr_of_mut!(b).cast(), a);
-        assert_eq_m128h(a, b);
-    }
-
-    #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_store_ph() {
-        let a = _mm256_set_ph(
+    unsafe fn test_mm512_mask_mul_round_ph() {
+        let a = _mm512_set_ph(
             1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
+            31.0, 32.0,
+        );
+        let b = _mm512_set_ph(
+            32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0,
+            18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0,
+            3.0, 2.0, 1.0,
+        );
+        let src = _mm512_set_ph(
+            34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50.,
+            51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65.,
+        );
+        let r = _mm512_mask_mul_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            src,
+            0b01010101010101010101010101010101,
+            a,
+            b,
         );
-        let mut b = _mm256_setzero_ph();
-        _mm256_store_ph(addr_of_mut!(b).cast(), a);
-        assert_eq_m256h(a, b);
+        let e = _mm512_set_ph(
+            34., 62., 36., 116., 38., 162., 40., 200., 42., 230., 44., 252., 46., 266., 48., 272.,
+            50., 270., 52., 260., 54., 242., 56., 216., 58., 182., 60., 140., 62., 90., 64., 32.,
+        );
+        assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_store_ph() {
+    unsafe fn test_mm512_maskz_mul_round_ph() {
         let a = _mm512_set_ph(
             1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
             17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
             31.0, 32.0,
         );
-        let mut b = _mm512_setzero_ph();
-        _mm512_store_ph(addr_of_mut!(b).cast(), a);
-        assert_eq_m512h(a, b);
+        let b = _mm512_set_ph(
+            32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0,
+            18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0,
+            3.0, 2.0, 1.0,
+        );
+        let r = _mm512_maskz_mul_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            0b01010101010101010101010101010101,
+            a,
+            b,
+        );
+        let e = _mm512_set_ph(
+            0., 62., 0., 116., 0., 162., 0., 200., 0., 230., 0., 252., 0., 266., 0., 272., 0.,
+            270., 0., 260., 0., 242., 0., 216., 0., 182., 0., 140., 0., 90., 0., 32.,
+        );
+        assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_store_sh() {
+    unsafe fn test_mm_mul_round_sh() {
         let a = _mm_set_sh(1.0);
-        let mut b = _mm_setzero_ph();
-        _mm_store_sh(addr_of_mut!(b).cast(), a);
-        assert_eq_m128h(a, b);
+        let b = _mm_set_sh(2.0);
+        let r = _mm_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
+        let e = _mm_set_sh(2.0);
+        assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask_store_sh() {
+    unsafe fn test_mm_mask_mul_round_sh() {
         let a = _mm_set_sh(1.0);
-        let mut b = _mm_setzero_ph();
-        _mm_mask_store_sh(addr_of_mut!(b).cast(), 0, a);
-        assert_eq_m128h(_mm_setzero_ph(), b);
-        _mm_mask_store_sh(addr_of_mut!(b).cast(), 1, a);
-        assert_eq_m128h(a, b);
+        let b = _mm_set_sh(2.0);
+        let src = _mm_set_sh(4.0);
+        let r = _mm_mask_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            src, 0, a, b,
+        );
+        let e = _mm_set_sh(4.0);
+        assert_eq_m128h(r, e);
+        let r = _mm_mask_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            src, 1, a, b,
+        );
+        let e = _mm_set_sh(2.0);
+        assert_eq_m128h(r, e);
     }
 
-    #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_storeu_ph() {
-        let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0);
-        let mut array = [0.0; 8];
-        _mm_storeu_ph(array.as_mut_ptr(), a);
-        assert_eq_m128h(a, _mm_loadu_ph(array.as_ptr()));
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm_maskz_mul_round_sh() {
+        let a = _mm_set_sh(1.0);
+        let b = _mm_set_sh(2.0);
+        let r =
+            _mm_maskz_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b);
+        let e = _mm_set_sh(0.0);
+        assert_eq_m128h(r, e);
+        let r =
+            _mm_maskz_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b);
+        let e = _mm_set_sh(2.0);
+        assert_eq_m128h(r, e);
     }
 
-    #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_storeu_ph() {
-        let a = _mm256_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-        );
-        let mut array = [0.0; 16];
-        _mm256_storeu_ph(array.as_mut_ptr(), a);
-        assert_eq_m256h(a, _mm256_loadu_ph(array.as_ptr()));
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm_mul_sh() {
+        let a = _mm_set_sh(1.0);
+        let b = _mm_set_sh(2.0);
+        let r = _mm_mul_sh(a, b);
+        let e = _mm_set_sh(2.0);
+        assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_storeu_ph() {
-        let a = _mm512_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
-            31.0, 32.0,
-        );
-        let mut array = [0.0; 32];
-        _mm512_storeu_ph(array.as_mut_ptr(), a);
-        assert_eq_m512h(a, _mm512_loadu_ph(array.as_ptr()));
+    unsafe fn test_mm_mask_mul_sh() {
+        let a = _mm_set_sh(1.0);
+        let b = _mm_set_sh(2.0);
+        let src = _mm_set_sh(4.0);
+        let r = _mm_mask_mul_sh(src, 0, a, b);
+        let e = _mm_set_sh(4.0);
+        assert_eq_m128h(r, e);
+        let r = _mm_mask_mul_sh(src, 1, a, b);
+        let e = _mm_set_sh(2.0);
+        assert_eq_m128h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm_maskz_mul_sh() {
+        let a = _mm_set_sh(1.0);
+        let b = _mm_set_sh(2.0);
+        let r = _mm_maskz_mul_sh(0, a, b);
+        let e = _mm_set_sh(0.0);
+        assert_eq_m128h(r, e);
+        let r = _mm_maskz_mul_sh(1, a, b);
+        let e = _mm_set_sh(2.0);
+        assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_add_ph() {
-        let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0);
-        let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0);
-        let r = _mm_add_ph(a, b);
-        let e = _mm_set1_ph(9.0);
+    unsafe fn test_mm_div_ph() {
+        let a = _mm_set1_ph(1.0);
+        let b = _mm_set1_ph(2.0);
+        let r = _mm_div_ph(a, b);
+        let e = _mm_set1_ph(0.5);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_mask_add_ph() {
-        let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0);
-        let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0);
-        let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.);
-        let r = _mm_mask_add_ph(src, 0b01010101, a, b);
-        let e = _mm_set_ph(10., 9., 12., 9., 14., 9., 16., 9.);
+    unsafe fn test_mm_mask_div_ph() {
+        let a = _mm_set1_ph(1.0);
+        let b = _mm_set1_ph(2.0);
+        let src = _mm_set_ph(4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0);
+        let r = _mm_mask_div_ph(src, 0b01010101, a, b);
+        let e = _mm_set_ph(4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_maskz_add_ph() {
-        let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0);
-        let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0);
-        let r = _mm_maskz_add_ph(0b01010101, a, b);
-        let e = _mm_set_ph(0., 9., 0., 9., 0., 9., 0., 9.);
+    unsafe fn test_mm_maskz_div_ph() {
+        let a = _mm_set1_ph(1.0);
+        let b = _mm_set1_ph(2.0);
+        let r = _mm_maskz_div_ph(0b01010101, a, b);
+        let e = _mm_set_ph(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_add_ph() {
-        let a = _mm256_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-        );
-        let b = _mm256_set_ph(
-            16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0,
-        );
-        let r = _mm256_add_ph(a, b);
-        let e = _mm256_set1_ph(17.0);
+    unsafe fn test_mm256_div_ph() {
+        let a = _mm256_set1_ph(1.0);
+        let b = _mm256_set1_ph(2.0);
+        let r = _mm256_div_ph(a, b);
+        let e = _mm256_set1_ph(0.5);
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_mask_add_ph() {
-        let a = _mm256_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-        );
-        let b = _mm256_set_ph(
-            16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0,
-        );
+    unsafe fn test_mm256_mask_div_ph() {
+        let a = _mm256_set1_ph(1.0);
+        let b = _mm256_set1_ph(2.0);
         let src = _mm256_set_ph(
-            18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33.,
+            4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0,
+            19.0,
         );
-        let r = _mm256_mask_add_ph(src, 0b0101010101010101, a, b);
-        let e = _mm256_set_ph(
-            18., 17., 20., 17., 22., 17., 24., 17., 26., 17., 28., 17., 30., 17., 32., 17.,
+        let r = _mm256_mask_div_ph(src, 0b0101010101010101, a, b);
+        let e = _mm256_set_ph(
+            4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5, 12.0, 0.5, 14.0, 0.5, 16.0, 0.5, 18.0, 0.5,
         );
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_maskz_add_ph() {
-        let a = _mm256_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-        );
-        let b = _mm256_set_ph(
-            16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0,
-        );
-        let r = _mm256_maskz_add_ph(0b0101010101010101, a, b);
+    unsafe fn test_mm256_maskz_div_ph() {
+        let a = _mm256_set1_ph(1.0);
+        let b = _mm256_set1_ph(2.0);
+        let r = _mm256_maskz_div_ph(0b0101010101010101, a, b);
         let e = _mm256_set_ph(
-            0., 17., 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., 0., 17.,
+            0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5,
         );
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_add_ph() {
-        let a = _mm512_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
-            31.0, 32.0,
-        );
-        let b = _mm512_set_ph(
-            32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0,
-            18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0,
-            3.0, 2.0, 1.0,
-        );
-        let r = _mm512_add_ph(a, b);
-        let e = _mm512_set1_ph(33.0);
+    unsafe fn test_mm512_div_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let r = _mm512_div_ph(a, b);
+        let e = _mm512_set1_ph(0.5);
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask_add_ph() {
-        let a = _mm512_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
-            31.0, 32.0,
-        );
-        let b = _mm512_set_ph(
-            32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0,
-            18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0,
-            3.0, 2.0, 1.0,
-        );
+    unsafe fn test_mm512_mask_div_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
         let src = _mm512_set_ph(
-            34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50.,
-            51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65.,
+            4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0,
+            19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0,
+            33.0, 34.0, 35.0,
         );
-        let r = _mm512_mask_add_ph(src, 0b01010101010101010101010101010101, a, b);
+        let r = _mm512_mask_div_ph(src, 0b01010101010101010101010101010101, a, b);
         let e = _mm512_set_ph(
-            34., 33., 36., 33., 38., 33., 40., 33., 42., 33., 44., 33., 46., 33., 48., 33., 50.,
-            33., 52., 33., 54., 33., 56., 33., 58., 33., 60., 33., 62., 33., 64., 33.,
+            4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5, 12.0, 0.5, 14.0, 0.5, 16.0, 0.5, 18.0, 0.5,
+            20.0, 0.5, 22.0, 0.5, 24.0, 0.5, 26.0, 0.5, 28.0, 0.5, 30.0, 0.5, 32.0, 0.5, 34.0, 0.5,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_maskz_add_ph() {
-        let a = _mm512_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
-            31.0, 32.0,
-        );
-        let b = _mm512_set_ph(
-            32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0,
-            18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0,
-            3.0, 2.0, 1.0,
-        );
-        let r = _mm512_maskz_add_ph(0b01010101010101010101010101010101, a, b);
+    unsafe fn test_mm512_maskz_div_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let r = _mm512_maskz_div_ph(0b01010101010101010101010101010101, a, b);
         let e = _mm512_set_ph(
-            0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0.,
-            33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33.,
+            0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0,
+            0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_add_round_ph() {
-        let a = _mm512_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
-            31.0, 32.0,
-        );
-        let b = _mm512_set_ph(
-            32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0,
-            18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0,
-            3.0, 2.0, 1.0,
-        );
-        let r = _mm512_add_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
-        let e = _mm512_set1_ph(33.0);
+    unsafe fn test_mm512_div_round_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let r = _mm512_div_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
+        let e = _mm512_set1_ph(0.5);
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask_add_round_ph() {
-        let a = _mm512_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
-            31.0, 32.0,
-        );
-        let b = _mm512_set_ph(
-            32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0,
-            18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0,
-            3.0, 2.0, 1.0,
-        );
+    unsafe fn test_mm512_mask_div_round_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
         let src = _mm512_set_ph(
-            34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50.,
-            51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65.,
+            4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0,
+            19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0,
+            33.0, 34.0, 35.0,
         );
-        let r = _mm512_mask_add_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+        let r = _mm512_mask_div_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             src,
             0b01010101010101010101010101010101,
             a,
             b,
         );
         let e = _mm512_set_ph(
-            34., 33., 36., 33., 38., 33., 40., 33., 42., 33., 44., 33., 46., 33., 48., 33., 50.,
-            33., 52., 33., 54., 33., 56., 33., 58., 33., 60., 33., 62., 33., 64., 33.,
+            4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5, 12.0, 0.5, 14.0, 0.5, 16.0, 0.5, 18.0, 0.5,
+            20.0, 0.5, 22.0, 0.5, 24.0, 0.5, 26.0, 0.5, 28.0, 0.5, 30.0, 0.5, 32.0, 0.5, 34.0, 0.5,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_maskz_add_round_ph() {
-        let a = _mm512_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
-            31.0, 32.0,
-        );
-        let b = _mm512_set_ph(
-            32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0,
-            18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0,
-            3.0, 2.0, 1.0,
-        );
-        let r = _mm512_maskz_add_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+    unsafe fn test_mm512_maskz_div_round_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let r = _mm512_maskz_div_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             0b01010101010101010101010101010101,
             a,
             b,
         );
         let e = _mm512_set_ph(
-            0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0.,
-            33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33.,
+            0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0,
+            0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_add_round_sh() {
+    unsafe fn test_mm_div_round_sh() {
         let a = _mm_set_sh(1.0);
         let b = _mm_set_sh(2.0);
-        let r = _mm_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
-        let e = _mm_set_sh(3.0);
+        let r = _mm_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
+        let e = _mm_set_sh(0.5);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask_add_round_sh() {
+    unsafe fn test_mm_mask_div_round_sh() {
         let a = _mm_set_sh(1.0);
         let b = _mm_set_sh(2.0);
         let src = _mm_set_sh(4.0);
-        let r = _mm_mask_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+        let r = _mm_mask_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             src, 0, a, b,
         );
         let e = _mm_set_sh(4.0);
         assert_eq_m128h(r, e);
-        let r = _mm_mask_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+        let r = _mm_mask_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             src, 1, a, b,
         );
-        let e = _mm_set_sh(3.0);
+        let e = _mm_set_sh(0.5);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_maskz_add_round_sh() {
+    unsafe fn test_mm_maskz_div_round_sh() {
         let a = _mm_set_sh(1.0);
         let b = _mm_set_sh(2.0);
         let r =
-            _mm_maskz_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b);
+            _mm_maskz_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b);
         let e = _mm_set_sh(0.0);
         assert_eq_m128h(r, e);
         let r =
-            _mm_maskz_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b);
-        let e = _mm_set_sh(3.0);
+            _mm_maskz_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b);
+        let e = _mm_set_sh(0.5);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_add_sh() {
+    unsafe fn test_mm_div_sh() {
         let a = _mm_set_sh(1.0);
         let b = _mm_set_sh(2.0);
-        let r = _mm_add_sh(a, b);
-        let e = _mm_set_sh(3.0);
+        let r = _mm_div_sh(a, b);
+        let e = _mm_set_sh(0.5);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask_add_sh() {
+    unsafe fn test_mm_mask_div_sh() {
         let a = _mm_set_sh(1.0);
         let b = _mm_set_sh(2.0);
         let src = _mm_set_sh(4.0);
-        let r = _mm_mask_add_sh(src, 0, a, b);
+        let r = _mm_mask_div_sh(src, 0, a, b);
         let e = _mm_set_sh(4.0);
         assert_eq_m128h(r, e);
-        let r = _mm_mask_add_sh(src, 1, a, b);
-        let e = _mm_set_sh(3.0);
+        let r = _mm_mask_div_sh(src, 1, a, b);
+        let e = _mm_set_sh(0.5);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_maskz_add_sh() {
+    unsafe fn test_mm_maskz_div_sh() {
         let a = _mm_set_sh(1.0);
         let b = _mm_set_sh(2.0);
-        let r = _mm_maskz_add_sh(0, a, b);
+        let r = _mm_maskz_div_sh(0, a, b);
         let e = _mm_set_sh(0.0);
         assert_eq_m128h(r, e);
-        let r = _mm_maskz_add_sh(1, a, b);
-        let e = _mm_set_sh(3.0);
+        let r = _mm_maskz_div_sh(1, a, b);
+        let e = _mm_set_sh(0.5);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_sub_ph() {
-        let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0);
-        let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0);
-        let r = _mm_sub_ph(a, b);
-        let e = _mm_set_ph(-7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0);
+    unsafe fn test_mm_mul_pch() {
+        let a = _mm_set1_pch(0.0, 1.0);
+        let b = _mm_set1_pch(0.0, 1.0);
+        let r = _mm_mul_pch(a, b);
+        let e = _mm_set1_pch(-1.0, 0.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_mask_sub_ph() {
-        let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0);
-        let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0);
-        let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.);
-        let r = _mm_mask_sub_ph(src, 0b01010101, a, b);
-        let e = _mm_set_ph(10., -5., 12., -1., 14., 3., 16., 7.);
+    unsafe fn test_mm_mask_mul_pch() {
+        let a = _mm_set1_pch(0.0, 1.0);
+        let b = _mm_set1_pch(0.0, 1.0);
+        let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0);
+        let r = _mm_mask_mul_pch(src, 0b0101, a, b);
+        let e = _mm_setr_ph(-1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_maskz_sub_ph() {
-        let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0);
-        let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0);
-        let r = _mm_maskz_sub_ph(0b01010101, a, b);
-        let e = _mm_set_ph(0., -5., 0., -1., 0., 3., 0., 7.);
+    unsafe fn test_mm_maskz_mul_pch() {
+        let a = _mm_set1_pch(0.0, 1.0);
+        let b = _mm_set1_pch(0.0, 1.0);
+        let r = _mm_maskz_mul_pch(0b0101, a, b);
+        let e = _mm_setr_ph(-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_sub_ph() {
-        let a = _mm256_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-        );
-        let b = _mm256_set_ph(
-            16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0,
-        );
-        let r = _mm256_sub_ph(a, b);
-        let e = _mm256_set_ph(
-            -15.0, -13.0, -11.0, -9.0, -7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0,
-            15.0,
-        );
+    unsafe fn test_mm256_mul_pch() {
+        let a = _mm256_set1_pch(0.0, 1.0);
+        let b = _mm256_set1_pch(0.0, 1.0);
+        let r = _mm256_mul_pch(a, b);
+        let e = _mm256_set1_pch(-1.0, 0.0);
        assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_mask_sub_ph() {
-        let a = _mm256_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-        );
-        let b = _mm256_set_ph(
-            16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0,
-        );
-        let src = _mm256_set_ph(
-            18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33.,
+    unsafe fn test_mm256_mask_mul_pch() {
+        let a = _mm256_set1_pch(0.0, 1.0);
+        let b = _mm256_set1_pch(0.0, 1.0);
+        let src = _mm256_setr_ph(
+            2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0,
         );
-        let r = _mm256_mask_sub_ph(src, 0b0101010101010101, a, b);
-        let e = _mm256_set_ph(
-            18., -13., 20., -9., 22., -5., 24., -1., 26., 3., 28., 7., 30., 11., 32., 15.,
+        let r = _mm256_mask_mul_pch(src, 0b01010101, a, b);
+        let e = _mm256_setr_ph(
+            -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0,
         );
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_maskz_sub_ph() {
-        let a = _mm256_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+    unsafe fn test_mm256_maskz_mul_pch() {
+        let a = _mm256_set1_pch(0.0, 1.0);
+        let b = _mm256_set1_pch(0.0, 1.0);
+        let r = _mm256_maskz_mul_pch(0b01010101, a, b);
+        let e = _mm256_setr_ph(
+            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
         );
-        let b = _mm256_set_ph(
-            16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0,
+        assert_eq_m256h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm512_mul_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, 1.0);
+        let r = _mm512_mul_pch(a, b);
+        let e = _mm512_set1_pch(-1.0, 0.0);
+        assert_eq_m512h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm512_mask_mul_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, 1.0);
+        let src = _mm512_setr_ph(
+            2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0,
+            18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0,
+            32.0, 33.0,
+        );
+        let r = _mm512_mask_mul_pch(src, 0b0101010101010101, a, b);
+        let e = _mm512_setr_ph(
+            -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0,
+            -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0,
+            33.0,
         );
-        let r = _mm256_maskz_sub_ph(0b0101010101010101, a, b);
-        let e = _mm256_set_ph(
-            0., -13., 0., -9., 0., -5., 0., -1., 0., 3., 0., 7., 0., 11., 0., 15.,
+        assert_eq_m512h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm512_maskz_mul_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, 1.0);
+        let r = _mm512_maskz_mul_pch(0b0101010101010101, a, b);
+        let e = _mm512_setr_ph(
+            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
+            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
         );
-        assert_eq_m256h(r, e);
+        assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_sub_ph() {
-        let a = _mm512_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
-            31.0, 32.0,
+    unsafe fn test_mm512_mul_round_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, 1.0);
+        let r = _mm512_mul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
+        let e = _mm512_set1_pch(-1.0, 0.0);
+        assert_eq_m512h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm512_mask_mul_round_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, 1.0);
+        let src = _mm512_setr_ph(
+            2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0,
+            18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0,
+            32.0, 33.0,
+        );
+        let r = _mm512_mask_mul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            src,
+            0b0101010101010101,
+            a,
+            b,
         );
         let b = _mm512_set_ph(
             32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0,
             18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0,
             3.0, 2.0, 1.0,
+        let e = _mm512_setr_ph(
+            -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0,
+            -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0,
+            33.0,
         );
-        let r = _mm512_sub_ph(a, b);
-        let e = _mm512_set_ph(
-            -31.0, -29.0, -27.0, -25.0, -23.0, -21.0, -19.0, -17.0, -15.0, -13.0, -11.0, -9.0,
-            -7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0,
-            23.0, 25.0, 27.0, 29.0, 31.0,
+        assert_eq_m512h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm512_maskz_mul_round_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, 1.0);
+        let r = _mm512_maskz_mul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            0b0101010101010101,
+            a,
+            b,
+        );
+        let e = _mm512_setr_ph(
+            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
+            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
+        );
+        assert_eq_m512h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm_mul_round_sch() {
+        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
+        let r = _mm_mul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
+        let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        assert_eq_m128h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm_mask_mul_round_sch() {
+        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
+        let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0);
+        let r = _mm_mask_mul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            src, 0, a, b,
+        );
+        let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        assert_eq_m128h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm_maskz_mul_round_sch() {
+        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
+        let r =
+            _mm_maskz_mul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b);
+        let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        assert_eq_m128h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm_mul_sch() {
+        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
+        let r = _mm_mul_sch(a, b);
+        let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        assert_eq_m128h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm_mask_mul_sch() {
+        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
+        let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0);
+        let r = _mm_mask_mul_sch(src, 0, a, b);
+        let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        assert_eq_m128h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm_maskz_mul_sch() {
+        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
+        let r = _mm_maskz_mul_sch(0, a, b);
+        let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        assert_eq_m128h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm_fmul_pch() {
+        let a = _mm_set1_pch(0.0, 1.0);
+        let b = _mm_set1_pch(0.0, 1.0);
+        let r = _mm_fmul_pch(a, b);
+        let e = _mm_set1_pch(-1.0, 0.0);
+        assert_eq_m128h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm_mask_fmul_pch() {
+        let a = _mm_set1_pch(0.0, 1.0);
+        let b = _mm_set1_pch(0.0, 1.0);
+        let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0);
+        let r = _mm_mask_fmul_pch(src, 0b0101, a, b);
+        let e = _mm_setr_ph(-1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0);
+        assert_eq_m128h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm_maskz_fmul_pch() {
+        let a = _mm_set1_pch(0.0, 1.0);
+        let b = _mm_set1_pch(0.0, 1.0);
+        let r = _mm_maskz_fmul_pch(0b0101, a, b);
+        let e = _mm_setr_ph(-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0);
+        assert_eq_m128h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm256_fmul_pch() {
+        let a = _mm256_set1_pch(0.0, 1.0);
+        let b = _mm256_set1_pch(0.0, 1.0);
+        let r = _mm256_fmul_pch(a, b);
+        let e = _mm256_set1_pch(-1.0, 0.0);
+        assert_eq_m256h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm256_mask_fmul_pch() {
+        let a = _mm256_set1_pch(0.0, 1.0);
+        let b = _mm256_set1_pch(0.0, 1.0);
+        let src = _mm256_setr_ph(
+            2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0,
         );
+        let r = _mm256_mask_fmul_pch(src, 0b01010101, a, b);
+        let e = _mm256_setr_ph(
+            -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0,
+        );
+        assert_eq_m256h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm256_maskz_fmul_pch() {
+        let a = _mm256_set1_pch(0.0, 1.0);
+        let b = _mm256_set1_pch(0.0, 1.0);
+        let r = _mm256_maskz_fmul_pch(0b01010101, a, b);
+        let e = _mm256_setr_ph(
+            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
+        );
+        assert_eq_m256h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm512_fmul_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, 1.0);
+        let r = _mm512_fmul_pch(a, b);
+        let e = _mm512_set1_pch(-1.0, 0.0);
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask_sub_ph() {
-        let a = _mm512_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
-            31.0, 32.0,
-        );
-        let b = _mm512_set_ph(
-            32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0,
-            18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0,
-            3.0, 2.0, 1.0,
-        );
-        let src = _mm512_set_ph(
-            34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50.,
-            51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65.,
-        );
-        let r = _mm512_mask_sub_ph(src, 0b01010101010101010101010101010101, a, b);
-        let e = _mm512_set_ph(
-            34., -29., 36., -25., 38., -21., 40., -17., 42., -13., 44., -9., 46., -5., 48., -1.,
-            50., 3., 52., 7., 54., 11., 56., 15., 58., 19., 60., 23., 62., 27., 64., 31.,
+    unsafe fn test_mm512_mask_fmul_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, 1.0);
+        let src = _mm512_setr_ph(
+            2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0,
+            18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0,
+            32.0, 33.0,
+        );
+        let r = _mm512_mask_fmul_pch(src, 0b0101010101010101, a, b);
+        let e = _mm512_setr_ph(
+            -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0,
+            -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0,
+            33.0,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_maskz_sub_ph() {
-        let a = _mm512_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
-            31.0, 32.0,
-        );
-        let b = _mm512_set_ph(
-            32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0,
-            18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0,
-            3.0, 2.0, 1.0,
-        );
-        let r = _mm512_maskz_sub_ph(0b01010101010101010101010101010101, a, b);
-        let e = _mm512_set_ph(
-            0., -29., 0., -25., 0., -21., 0., -17., 0., -13., 0., -9., 0., -5., 0., -1., 0., 3.,
-            0., 7., 0., 11., 0., 15., 0., 19., 0., 23., 0., 27., 0., 31.,
+    unsafe fn test_mm512_maskz_fmul_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, 1.0);
+        let r = _mm512_maskz_fmul_pch(0b0101010101010101, a, b);
+        let e = _mm512_setr_ph(
+            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
+            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_sub_round_ph() {
-        let a = _mm512_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
-            31.0, 32.0,
-        );
-        let b = _mm512_set_ph(
-            32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0,
-            18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0,
-            3.0, 2.0, 1.0,
-        );
-        let r = _mm512_sub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
-        let e = _mm512_set_ph(
-            -31.0, -29.0, -27.0, -25.0, -23.0, -21.0, -19.0, -17.0, -15.0, -13.0, -11.0, -9.0,
-            -7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0,
-            23.0, 25.0, 27.0, 29.0, 31.0,
-        );
+    unsafe fn test_mm512_fmul_round_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, 1.0);
+        let r = _mm512_fmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
+        let e = _mm512_set1_pch(-1.0, 0.0);
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask_sub_round_ph() {
-        let a = _mm512_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
-            31.0, 32.0,
-        );
-        let b = _mm512_set_ph(
-            32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0,
-            18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0,
-            3.0, 2.0, 1.0,
-        );
-        let src = _mm512_set_ph(
-            34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50.,
-            51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65.,
-        );
-        let r = _mm512_mask_sub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+    unsafe fn test_mm512_mask_fmul_round_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, 1.0);
+        let src = _mm512_setr_ph(
+            2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0,
+            18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0,
+            32.0, 33.0,
+        );
+        let r = _mm512_mask_fmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             src,
-            0b01010101010101010101010101010101,
+            0b0101010101010101,
             a,
             b,
         );
-        let e = _mm512_set_ph(
-            34., -29., 36., -25., 38., -21., 40., -17., 42., -13., 44., -9., 46., -5., 48., -1.,
-            50., 3., 52., 7., 54., 11., 56., 15., 58., 19., 60., 23., 62., 27., 64., 31.,
+        let e = _mm512_setr_ph(
+            -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0,
+            -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0,
+            33.0,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_maskz_sub_round_ph() {
-        let a = _mm512_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
-            31.0, 32.0,
-        );
-        let b = _mm512_set_ph(
-            32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0,
-            18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0,
-            3.0, 2.0, 1.0,
-        );
-        let r = _mm512_maskz_sub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            0b01010101010101010101010101010101,
+    unsafe fn test_mm512_maskz_fmul_round_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, 1.0);
+        let r = _mm512_maskz_fmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            0b0101010101010101,
             a,
             b,
         );
-        let e = _mm512_set_ph(
-            0., -29., 0., -25., 0., -21., 0., -17., 0., -13., 0., -9., 0., -5., 0., -1., 0., 3.,
-            0., 7., 0., 11., 0., 15., 0., 19., 0., 23., 0., 27., 0., 31.,
+        let e = _mm512_setr_ph(
+            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
+            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_sub_round_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let r = _mm_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
-        let e = _mm_set_sh(-1.0);
+    unsafe fn test_mm_fmul_round_sch() {
+        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
+        let r = _mm_fmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
+        let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask_sub_round_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let src = _mm_set_sh(4.0);
-        let r = _mm_mask_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+    unsafe fn test_mm_mask_fmul_round_sch() {
+        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
+        let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0);
+        let r = _mm_mask_fmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             src, 0, a, b,
         );
-        let e = _mm_set_sh(4.0);
-        assert_eq_m128h(r, e);
-        let r = _mm_mask_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            src, 1, a, b,
-        );
-        let e = _mm_set_sh(-1.0);
+        let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_maskz_sub_round_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let r =
-            _mm_maskz_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b);
-        let e = _mm_set_sh(0.0);
-        assert_eq_m128h(r, e);
+    unsafe fn test_mm_maskz_fmul_round_sch() {
+        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
         let r =
-            _mm_maskz_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b);
-        let e = _mm_set_sh(-1.0);
+            _mm_maskz_fmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b);
+        let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_sub_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let r = _mm_sub_sh(a, b);
-        let e = _mm_set_sh(-1.0);
+    unsafe fn test_mm_fmul_sch() {
+        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
+        let r = _mm_fmul_sch(a, b);
+        let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask_sub_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let src = _mm_set_sh(4.0);
-        let r = _mm_mask_sub_sh(src, 0, a, b);
-        let e = _mm_set_sh(4.0);
-        assert_eq_m128h(r, e);
-        let r = _mm_mask_sub_sh(src, 1, a, b);
-        let e = _mm_set_sh(-1.0);
+    unsafe fn test_mm_mask_fmul_sch() {
+        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
+        let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0);
+        let r = _mm_mask_fmul_sch(src, 0, a, b);
+        let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_maskz_sub_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let r = _mm_maskz_sub_sh(0, a, b);
-        let e = _mm_set_sh(0.0);
-        assert_eq_m128h(r, e);
-        let r = _mm_maskz_sub_sh(1, a, b);
-        let e = _mm_set_sh(-1.0);
+    unsafe fn test_mm_maskz_fmul_sch() {
+        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
+        let r = _mm_maskz_fmul_sch(0, a, b);
+        let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_mul_ph() {
-        let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0);
-        let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0);
-        let r = _mm_mul_ph(a, b);
-        let e = _mm_set_ph(8.0, 14.0, 18.0, 20.0, 20.0, 18.0, 14.0, 8.0);
+    unsafe fn test_mm_cmul_pch() {
+        let a = _mm_set1_pch(0.0, 1.0);
+        let b = _mm_set1_pch(0.0, -1.0);
+        let r = _mm_cmul_pch(a, b);
+        let e = _mm_set1_pch(-1.0, 0.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_mask_mul_ph() {
-        let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0);
-        let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0);
-        let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.);
-        let r = _mm_mask_mul_ph(src, 0b01010101, a, b);
-        let e = _mm_set_ph(10., 14., 12., 20., 14., 18., 16., 8.);
+    unsafe fn test_mm_mask_cmul_pch() {
+        let a = _mm_set1_pch(0.0, 1.0);
+        let b = _mm_set1_pch(0.0, -1.0);
+        let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0);
+        let r = _mm_mask_cmul_pch(src, 0b0101, a, b);
+        let e = _mm_setr_ph(-1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_maskz_mul_ph() {
-        let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0);
-        let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0);
-        let r = _mm_maskz_mul_ph(0b01010101, a, b);
-        let e = _mm_set_ph(0., 14., 0., 20., 0., 18., 0., 8.);
+    unsafe fn test_mm_maskz_cmul_pch() {
+        let a = _mm_set1_pch(0.0, 1.0);
+        let b = _mm_set1_pch(0.0, -1.0);
+        let r = _mm_maskz_cmul_pch(0b0101, a, b);
+        let e = _mm_setr_ph(-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_mul_ph() {
-        let a = _mm256_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-        );
-        let b = _mm256_set_ph(
-            16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0,
-        );
-        let r = _mm256_mul_ph(a, b);
-        let e = _mm256_set_ph(
-            16.0, 30.0, 42.0, 52.0, 60.0, 66.0, 70.0, 72.0, 72.0, 70.0, 66.0, 60.0, 52.0, 42.0,
-            30.0, 16.0,
-        );
+    unsafe fn test_mm256_cmul_pch() {
+        let a = _mm256_set1_pch(0.0, 1.0);
+        let b = _mm256_set1_pch(0.0, -1.0);
+        let r = _mm256_cmul_pch(a, b);
+        let e = _mm256_set1_pch(-1.0, 0.0);
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_mask_mul_ph() {
-        let a = _mm256_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-        );
-        let b = _mm256_set_ph(
-            16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0,
+    unsafe fn test_mm256_mask_cmul_pch() {
+        let a = _mm256_set1_pch(0.0, 1.0);
+        let b = _mm256_set1_pch(0.0, -1.0);
+        let src = _mm256_setr_ph(
+            2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0,
         );
-        let src = _mm256_set_ph(
-            18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33.,
-        );
-        let r = _mm256_mask_mul_ph(src, 0b0101010101010101, a, b);
-        let e = _mm256_set_ph(
-            18., 30., 20., 52., 22., 66., 24., 72., 26., 70., 28., 60., 30., 42., 32., 16.,
+        let r = _mm256_mask_cmul_pch(src, 0b01010101, a, b);
+        let e = _mm256_setr_ph(
+            -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0,
         );
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_maskz_mul_ph() {
-        let a = _mm256_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-        );
-        let b = _mm256_set_ph(
-            16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0,
-        );
-        let r = _mm256_maskz_mul_ph(0b0101010101010101, a, b);
-        let e = _mm256_set_ph(
-            0., 30., 0., 52., 0., 66., 0., 72., 0., 70., 0., 60., 0., 42., 0., 16.,
+    unsafe fn test_mm256_maskz_cmul_pch() {
+        let a = _mm256_set1_pch(0.0, 1.0);
+        let b = _mm256_set1_pch(0.0, -1.0);
+        let r = _mm256_maskz_cmul_pch(0b01010101, a, b);
+        let e = _mm256_setr_ph(
+            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
         );
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mul_ph() {
-        let a = _mm512_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
-            31.0, 32.0,
-        );
-        let b = _mm512_set_ph(
-            32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0,
-            18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0,
-            3.0, 2.0, 1.0,
-        );
-        let r = _mm512_mul_ph(a, b);
-        let e = _mm512_set_ph(
-            32.0, 62.0, 90.0, 116.0, 140.0, 162.0, 182.0, 200.0, 216.0, 230.0, 242.0, 252.0, 260.0,
-            266.0, 270.0, 272.0, 272.0, 270.0, 266.0, 260.0, 252.0, 242.0, 230.0, 216.0, 200.0,
-            182.0, 162.0, 140.0, 116.0, 90.0, 62.0, 32.0,
-        );
+    unsafe fn test_mm512_cmul_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, -1.0);
+        let r = _mm512_cmul_pch(a, b);
+        let e = _mm512_set1_pch(-1.0, 0.0);
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask_mul_ph() {
-        let a = _mm512_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
-            31.0, 32.0,
-        );
-        let b = _mm512_set_ph(
-            32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0,
-            18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0,
-            3.0, 2.0, 1.0,
-        );
-        let src = _mm512_set_ph(
-            34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50.,
-            51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65.,
-        );
-        let r = _mm512_mask_mul_ph(src, 0b01010101010101010101010101010101, a, b);
-        let e = _mm512_set_ph(
-            34., 62., 36., 116., 38., 162., 40., 200., 42., 230., 44., 252., 46., 266., 48., 272.,
-            50., 270., 52., 260., 54., 242., 56., 216., 58., 182., 60., 140., 62., 90., 64., 32.,
+    unsafe fn test_mm512_mask_cmul_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, -1.0);
+        let src = _mm512_setr_ph(
+            2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0,
+            18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0,
+            32.0, 33.0,
+        );
+        let r = _mm512_mask_cmul_pch(src, 0b0101010101010101, a, b);
+        let e = _mm512_setr_ph(
+            -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0,
+            -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0,
+            33.0,
         );
         assert_eq_m512h(r, e);
     }
-
-    #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_maskz_mul_ph() {
-        let a = _mm512_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
-            31.0, 32.0,
-        );
-        let b = _mm512_set_ph(
-            32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0,
-            18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0,
-            3.0, 2.0, 1.0,
-        );
-        let r = _mm512_maskz_mul_ph(0b01010101010101010101010101010101, a, b);
-        let e = _mm512_set_ph(
-            0., 62., 0., 116., 0., 162., 0., 200., 0., 230., 0., 252., 0., 266., 0., 272., 0.,
-            270., 0., 260., 0., 242., 0., 216., 0., 182., 0., 140., 0., 90., 0., 32.,
+-
+-    #[simd_test(enable = "avx512fp16")]
+-    unsafe fn test_mm512_maskz_mul_ph() {
+-        let a = _mm512_set_ph(
+-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+-            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
+-            31.0, 32.0,
+-        );
+-        let b = _mm512_set_ph(
+-            32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0,
+-            18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0,
+-            3.0, 2.0, 1.0,
+-        );
+-        let r = _mm512_maskz_mul_ph(0b01010101010101010101010101010101, a, b);
+-        let e = _mm512_set_ph(
+-            0., 62., 0., 116., 0., 162., 0., 200., 0., 230., 0., 252., 0., 266., 0., 272., 0.,
+-            270., 0., 260., 0., 242., 0., 216., 0., 182., 0., 140., 0., 90., 0., 32.,
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm512_maskz_cmul_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, -1.0);
+        let r = _mm512_maskz_cmul_pch(0b0101010101010101, a, b);
+        let e = _mm512_setr_ph(
+            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
+            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mul_round_ph() {
-        let a = _mm512_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
-            31.0, 32.0,
-        );
-        let b = _mm512_set_ph(
-            32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0,
-            18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0,
-            3.0, 2.0, 1.0,
-        );
-        let r = _mm512_mul_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
-        let e = _mm512_set_ph(
-            32.0, 62.0, 90.0, 116.0, 140.0, 162.0, 182.0, 200.0, 216.0, 230.0, 242.0, 252.0, 260.0,
-            266.0, 270.0, 272.0, 272.0, 270.0, 266.0, 260.0, 252.0, 242.0, 230.0, 216.0, 200.0,
-            182.0, 162.0, 140.0, 116.0, 90.0, 62.0, 32.0,
-        );
+    unsafe fn test_mm512_cmul_round_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, -1.0);
+        let r = _mm512_cmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
+        let e = _mm512_set1_pch(-1.0, 0.0);
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask_mul_round_ph() {
-        let a = _mm512_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
-            31.0, 32.0,
-        );
-        let b = _mm512_set_ph(
-            32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0,
-            18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0,
-            3.0, 2.0, 1.0,
-        );
-        let src = _mm512_set_ph(
-            34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50.,
-            51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65.,
-        );
-        let r = _mm512_mask_mul_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+    unsafe fn test_mm512_mask_cmul_round_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, -1.0);
+        let src = _mm512_setr_ph(
+            2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0,
+            18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0,
+            32.0, 33.0,
+        );
+        let r = _mm512_mask_cmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             src,
-            0b01010101010101010101010101010101,
+            0b0101010101010101,
             a,
             b,
         );
-        let e = _mm512_set_ph(
-            34., 62., 36., 116., 38., 162., 40., 200., 42., 230., 44., 252., 46., 266., 48., 272.,
-            50., 270., 52., 260., 54., 242., 56., 216., 58., 182., 60., 140., 62., 90., 64., 32.,
+        let e = _mm512_setr_ph(
+            -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0,
+            -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0,
+            33.0,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_maskz_mul_round_ph() {
-        let a = _mm512_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
-            31.0, 32.0,
-        );
-        let b = _mm512_set_ph(
-            32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0,
-            18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0,
-            3.0, 2.0, 1.0,
-        );
-        let r = _mm512_maskz_mul_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            0b01010101010101010101010101010101,
+    unsafe fn test_mm512_maskz_cmul_round_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, -1.0);
+        let r = _mm512_maskz_cmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            0b0101010101010101,
             a,
             b,
         );
-        let e = _mm512_set_ph(
-            0., 62., 0., 116., 0., 162., 0., 200., 0., 230., 0., 252., 0., 266., 0., 272., 0.,
-            270., 0., 260., 0., 242., 0., 216., 0., 182., 0., 140., 0., 90., 0., 32.,
+        let e = _mm512_setr_ph(
+            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
+            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mul_round_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let r = _mm_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
-        let e = _mm_set_sh(2.0);
+    unsafe fn test_mm_cmul_sch() {
+        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0);
+        let r = _mm_cmul_sch(a, b);
+        let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask_mul_round_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let src = _mm_set_sh(4.0);
-        let r = _mm_mask_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            src, 0, a, b,
-        );
-        let e = _mm_set_sh(4.0);
-        assert_eq_m128h(r, e);
-        let r = _mm_mask_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            src, 1, a, b,
-        );
-        let e = _mm_set_sh(2.0);
+    unsafe fn test_mm_mask_cmul_sch() {
+        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0);
+        let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0);
+        let r = _mm_mask_cmul_sch(src, 0, a, b);
+        let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_maskz_mul_round_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let r =
-            _mm_maskz_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b);
-        let e = _mm_set_sh(0.0);
-        assert_eq_m128h(r, e);
-        let r =
-            _mm_maskz_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b);
-        let e = _mm_set_sh(2.0);
+    unsafe fn test_mm_maskz_cmul_sch() {
+        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0);
+        let r = _mm_maskz_cmul_sch(0, a, b);
+        let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mul_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let r = _mm_mul_sh(a, b);
-        let e = _mm_set_sh(2.0);
+    unsafe fn test_mm_cmul_round_sch() {
+        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0);
+        let r = _mm_cmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
+        let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask_mul_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let src = _mm_set_sh(4.0);
-        let r = _mm_mask_mul_sh(src, 0, a, b);
-        let e = _mm_set_sh(4.0);
-        assert_eq_m128h(r, e);
-        let r = _mm_mask_mul_sh(src, 1, a, b);
-        let e = _mm_set_sh(2.0);
+    unsafe fn test_mm_mask_cmul_round_sch() {
+        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0);
+        let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0);
+        let r = _mm_mask_cmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            src, 0, a, b,
+        );
+        let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_maskz_mul_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let r = _mm_maskz_mul_sh(0, a, b);
-        let e = _mm_set_sh(0.0);
-        assert_eq_m128h(r, e);
-        let r = _mm_maskz_mul_sh(1, a, b);
-        let e = _mm_set_sh(2.0);
+    unsafe fn test_mm_maskz_cmul_round_sch() {
+        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0);
+        let r =
+            _mm_maskz_cmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b);
+        let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_div_ph() {
-        let a = _mm_set1_ph(1.0);
-        let b = _mm_set1_ph(2.0);
-        let r = _mm_div_ph(a, b);
-        let e = _mm_set1_ph(0.5);
+    unsafe fn test_mm_fcmul_pch() {
+        let a = _mm_set1_pch(0.0, 1.0);
+        let b = _mm_set1_pch(0.0, -1.0);
+        let r = _mm_fcmul_pch(a, b);
+        let e = _mm_set1_pch(-1.0, 0.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_mask_div_ph() {
-        let a = _mm_set1_ph(1.0);
-        let b = _mm_set1_ph(2.0);
-        let src = _mm_set_ph(4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0);
-        let r = _mm_mask_div_ph(src, 0b01010101, a, b);
-        let e = _mm_set_ph(4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5);
+    unsafe fn test_mm_mask_fcmul_pch() {
+        let a = _mm_set1_pch(0.0, 1.0);
+        let b = _mm_set1_pch(0.0, -1.0);
+        let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0);
+        let r = _mm_mask_fcmul_pch(src, 0b0101, a, b);
+        let e = _mm_setr_ph(-1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_maskz_div_ph() {
-        let a = _mm_set1_ph(1.0);
-        let b = _mm_set1_ph(2.0);
-        let r = _mm_maskz_div_ph(0b01010101, a, b);
-        let e = _mm_set_ph(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5);
+    unsafe fn test_mm_maskz_fcmul_pch() {
+        let a = _mm_set1_pch(0.0, 1.0);
+        let b = _mm_set1_pch(0.0, -1.0);
+        let r = _mm_maskz_fcmul_pch(0b0101, a, b);
+        let e = _mm_setr_ph(-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_div_ph() {
-        let a = _mm256_set1_ph(1.0);
-        let b = _mm256_set1_ph(2.0);
-        let r = _mm256_div_ph(a, b);
-        let e = _mm256_set1_ph(0.5);
+    unsafe fn test_mm256_fcmul_pch() {
+        let a = _mm256_set1_pch(0.0, 1.0);
+        let b = _mm256_set1_pch(0.0, -1.0);
+        let r = _mm256_fcmul_pch(a, b);
+        let e = _mm256_set1_pch(-1.0, 0.0);
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_mask_div_ph() {
-        let a = _mm256_set1_ph(1.0);
-        let b = _mm256_set1_ph(2.0);
-        let src = _mm256_set_ph(
-            4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0,
-            19.0,
+    unsafe fn test_mm256_mask_fcmul_pch() {
+        let a = _mm256_set1_pch(0.0, 1.0);
+        let b = _mm256_set1_pch(0.0, -1.0);
+        let src = _mm256_setr_ph(
+            2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0,
         );
-        let r = _mm256_mask_div_ph(src, 0b0101010101010101, a, b);
-        let e = _mm256_set_ph(
-            4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5, 12.0, 0.5, 14.0, 0.5, 16.0, 0.5, 18.0, 0.5,
+        let r = _mm256_mask_fcmul_pch(src, 0b01010101, a, b);
+        let e = _mm256_setr_ph(
+            -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0,
         );
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_maskz_div_ph() {
-        let a = _mm256_set1_ph(1.0);
-        let b = _mm256_set1_ph(2.0);
-        let r = _mm256_maskz_div_ph(0b0101010101010101, a, b);
-        let e = _mm256_set_ph(
-            0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5,
+    unsafe fn test_mm256_maskz_fcmul_pch() {
+        let a = _mm256_set1_pch(0.0, 1.0);
+        let b = _mm256_set1_pch(0.0, -1.0);
+        let r = _mm256_maskz_fcmul_pch(0b01010101, a, b);
+        let e = _mm256_setr_ph(
+            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
         );
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_div_ph() {
-        let a = _mm512_set1_ph(1.0);
-        let b = _mm512_set1_ph(2.0);
-        let r = _mm512_div_ph(a, b);
-        let e = _mm512_set1_ph(0.5);
+    unsafe fn test_mm512_fcmul_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, -1.0);
+        let r = _mm512_fcmul_pch(a, b);
+        let e = _mm512_set1_pch(-1.0, 0.0);
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask_div_ph() {
-        let a = _mm512_set1_ph(1.0);
-        let b = _mm512_set1_ph(2.0);
-        let src = _mm512_set_ph(
-            4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0,
-            19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0,
-            33.0, 34.0, 35.0,
-        );
-        let r = _mm512_mask_div_ph(src, 0b01010101010101010101010101010101, a, b);
-        let e = _mm512_set_ph(
-            4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5, 12.0, 0.5, 14.0, 0.5, 16.0, 0.5, 18.0, 0.5,
-            20.0, 0.5, 22.0, 0.5, 24.0, 0.5, 26.0, 0.5, 28.0, 0.5, 30.0, 0.5, 32.0, 0.5, 34.0, 0.5,
+    unsafe fn test_mm512_mask_fcmul_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, -1.0);
+        let src = _mm512_setr_ph(
+            2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0,
+            18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0,
+            32.0, 33.0,
+        );
+        let r = _mm512_mask_fcmul_pch(src, 0b0101010101010101, a, b);
+        let e = _mm512_setr_ph(
+            -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0,
+            -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0,
+            33.0,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_maskz_div_ph() {
-        let a = _mm512_set1_ph(1.0);
-        let b = _mm512_set1_ph(2.0);
-        let r = _mm512_maskz_div_ph(0b01010101010101010101010101010101, a, b);
-        let e = _mm512_set_ph(
-            0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0,
-            0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5,
+    unsafe fn test_mm512_maskz_fcmul_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, -1.0);
+        let r = _mm512_maskz_fcmul_pch(0b0101010101010101, a, b);
+        let e = _mm512_setr_ph(
+            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
+            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_div_round_ph() {
-        let a = _mm512_set1_ph(1.0);
-        let b = _mm512_set1_ph(2.0);
-        let r = _mm512_div_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
-        let e = _mm512_set1_ph(0.5);
+    unsafe fn test_mm512_fcmul_round_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, -1.0);
+        let r = _mm512_fcmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
+        let e = _mm512_set1_pch(-1.0, 0.0);
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask_div_round_ph() {
-        let a = _mm512_set1_ph(1.0);
-        let b = _mm512_set1_ph(2.0);
-        let src = _mm512_set_ph(
-            4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0,
-            19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0,
-            33.0, 34.0, 35.0,
-        );
-        let r = _mm512_mask_div_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+    unsafe fn test_mm512_mask_fcmul_round_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, -1.0);
+        let src = _mm512_setr_ph(
+            2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0,
+            18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0,
+            32.0, 33.0,
+        );
+        let r = _mm512_mask_fcmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             src,
-            0b01010101010101010101010101010101,
+            0b0101010101010101,
             a,
             b,
         );
-        let e = _mm512_set_ph(
-            4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5, 12.0, 0.5, 14.0, 0.5, 16.0, 0.5, 18.0, 0.5,
-            20.0, 0.5, 22.0, 0.5, 24.0, 0.5, 26.0, 0.5, 28.0, 0.5, 30.0, 0.5, 32.0, 0.5, 34.0, 0.5,
+        let e = _mm512_setr_ph(
+            -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0,
+            -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0,
+            33.0,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_maskz_div_round_ph() {
-        let a = _mm512_set1_ph(1.0);
-        let b = _mm512_set1_ph(2.0);
-        let r = _mm512_maskz_div_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            0b01010101010101010101010101010101,
+    unsafe fn test_mm512_maskz_fcmul_round_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, -1.0);
+        let r = _mm512_maskz_fcmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            0b0101010101010101,
             a,
             b,
         );
-        let e = _mm512_set_ph(
-            0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0,
-            0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5,
+        let e = _mm512_setr_ph(
+            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
+            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_div_round_sh() {
+    unsafe fn test_mm_fcmul_sch() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let r = _mm_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
-        let e = _mm_set_sh(0.5);
+        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0);
+        let r = _mm_fcmul_sch(a, b);
+        let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask_div_round_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let src = _mm_set_sh(4.0);
-        let r = _mm_mask_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            src, 0, a, b,
-        );
-        let e = _mm_set_sh(4.0);
-        assert_eq_m128h(r, e);
-        let r = _mm_mask_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            src, 1, a, b,
-        );
-        let e = _mm_set_sh(0.5);
+    unsafe fn test_mm_mask_fcmul_sch() {
+        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0);
+        let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0);
+        let r = _mm_mask_fcmul_sch(src, 0, a, b);
+        let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-
unsafe fn test_mm_maskz_div_round_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = - _mm_maskz_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); - let e = _mm_set_sh(0.0); - assert_eq_m128h(r, e); - let r = - _mm_maskz_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); - let e = _mm_set_sh(0.5); + unsafe fn test_mm_maskz_fcmul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let r = _mm_maskz_fcmul_sch(0, a, b); + let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_div_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_div_sh(a, b); - let e = _mm_set_sh(0.5); + unsafe fn test_mm_fcmul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let r = _mm_fcmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_div_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let src = _mm_set_sh(4.0); - let r = _mm_mask_div_sh(src, 0, a, b); - let e = _mm_set_sh(4.0); - assert_eq_m128h(r, e); - let r = _mm_mask_div_sh(src, 1, a, b); - let e = _mm_set_sh(0.5); + unsafe fn test_mm_mask_fcmul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); + let r = _mm_mask_fcmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, + ); + let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_div_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_maskz_div_sh(0, a, b); - let e = _mm_set_sh(0.0); - assert_eq_m128h(r, e); - let r = _mm_maskz_div_sh(1, a, b); - let e = _mm_set_sh(0.5); + unsafe fn test_mm_maskz_fcmul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let r = + _mm_maskz_fcmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } } From 0bec23b9ff5b680c0808bf3e6ba905cb5b099515 Mon Sep 17 00:00:00 2001 From: sayantn Date: Fri, 12 Jul 2024 12:39:31 +0530 Subject: [PATCH 04/11] AVX512FP16 Part 3: FMA --- crates/core_arch/missing-x86.md | 188 - crates/core_arch/src/x86/avx512fp16.rs | 9660 +++++++++++++++++++----- 2 files changed, 7894 insertions(+), 1954 deletions(-) diff --git a/crates/core_arch/missing-x86.md b/crates/core_arch/missing-x86.md index c66e1e728c..08b3ab9a18 100644 --- a/crates/core_arch/missing-x86.md +++ b/crates/core_arch/missing-x86.md @@ -55,10 +55,8 @@ * [ ] [`_mm256_cvtsh_h`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtsh_h) * [ ] [`_mm256_set1_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_set1_pch) - * [ ] [`_mm512_abs_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_abs_ph) * [ ] 
[`_mm512_cmp_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmp_ph_mask) * [ ] [`_mm512_cmp_round_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmp_round_ph_mask) - * [ ] [`_mm512_conj_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_conj_pch) * [ ] [`_mm512_cvt_roundepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundepi16_ph) * [ ] [`_mm512_cvt_roundepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundepi32_ph) * [ ] [`_mm512_cvt_roundepi64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundepi64_ph) @@ -104,47 +102,14 @@ * [ ] [`_mm512_cvtx_roundps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtx_roundps_ph) * [ ] [`_mm512_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtxph_ps) * [ ] [`_mm512_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtxps_ph) - * [ ] [`_mm512_fcmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fcmadd_pch) - * [ ] [`_mm512_fcmadd_round_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fcmadd_round_pch) - * [ ] [`_mm512_fmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmadd_pch) - * [ ] [`_mm512_fmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmadd_ph) - * [ ] [`_mm512_fmadd_round_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmadd_round_pch) - * [ ] [`_mm512_fmadd_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmadd_round_ph) - * [ ] [`_mm512_fmaddsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmaddsub_ph) - * [ ] [`_mm512_fmaddsub_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmaddsub_round_ph) - * [ ] [`_mm512_fmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmsub_ph) - * [ ] [`_mm512_fmsub_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmsub_round_ph) - * [ ] [`_mm512_fmsubadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmsubadd_ph) - * [ ] [`_mm512_fmsubadd_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmsubadd_round_ph) - * [ ] [`_mm512_fnmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fnmadd_ph) - * [ ] [`_mm512_fnmadd_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fnmadd_round_ph) - * [ ] [`_mm512_fnmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fnmsub_ph) - * [ ] [`_mm512_fnmsub_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fnmsub_round_ph) * [ ] [`_mm512_fpclass_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fpclass_ph_mask) * [ ] [`_mm512_getexp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_getexp_ph) * [ ] [`_mm512_getexp_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_getexp_round_ph) * [ ] [`_mm512_getmant_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_getmant_ph) * [ ] 
[`_mm512_getmant_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_getmant_round_ph) - * [ ] [`_mm512_mask3_fcmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fcmadd_pch) - * [ ] [`_mm512_mask3_fcmadd_round_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fcmadd_round_pch) - * [ ] [`_mm512_mask3_fmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fmadd_pch) - * [ ] [`_mm512_mask3_fmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fmadd_ph) - * [ ] [`_mm512_mask3_fmadd_round_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fmadd_round_pch) - * [ ] [`_mm512_mask3_fmadd_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fmadd_round_ph) - * [ ] [`_mm512_mask3_fmaddsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fmaddsub_ph) - * [ ] [`_mm512_mask3_fmaddsub_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fmaddsub_round_ph) - * [ ] [`_mm512_mask3_fmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fmsub_ph) - * [ ] [`_mm512_mask3_fmsub_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fmsub_round_ph) - * [ ] [`_mm512_mask3_fmsubadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fmsubadd_ph) - * [ ] [`_mm512_mask3_fmsubadd_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fmsubadd_round_ph) - * [ ] [`_mm512_mask3_fnmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fnmadd_ph) - * [ ] [`_mm512_mask3_fnmadd_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fnmadd_round_ph) - * [ ] [`_mm512_mask3_fnmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fnmsub_ph) - * [ ] [`_mm512_mask3_fnmsub_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fnmsub_round_ph) * [ ] [`_mm512_mask_blend_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_blend_ph) * [ ] [`_mm512_mask_cmp_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmp_ph_mask) * [ ] [`_mm512_mask_cmp_round_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmp_round_ph_mask) - * [ ] [`_mm512_mask_conj_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_conj_pch) * [ ] [`_mm512_mask_cvt_roundepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundepi16_ph) * [ ] [`_mm512_mask_cvt_roundepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundepi32_ph) * [ ] [`_mm512_mask_cvt_roundepi64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundepi64_ph) @@ -189,22 +154,6 @@ * [ ] [`_mm512_mask_cvtx_roundps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtx_roundps_ph) * [ ] [`_mm512_mask_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtxph_ps) * [ ] 
[`_mm512_mask_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtxps_ph) - * [ ] [`_mm512_mask_fcmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fcmadd_pch) - * [ ] [`_mm512_mask_fcmadd_round_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fcmadd_round_pch) - * [ ] [`_mm512_mask_fmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmadd_pch) - * [ ] [`_mm512_mask_fmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmadd_ph) - * [ ] [`_mm512_mask_fmadd_round_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmadd_round_pch) - * [ ] [`_mm512_mask_fmadd_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmadd_round_ph) - * [ ] [`_mm512_mask_fmaddsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmaddsub_ph) - * [ ] [`_mm512_mask_fmaddsub_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmaddsub_round_ph) - * [ ] [`_mm512_mask_fmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmsub_ph) - * [ ] [`_mm512_mask_fmsub_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmsub_round_ph) - * [ ] [`_mm512_mask_fmsubadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmsubadd_ph) - * [ ] [`_mm512_mask_fmsubadd_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmsubadd_round_ph) - * [ ] [`_mm512_mask_fnmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fnmadd_ph) - * [ ] [`_mm512_mask_fnmadd_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fnmadd_round_ph) - * [ ] [`_mm512_mask_fnmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fnmsub_ph) - * [ ] [`_mm512_mask_fnmsub_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fnmsub_round_ph) * [ ] [`_mm512_mask_fpclass_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fpclass_ph_mask) * [ ] [`_mm512_mask_getexp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_getexp_ph) * [ ] [`_mm512_mask_getexp_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_getexp_round_ph) @@ -224,7 +173,6 @@ * [ ] [`_mm512_mask_scalef_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_scalef_round_ph) * [ ] [`_mm512_mask_sqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_sqrt_ph) * [ ] [`_mm512_mask_sqrt_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_sqrt_round_ph) - * [ ] [`_mm512_maskz_conj_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_conj_pch) * [ ] [`_mm512_maskz_cvt_roundepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundepi16_ph) * [ ] [`_mm512_maskz_cvt_roundepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundepi32_ph) * [ ] [`_mm512_maskz_cvt_roundepi64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundepi64_ph) @@ -269,22 
+217,6 @@ * [ ] [`_mm512_maskz_cvtx_roundps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtx_roundps_ph) * [ ] [`_mm512_maskz_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtxph_ps) * [ ] [`_mm512_maskz_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtxps_ph) - * [ ] [`_mm512_maskz_fcmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fcmadd_pch) - * [ ] [`_mm512_maskz_fcmadd_round_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fcmadd_round_pch) - * [ ] [`_mm512_maskz_fmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmadd_pch) - * [ ] [`_mm512_maskz_fmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmadd_ph) - * [ ] [`_mm512_maskz_fmadd_round_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmadd_round_pch) - * [ ] [`_mm512_maskz_fmadd_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmadd_round_ph) - * [ ] [`_mm512_maskz_fmaddsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmaddsub_ph) - * [ ] [`_mm512_maskz_fmaddsub_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmaddsub_round_ph) - * [ ] [`_mm512_maskz_fmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmsub_ph) - * [ ] [`_mm512_maskz_fmsub_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmsub_round_ph) - * [ ] [`_mm512_maskz_fmsubadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmsubadd_ph) - * [ ] [`_mm512_maskz_fmsubadd_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmsubadd_round_ph) - * [ ] [`_mm512_maskz_fnmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fnmadd_ph) - * [ ] [`_mm512_maskz_fnmadd_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fnmadd_round_ph) - * [ ] [`_mm512_maskz_fnmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fnmsub_ph) - * [ ] [`_mm512_maskz_fnmsub_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fnmsub_round_ph) * [ ] [`_mm512_maskz_getexp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_getexp_ph) * [ ] [`_mm512_maskz_getexp_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_getexp_round_ph) * [ ] [`_mm512_maskz_getmant_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_getmant_ph) @@ -359,35 +291,11 @@ * [ ] [`_mm_cvttsh_u64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttsh_u64) * [ ] [`_mm_cvtu32_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtu32_sh) * [ ] [`_mm_cvtu64_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtu64_sh) - * [ ] [`_mm_fcmadd_round_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fcmadd_round_sch) - * [ ] [`_mm_fcmadd_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fcmadd_sch) - * [ ] 
[`_mm_fmadd_round_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmadd_round_sch) - * [ ] [`_mm_fmadd_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmadd_round_sh) - * [ ] [`_mm_fmadd_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmadd_sch) - * [ ] [`_mm_fmadd_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmadd_sh) - * [ ] [`_mm_fmsub_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmsub_round_sh) - * [ ] [`_mm_fmsub_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmsub_sh) - * [ ] [`_mm_fnmadd_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fnmadd_round_sh) - * [ ] [`_mm_fnmadd_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fnmadd_sh) - * [ ] [`_mm_fnmsub_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fnmsub_round_sh) - * [ ] [`_mm_fnmsub_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fnmsub_sh) * [ ] [`_mm_fpclass_sh_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fpclass_sh_mask) * [ ] [`_mm_getexp_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_getexp_round_sh) * [ ] [`_mm_getexp_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_getexp_sh) * [ ] [`_mm_getmant_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_getmant_round_sh) * [ ] [`_mm_getmant_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_getmant_sh) - * [ ] [`_mm_mask3_fcmadd_round_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fcmadd_round_sch) - * [ ] [`_mm_mask3_fcmadd_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fcmadd_sch) - * [ ] [`_mm_mask3_fmadd_round_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fmadd_round_sch) - * [ ] [`_mm_mask3_fmadd_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fmadd_round_sh) - * [ ] [`_mm_mask3_fmadd_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fmadd_sch) - * [ ] [`_mm_mask3_fmadd_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fmadd_sh) - * [ ] [`_mm_mask3_fmsub_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fmsub_round_sh) - * [ ] [`_mm_mask3_fmsub_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fmsub_sh) - * [ ] [`_mm_mask3_fnmadd_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fnmadd_round_sh) - * [ ] [`_mm_mask3_fnmadd_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fnmadd_sh) - * [ ] [`_mm_mask3_fnmsub_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fnmsub_round_sh) - * [ ] [`_mm_mask3_fnmsub_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fnmsub_sh) * [ ] [`_mm_mask_cvt_roundsd_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvt_roundsd_sh) * [ ] [`_mm_mask_cvt_roundsh_sd`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvt_roundsh_sd) * [ ] 
[`_mm_mask_cvt_roundsh_ss`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvt_roundsh_ss) @@ -396,18 +304,6 @@ * [ ] [`_mm_mask_cvtsh_sd`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtsh_sd) * [ ] [`_mm_mask_cvtsh_ss`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtsh_ss) * [ ] [`_mm_mask_cvtss_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtss_sh) - * [ ] [`_mm_mask_fcmadd_round_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fcmadd_round_sch) - * [ ] [`_mm_mask_fcmadd_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fcmadd_sch) - * [ ] [`_mm_mask_fmadd_round_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmadd_round_sch) - * [ ] [`_mm_mask_fmadd_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmadd_round_sh) - * [ ] [`_mm_mask_fmadd_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmadd_sch) - * [ ] [`_mm_mask_fmadd_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmadd_sh) - * [ ] [`_mm_mask_fmsub_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmsub_round_sh) - * [ ] [`_mm_mask_fmsub_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmsub_sh) - * [ ] [`_mm_mask_fnmadd_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fnmadd_round_sh) - * [ ] [`_mm_mask_fnmadd_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fnmadd_sh) - * [ ] [`_mm_mask_fnmsub_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fnmsub_round_sh) - * [ ] [`_mm_mask_fnmsub_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fnmsub_sh) * [ ] [`_mm_mask_fpclass_sh_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fpclass_sh_mask) * [ ] [`_mm_mask_getexp_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_getexp_round_sh) * [ ] [`_mm_mask_getexp_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_getexp_sh) @@ -431,18 +327,6 @@ * [ ] [`_mm_maskz_cvtsh_sd`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtsh_sd) * [ ] [`_mm_maskz_cvtsh_ss`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtsh_ss) * [ ] [`_mm_maskz_cvtss_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtss_sh) - * [ ] [`_mm_maskz_fcmadd_round_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fcmadd_round_sch) - * [ ] [`_mm_maskz_fcmadd_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fcmadd_sch) - * [ ] [`_mm_maskz_fmadd_round_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmadd_round_sch) - * [ ] [`_mm_maskz_fmadd_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmadd_round_sh) - * [ ] [`_mm_maskz_fmadd_sch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmadd_sch) - * [ ] [`_mm_maskz_fmadd_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmadd_sh) - * [ ] 
[`_mm_maskz_fmsub_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmsub_round_sh) - * [ ] [`_mm_maskz_fmsub_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmsub_sh) - * [ ] [`_mm_maskz_fnmadd_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fnmadd_round_sh) - * [ ] [`_mm_maskz_fnmadd_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fnmadd_sh) - * [ ] [`_mm_maskz_fnmsub_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fnmsub_round_sh) - * [ ] [`_mm_maskz_fnmsub_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fnmsub_sh) * [ ] [`_mm_maskz_getexp_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_getexp_round_sh) * [ ] [`_mm_maskz_getexp_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_getexp_sh) * [ ] [`_mm_maskz_getmant_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_getmant_round_sh) @@ -473,9 +357,7 @@
["AVX512_FP16", "AVX512VL"]

- * [ ] [`_mm256_abs_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_abs_ph) * [ ] [`_mm256_cmp_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmp_ph_mask) - * [ ] [`_mm256_conj_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_conj_pch) * [ ] [`_mm256_cvtepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtepi16_ph) * [ ] [`_mm256_cvtepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtepi32_ph) * [ ] [`_mm256_cvtepi64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtepi64_ph) @@ -498,28 +380,11 @@ * [ ] [`_mm256_cvttph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvttph_epu64) * [ ] [`_mm256_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtxph_ps) * [ ] [`_mm256_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtxps_ph) - * [ ] [`_mm256_fcmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fcmadd_pch) - * [ ] [`_mm256_fmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fmadd_pch) - * [ ] [`_mm256_fmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fmadd_ph) - * [ ] [`_mm256_fmaddsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fmaddsub_ph) - * [ ] [`_mm256_fmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fmsub_ph) - * [ ] [`_mm256_fmsubadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fmsubadd_ph) - * [ ] [`_mm256_fnmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fnmadd_ph) - * [ ] [`_mm256_fnmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fnmsub_ph) * [ ] [`_mm256_fpclass_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fpclass_ph_mask) * [ ] [`_mm256_getexp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_getexp_ph) * [ ] [`_mm256_getmant_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_getmant_ph) - * [ ] [`_mm256_mask3_fcmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask3_fcmadd_pch) - * [ ] [`_mm256_mask3_fmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask3_fmadd_pch) - * [ ] [`_mm256_mask3_fmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask3_fmadd_ph) - * [ ] [`_mm256_mask3_fmaddsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask3_fmaddsub_ph) - * [ ] [`_mm256_mask3_fmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask3_fmsub_ph) - * [ ] [`_mm256_mask3_fmsubadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask3_fmsubadd_ph) - * [ ] [`_mm256_mask3_fnmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask3_fnmadd_ph) - * [ ] [`_mm256_mask3_fnmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask3_fnmsub_ph) * [ ] [`_mm256_mask_blend_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_blend_ph) * [ ] 
[`_mm256_mask_cmp_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmp_ph_mask) - * [ ] [`_mm256_mask_conj_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_conj_pch) * [ ] [`_mm256_mask_cvtepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepi16_ph) * [ ] [`_mm256_mask_cvtepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepi32_ph) * [ ] [`_mm256_mask_cvtepi64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepi64_ph) @@ -542,14 +407,6 @@ * [ ] [`_mm256_mask_cvttph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvttph_epu64) * [ ] [`_mm256_mask_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtxph_ps) * [ ] [`_mm256_mask_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtxps_ph) - * [ ] [`_mm256_mask_fcmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fcmadd_pch) - * [ ] [`_mm256_mask_fmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fmadd_pch) - * [ ] [`_mm256_mask_fmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fmadd_ph) - * [ ] [`_mm256_mask_fmaddsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fmaddsub_ph) - * [ ] [`_mm256_mask_fmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fmsub_ph) - * [ ] [`_mm256_mask_fmsubadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fmsubadd_ph) - * [ ] [`_mm256_mask_fnmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fnmadd_ph) - * [ ] [`_mm256_mask_fnmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fnmsub_ph) * [ ] [`_mm256_mask_fpclass_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fpclass_ph_mask) * [ ] [`_mm256_mask_getexp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_getexp_ph) * [ ] [`_mm256_mask_getmant_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_getmant_ph) @@ -561,7 +418,6 @@ * [ ] [`_mm256_mask_rsqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_rsqrt_ph) * [ ] [`_mm256_mask_scalef_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_scalef_ph) * [ ] [`_mm256_mask_sqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_sqrt_ph) - * [ ] [`_mm256_maskz_conj_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_conj_pch) * [ ] [`_mm256_maskz_cvtepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepi16_ph) * [ ] [`_mm256_maskz_cvtepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepi32_ph) * [ ] [`_mm256_maskz_cvtepi64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepi64_ph) @@ -584,14 +440,6 @@ * [ ] [`_mm256_maskz_cvttph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvttph_epu64) * [ ] [`_mm256_maskz_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtxph_ps) 
* [ ] [`_mm256_maskz_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtxps_ph) - * [ ] [`_mm256_maskz_fcmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fcmadd_pch) - * [ ] [`_mm256_maskz_fmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fmadd_pch) - * [ ] [`_mm256_maskz_fmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fmadd_ph) - * [ ] [`_mm256_maskz_fmaddsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fmaddsub_ph) - * [ ] [`_mm256_maskz_fmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fmsub_ph) - * [ ] [`_mm256_maskz_fmsubadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fmsubadd_ph) - * [ ] [`_mm256_maskz_fnmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fnmadd_ph) - * [ ] [`_mm256_maskz_fnmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fnmsub_ph) * [ ] [`_mm256_maskz_getexp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_getexp_ph) * [ ] [`_mm256_maskz_getmant_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_getmant_ph) * [ ] [`_mm256_maskz_max_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_max_ph) @@ -616,9 +464,7 @@ * [ ] [`_mm256_rsqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_rsqrt_ph) * [ ] [`_mm256_scalef_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_scalef_ph) * [ ] [`_mm256_sqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_sqrt_ph) - * [ ] [`_mm_abs_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_abs_ph) * [ ] [`_mm_cmp_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmp_ph_mask) - * [ ] [`_mm_conj_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_conj_pch) * [ ] [`_mm_cvtepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepi16_ph) * [ ] [`_mm_cvtepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepi32_ph) * [ ] [`_mm_cvtepi64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepi64_ph) @@ -641,28 +487,11 @@ * [ ] [`_mm_cvttph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttph_epu64) * [ ] [`_mm_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtxph_ps) * [ ] [`_mm_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtxps_ph) - * [ ] [`_mm_fcmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fcmadd_pch) - * [ ] [`_mm_fmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmadd_pch) - * [ ] [`_mm_fmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmadd_ph) - * [ ] [`_mm_fmaddsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmaddsub_ph) - * [ ] [`_mm_fmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmsub_ph) - * [ ] [`_mm_fmsubadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmsubadd_ph) - * [ ] 
[`_mm_fnmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fnmadd_ph) - * [ ] [`_mm_fnmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fnmsub_ph) * [ ] [`_mm_fpclass_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fpclass_ph_mask) * [ ] [`_mm_getexp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_getexp_ph) * [ ] [`_mm_getmant_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_getmant_ph) - * [ ] [`_mm_mask3_fcmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fcmadd_pch) - * [ ] [`_mm_mask3_fmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fmadd_pch) - * [ ] [`_mm_mask3_fmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fmadd_ph) - * [ ] [`_mm_mask3_fmaddsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fmaddsub_ph) - * [ ] [`_mm_mask3_fmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fmsub_ph) - * [ ] [`_mm_mask3_fmsubadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fmsubadd_ph) - * [ ] [`_mm_mask3_fnmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fnmadd_ph) - * [ ] [`_mm_mask3_fnmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fnmsub_ph) * [ ] [`_mm_mask_blend_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_blend_ph) * [ ] [`_mm_mask_cmp_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmp_ph_mask) - * [ ] [`_mm_mask_conj_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_conj_pch) * [ ] [`_mm_mask_cvtepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepi16_ph) * [ ] [`_mm_mask_cvtepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepi32_ph) * [ ] [`_mm_mask_cvtepi64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepi64_ph) @@ -685,14 +514,6 @@ * [ ] [`_mm_mask_cvttph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvttph_epu64) * [ ] [`_mm_mask_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtxph_ps) * [ ] [`_mm_mask_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtxps_ph) - * [ ] [`_mm_mask_fcmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fcmadd_pch) - * [ ] [`_mm_mask_fmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmadd_pch) - * [ ] [`_mm_mask_fmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmadd_ph) - * [ ] [`_mm_mask_fmaddsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmaddsub_ph) - * [ ] [`_mm_mask_fmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmsub_ph) - * [ ] [`_mm_mask_fmsubadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmsubadd_ph) - * [ ] [`_mm_mask_fnmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fnmadd_ph) - * [ ] [`_mm_mask_fnmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fnmsub_ph) * [ ] 
[`_mm_mask_fpclass_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fpclass_ph_mask) * [ ] [`_mm_mask_getexp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_getexp_ph) * [ ] [`_mm_mask_getmant_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_getmant_ph) @@ -708,7 +529,6 @@ * [ ] [`_mm_mask_rsqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_rsqrt_ph) * [ ] [`_mm_mask_scalef_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_scalef_ph) * [ ] [`_mm_mask_sqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_sqrt_ph) - * [ ] [`_mm_maskz_conj_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_conj_pch) * [ ] [`_mm_maskz_cvtepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepi16_ph) * [ ] [`_mm_maskz_cvtepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepi32_ph) * [ ] [`_mm_maskz_cvtepi64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepi64_ph) @@ -731,14 +551,6 @@ * [ ] [`_mm_maskz_cvttph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvttph_epu64) * [ ] [`_mm_maskz_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtxph_ps) * [ ] [`_mm_maskz_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtxps_ph) - * [ ] [`_mm_maskz_fcmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fcmadd_pch) - * [ ] [`_mm_maskz_fmadd_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmadd_pch) - * [ ] [`_mm_maskz_fmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmadd_ph) - * [ ] [`_mm_maskz_fmaddsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmaddsub_ph) - * [ ] [`_mm_maskz_fmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmsub_ph) - * [ ] [`_mm_maskz_fmsubadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmsubadd_ph) - * [ ] [`_mm_maskz_fnmadd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fnmadd_ph) - * [ ] [`_mm_maskz_fnmsub_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fnmsub_ph) * [ ] [`_mm_maskz_getexp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_getexp_ph) * [ ] [`_mm_maskz_getmant_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_getmant_ph) * [ ] [`_mm_maskz_max_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_max_ph) diff --git a/crates/core_arch/src/x86/avx512fp16.rs b/crates/core_arch/src/x86/avx512fp16.rs index a2a31d87e9..11e5f7d8e9 100644 --- a/crates/core_arch/src/x86/avx512fp16.rs +++ b/crates/core_arch/src/x86/avx512fp16.rs @@ -2304,7 +2304,7 @@ pub unsafe fn _mm_mul_pch(a: __m128h, b: __m128h) -> __m128h { } /// Multiply packed complex numbers in a and b, and store the results in dst using writemask k (the element -/// is copied from src when mask bit 0 is not set). Each complex number is composed of two adjacent +/// is copied from src when the corresponding mask bit is not set). 
Each complex number is composed of two adjacent /// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_mul_pch) @@ -2317,7 +2317,7 @@ pub unsafe fn _mm_mask_mul_pch(src: __m128h, k: __mmask8, a: __m128h, b: __m128h } /// Multiply packed complex numbers in a and b, and store the results in dst using zeromask k (the element -/// is zeroed out when mask bit 0 is not set). Each complex number is composed of two adjacent +/// is zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent /// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_mul_pch) @@ -2343,7 +2343,7 @@ pub unsafe fn _mm256_mul_pch(a: __m256h, b: __m256h) -> __m256h { } /// Multiply packed complex numbers in a and b, and store the results in dst using writemask k (the element -/// is copied from src when mask bit 0 is not set). Each complex number is composed of two adjacent +/// is copied from src when the corresponding mask bit is not set). Each complex number is composed of two adjacent /// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_mul_pch) @@ -2356,7 +2356,7 @@ pub unsafe fn _mm256_mask_mul_pch(src: __m256h, k: __mmask8, a: __m256h, b: __m2 } /// Multiply packed complex numbers in a and b, and store the results in dst using zeromask k (the element -/// is zeroed out when mask bit 0 is not set). Each complex number is composed of two adjacent +/// is zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent /// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_mul_pch) @@ -2382,7 +2382,7 @@ pub unsafe fn _mm512_mul_pch(a: __m512h, b: __m512h) -> __m512h { } /// Multiply packed complex numbers in a and b, and store the results in dst using writemask k (the element -/// is copied from src when mask bit 0 is not set). Each complex number is composed of two adjacent +/// is copied from src when the corresponding mask bit is not set). Each complex number is composed of two adjacent /// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_mul_pch) @@ -2395,7 +2395,7 @@ pub unsafe fn _mm512_mask_mul_pch(src: __m512h, k: __mmask16, a: __m512h, b: __m } /// Multiply packed complex numbers in a and b, and store the results in dst using zeromask k (the element -/// is zeroed out when mask bit 0 is not set). Each complex number is composed of two adjacent +/// is zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent /// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`. 
/// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_mul_pch) @@ -2431,7 +2431,7 @@ pub unsafe fn _mm512_mul_round_pch(a: __m512h, b: __m512h) } /// Multiply the packed complex numbers in a and b, and store the results in dst using writemask k (the element -/// is copied from src when mask bit 0 is not set). Each complex number is composed of two adjacent +/// is copied from src when the corresponding mask bit is not set). Each complex number is composed of two adjacent /// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`. /// /// Rounding is done according to the rounding parameter, which can be one of: @@ -2465,7 +2465,7 @@ pub unsafe fn _mm512_mask_mul_round_pch( } /// Multiply the packed complex numbers in a and b, and store the results in dst using zeromask k (the element -/// is zeroed out when mask bit 0 is not set). Each complex number is composed of two adjacent +/// is zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent /// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`. /// /// Rounding is done according to the rounding parameter, which can be one of: @@ -2634,7 +2634,7 @@ pub unsafe fn _mm_fmul_pch(a: __m128h, b: __m128h) -> __m128h { } /// Multiply packed complex numbers in a and b, and store the results in dst using writemask k (the element -/// is copied from src when mask bit 0 is not set). Each complex number is composed of two adjacent +/// is copied from src when the corresponding mask bit is not set). Each complex number is composed of two adjacent /// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmul_pch) @@ -2647,7 +2647,7 @@ pub unsafe fn _mm_mask_fmul_pch(src: __m128h, k: __mmask8, a: __m128h, b: __m128 } /// Multiply packed complex numbers in a and b, and store the results in dst using zeromask k (the element -/// is zeroed out when mask bit 0 is not set). Each complex number is composed of two adjacent half-precision +/// is zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision /// (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmul_pch) @@ -2673,7 +2673,7 @@ pub unsafe fn _mm256_fmul_pch(a: __m256h, b: __m256h) -> __m256h { } /// Multiply packed complex numbers in a and b, and store the results in dst using writemask k (the element -/// is copied from src when mask bit 0 is not set). Each complex number is composed of two adjacent half-precision +/// is copied from src when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision /// (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`. 
/// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fmul_pch) @@ -2686,7 +2686,7 @@ pub unsafe fn _mm256_mask_fmul_pch(src: __m256h, k: __mmask8, a: __m256h, b: __m } /// Multiply packed complex numbers in a and b, and store the results in dst using zeromask k (the element -/// is zeroed out when mask bit 0 is not set). Each complex number is composed of two adjacent half-precision +/// is zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision /// (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fmul_pch) @@ -2711,7 +2711,7 @@ pub unsafe fn _mm512_fmul_pch(a: __m512h, b: __m512h) -> __m512h { } /// Multiply packed complex numbers in a and b, and store the results in dst using writemask k (the element -/// is copied from src when mask bit 0 is not set). Each complex number is composed of two adjacent half-precision +/// is copied from src when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision /// (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmul_pch) @@ -2724,7 +2724,7 @@ pub unsafe fn _mm512_mask_fmul_pch(src: __m512h, k: __mmask16, a: __m512h, b: __ } /// Multiply packed complex numbers in a and b, and store the results in dst using zeromask k (the element -/// is zeroed out when mask bit 0 is not set). Each complex number is composed of two adjacent half-precision +/// is zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision /// (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmul_pch) @@ -2758,7 +2758,7 @@ pub unsafe fn _mm512_fmul_round_pch(a: __m512h, b: __m512h) } /// Multiply packed complex numbers in a and b, and store the results in dst using writemask k (the element -/// is copied from src when mask bit 0 is not set). Each complex number is composed of two adjacent half-precision +/// is copied from src when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision /// (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`. /// Rounding is done according to the rounding parameter, which can be one of: /// @@ -2785,7 +2785,7 @@ pub unsafe fn _mm512_mask_fmul_round_pch( } /// Multiply packed complex numbers in a and b, and store the results in dst using zeromask k (the element -/// is zeroed out when mask bit 0 is not set). Each complex number is composed of two adjacent half-precision +/// is zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision /// (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`.
/// Rounding is done according to the rounding parameter, which can be one of: /// @@ -2941,7 +2941,7 @@ pub unsafe fn _mm_cmul_pch(a: __m128h, b: __m128h) -> __m128h { } /// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, and -/// store the results in dst using writemask k (the element is copied from src when mask bit 0 is not set). +/// store the results in dst using writemask k (the element is copied from src when the corresponding mask bit is not set). /// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which /// defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`. /// @@ -2955,7 +2955,7 @@ pub unsafe fn _mm_mask_cmul_pch(src: __m128h, k: __mmask8, a: __m128h, b: __m128 } /// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, and -/// store the results in dst using zeromask k (the element is zeroed out when mask bit 0 is not set). +/// store the results in dst using zeromask k (the element is zeroed out when the corresponding mask bit is not set). /// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which /// defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`. /// @@ -2983,7 +2983,7 @@ pub unsafe fn _mm256_cmul_pch(a: __m256h, b: __m256h) -> __m256h { } /// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, and -/// store the results in dst using writemask k (the element is copied from src when mask bit 0 is not set). +/// store the results in dst using writemask k (the element is copied from src when the corresponding mask bit is not set). /// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which /// defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`. /// @@ -2997,7 +2997,7 @@ pub unsafe fn _mm256_mask_cmul_pch(src: __m256h, k: __mmask8, a: __m256h, b: __m } /// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, and -/// store the results in dst using zeromask k (the element is zeroed out when mask bit 0 is not set). +/// store the results in dst using zeromask k (the element is zeroed out when the corresponding mask bit is not set). /// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which /// defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`. /// @@ -3025,7 +3025,7 @@ pub unsafe fn _mm512_cmul_pch(a: __m512h, b: __m512h) -> __m512h { } /// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, and -/// store the results in dst using writemask k (the element is copied from src when mask bit 0 is not set). +/// store the results in dst using writemask k (the element is copied from src when the corresponding mask bit is not set). /// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which /// defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
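The `cmul` family differs from `mul` only in conjugating the second operand first. One complex element in scalar form (f32 stand-ins, illustrative only):

// a * conj(b): (a.re + i*a.im) * (b.re - i*b.im)
fn complex_cmul(a: (f32, f32), b: (f32, f32)) -> (f32, f32) {
    (a.0 * b.0 + a.1 * b.1, a.1 * b.0 - a.0 * b.1)
}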
/// @@ -3039,7 +3039,7 @@ pub unsafe fn _mm512_mask_cmul_pch(src: __m512h, k: __mmask16, a: __m512h, b: __ } /// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, and -/// store the results in dst using zeromask k (the element is zeroed out when mask bit 0 is not set). +/// store the results in dst using zeromask k (the element is zeroed out when the corresponding mask bit is not set). /// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which /// defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`. /// @@ -3077,7 +3077,7 @@ pub unsafe fn _mm512_cmul_round_pch(a: __m512h, b: __m512h) } /// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, and -/// store the results in dst using writemask k (the element is copied from src when mask bit 0 is not set). +/// store the results in dst using writemask k (the element is copied from src when the corresponding mask bit is not set). /// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which /// defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`. /// @@ -3112,7 +3112,7 @@ pub unsafe fn _mm512_mask_cmul_round_pch( } /// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, and -/// store the results in dst using zeromask k (the element is zeroed out when mask bit 0 is not set). +/// store the results in dst using zeromask k (the element is zeroed out when the corresponding mask bit is not set). /// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which /// defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`. /// @@ -3281,7 +3281,7 @@ pub unsafe fn _mm_fcmul_pch(a: __m128h, b: __m128h) -> __m128h { } /// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, and -/// store the results in dst using writemask k (the element is copied from src when mask bit 0 is not set). +/// store the results in dst using writemask k (the element is copied from src when the corresponding mask bit is not set). /// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which /// defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`. /// @@ -3295,7 +3295,7 @@ pub unsafe fn _mm_mask_fcmul_pch(src: __m128h, k: __mmask8, a: __m128h, b: __m12 } /// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, and -/// store the results in dst using zeromask k (the element is zeroed out when mask bit 0 is not set). +/// store the results in dst using zeromask k (the element is zeroed out when the corresponding mask bit is not set). /// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which /// defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
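For the `_round` variants, the rounding mode is a const generic restricted to the combinations listed in the docs. A hypothetical caller-side wrapper pinning round-to-nearest with exceptions suppressed (assumes nightly with the `stdarch_x86_avx512_f16` feature and `core::arch::x86_64::*` in scope):

#[target_feature(enable = "avx512fp16")]
unsafe fn cmul_round_nearest(a: __m512h, b: __m512h) -> __m512h {
    // Any documented rounding direction may be combined with
    // _MM_FROUND_NO_EXC; _MM_FROUND_CUR_DIRECTION is also accepted.
    _mm512_cmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b)
}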
/// @@ -3323,7 +3323,7 @@ pub unsafe fn _mm256_fcmul_pch(a: __m256h, b: __m256h) -> __m256h { } /// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, and -/// store the results in dst using writemask k (the element is copied from src when mask bit 0 is not set). +/// store the results in dst using writemask k (the element is copied from src when the corresponding mask bit is not set). /// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which /// defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`. /// @@ -3337,7 +3337,7 @@ pub unsafe fn _mm256_mask_fcmul_pch(src: __m256h, k: __mmask8, a: __m256h, b: __ } /// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, and -/// store the results in dst using zeromask k (the element is zeroed out when mask bit 0 is not set). +/// store the results in dst using zeromask k (the element is zeroed out when the corresponding mask bit is not set). /// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which /// defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`. /// @@ -3365,7 +3365,7 @@ pub unsafe fn _mm512_fcmul_pch(a: __m512h, b: __m512h) -> __m512h { } /// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, and -/// store the results in dst using writemask k (the element is copied from src when mask bit 0 is not set). +/// store the results in dst using writemask k (the element is copied from src when the corresponding mask bit is not set). /// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which /// defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`. /// @@ -3379,7 +3379,7 @@ pub unsafe fn _mm512_mask_fcmul_pch(src: __m512h, k: __mmask16, a: __m512h, b: _ } /// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, and -/// store the results in dst using zeromask k (the element is zeroed out when mask bit 0 is not set). +/// store the results in dst using zeromask k (the element is zeroed out when the corresponding mask bit is not set). /// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which /// defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`. /// @@ -3416,7 +3416,7 @@ pub unsafe fn _mm512_fcmul_round_pch(a: __m512h, b: __m512h } /// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, and -/// store the results in dst using writemask k (the element is copied from src when mask bit 0 is not set). +/// store the results in dst using writemask k (the element is copied from src when the corresponding mask bit is not set). /// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which /// defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
/// @@ -3445,7 +3445,7 @@ pub unsafe fn _mm512_mask_fcmul_round_pch( } /// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, and -/// store the results in dst using zeromask k (the element is zeroed out when mask bit 0 is not set). +/// store the results in dst using zeromask k (the element is zeroed out when the corresponding mask bit is not set). /// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which /// defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`. /// @@ -3594,2682 +3594,8810 @@ pub unsafe fn _mm_maskz_fcmul_round_sch( _mm_maskz_cmul_round_sch::(k, a, b) } -#[allow(improper_ctypes)] -extern "C" { - #[link_name = "llvm.x86.avx512fp16.mask.cmp.sh"] - fn vcmpsh(a: __m128h, b: __m128h, imm8: i32, mask: __mmask8, sae: i32) -> __mmask8; - #[link_name = "llvm.x86.avx512fp16.vcomi.sh"] - fn vcomish(a: __m128h, b: __m128h, imm8: i32, sae: i32) -> i32; +/// Finds the absolute value of each packed half-precision (16-bit) floating-point element in v2, storing +/// the results in dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_abs_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_abs_ph(v2: __m128h) -> __m128h { + transmute(_mm_and_si128(transmute(v2), _mm_set1_epi16(i16::MAX))) +} - #[link_name = "llvm.x86.avx512fp16.add.ph.512"] - fn vaddph(a: __m512h, b: __m512h, rounding: i32) -> __m512h; - #[link_name = "llvm.x86.avx512fp16.sub.ph.512"] - fn vsubph(a: __m512h, b: __m512h, rounding: i32) -> __m512h; - #[link_name = "llvm.x86.avx512fp16.mul.ph.512"] - fn vmulph(a: __m512h, b: __m512h, rounding: i32) -> __m512h; - #[link_name = "llvm.x86.avx512fp16.div.ph.512"] - fn vdivph(a: __m512h, b: __m512h, rounding: i32) -> __m512h; +/// Finds the absolute value of each packed half-precision (16-bit) floating-point element in v2, storing +/// the results in dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_abs_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_abs_ph(v2: __m256h) -> __m256h { + transmute(_mm256_and_si256(transmute(v2), _mm256_set1_epi16(i16::MAX))) +} - #[link_name = "llvm.x86.avx512fp16.mask.add.sh.round"] - fn vaddsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; - #[link_name = "llvm.x86.avx512fp16.mask.sub.sh.round"] - fn vsubsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; - #[link_name = "llvm.x86.avx512fp16.mask.mul.sh.round"] - fn vmulsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; - #[link_name = "llvm.x86.avx512fp16.mask.div.sh.round"] - fn vdivsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; +/// Finds the absolute value of each packed half-precision (16-bit) floating-point element in v2, storing +/// the results in dst.
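The `abs_ph` bodies rely on the IEEE 754 binary16 layout: the sign lives in the top bit of each 16-bit lane, so ANDing with `i16::MAX` (0x7FFF) clears it and leaves the magnitude (and NaN payloads) untouched. A scalar model over raw bits (illustrative):

// Clearing bit 15 of an f16 bit pattern drops only the sign.
fn abs_f16_bits(bits: u16) -> u16 {
    bits & 0x7FFF
}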
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_abs_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_abs_ph(v2: __m512h) -> __m512h { + transmute(_mm512_and_si512(transmute(v2), _mm512_set1_epi16(i16::MAX))) +} - #[link_name = "llvm.x86.avx512fp16.mask.vfmul.cph.128"] - fn vfmulcph_128(a: __m128, b: __m128, src: __m128, k: __mmask8) -> __m128; - #[link_name = "llvm.x86.avx512fp16.mask.vfmul.cph.256"] - fn vfmulcph_256(a: __m256, b: __m256, src: __m256, k: __mmask8) -> __m256; - #[link_name = "llvm.x86.avx512fp16.mask.vfmul.cph.512"] - fn vfmulcph_512(a: __m512, b: __m512, src: __m512, k: __mmask16, rounding: i32) -> __m512; - #[link_name = "llvm.x86.avx512fp16.mask.vfmul.csh"] - fn vfmulcsh(a: __m128, b: __m128, src: __m128, k: __mmask8, rounding: i32) -> __m128; +/// Compute the complex conjugates of complex numbers in a, and store the results in dst. Each complex +/// number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines +/// the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate +/// `conjugate = vec.fp16[0] - i * vec.fp16[1]`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_conj_pch) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_conj_pch(a: __m128h) -> __m128h { + transmute(_mm_xor_si128(transmute(a), _mm_set1_epi32(i32::MIN))) +} - #[link_name = "llvm.x86.avx512fp16.mask.vfcmul.cph.128"] - fn vfcmulcph_128(a: __m128, b: __m128, src: __m128, k: __mmask8) -> __m128; - #[link_name = "llvm.x86.avx512fp16.mask.vfcmul.cph.256"] - fn vfcmulcph_256(a: __m256, b: __m256, src: __m256, k: __mmask8) -> __m256; - #[link_name = "llvm.x86.avx512fp16.mask.vfcmul.cph.512"] - fn vfcmulcph_512(a: __m512, b: __m512, src: __m512, k: __mmask16, rounding: i32) -> __m512; - #[link_name = "llvm.x86.avx512fp16.mask.vfcmul.csh"] - fn vfcmulcsh(a: __m128, b: __m128, src: __m128, k: __mmask8, rounding: i32) -> __m128; +/// Compute the complex conjugates of complex numbers in a, and store the results in dst using writemask k +/// (the element is copied from src when the corresponding mask bit is not set). Each complex number is composed of two +/// adjacent half-precision (16-bit) floating-point elements, which defines the complex number +/// `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_conj_pch) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mask_conj_pch(src: __m128h, k: __mmask8, a: __m128h) -> __m128h { + let r: __m128 = transmute(_mm_conj_pch(a)); + transmute(simd_select_bitmask(k, r, transmute(src))) +} +/// Compute the complex conjugates of complex numbers in a, and store the results in dst using zeromask k +/// (the element is zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent +/// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, +/// or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
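`conj_pch` is the same kind of bit trick: each complex number occupies one 32-bit lane with the imaginary f16 in the upper half, so XOR with `i32::MIN` flips only that sign bit. A scalar model (illustrative):

// Flipping bit 31 of a packed (re, im) pair negates fp16[1] only.
fn conj_f16_pair_bits(pair: u32) -> u32 {
    pair ^ 0x8000_0000
}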
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_conj_pch) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_maskz_conj_pch(k: __mmask8, a: __m128h) -> __m128h { + _mm_mask_conj_pch(_mm_setzero_ph(), k, a) } -#[cfg(test)] -mod tests { - use crate::core_arch::x86::*; - use crate::mem::transmute; - use crate::ptr::{addr_of, addr_of_mut}; - use stdarch_test::simd_test; +/// Compute the complex conjugates of complex numbers in a, and store the results in dst. Each complex number +/// is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex +/// number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_conj_pch) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_conj_pch(a: __m256h) -> __m256h { + transmute(_mm256_xor_si256(transmute(a), _mm256_set1_epi32(i32::MIN))) +} - #[target_feature(enable = "avx512fp16")] - unsafe fn _mm_set1_pch(re: f16, im: f16) -> __m128h { - _mm_setr_ph(re, im, re, im, re, im, re, im) - } +/// Compute the complex conjugates of complex numbers in a, and store the results in dst using writemask k +/// (the element is copied from src when the corresponding mask bit is not set). Each complex number is composed of two +/// adjacent half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, +/// or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_conj_pch) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_mask_conj_pch(src: __m256h, k: __mmask8, a: __m256h) -> __m256h { + let r: __m256 = transmute(_mm256_conj_pch(a)); + transmute(simd_select_bitmask(k, r, transmute(src))) +} - #[target_feature(enable = "avx512fp16")] - unsafe fn _mm256_set1_pch(re: f16, im: f16) -> __m256h { - _mm256_setr_ph( - re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, - ) - } +/// Compute the complex conjugates of complex numbers in a, and store the results in dst using zeromask k +/// (the element is zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent +/// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, +/// or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_conj_pch) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_maskz_conj_pch(k: __mmask8, a: __m256h) -> __m256h { + _mm256_mask_conj_pch(_mm256_setzero_ph(), k, a) +} - #[target_feature(enable = "avx512fp16")] - unsafe fn _mm512_set1_pch(re: f16, im: f16) -> __m512h { - _mm512_setr_ph( - re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, - re, im, re, im, re, im, re, im, re, im, - ) - } +/// Compute the complex conjugates of complex numbers in a, and store the results in dst. Each complex number +/// is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex +/// number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_conj_pch) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_conj_pch(a: __m512h) -> __m512h { + transmute(_mm512_xor_si512(transmute(a), _mm512_set1_epi32(i32::MIN))) +} - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_set_ph() { - let r = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let e = _mm_setr_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - assert_eq_m128h(r, e); - } +/// Compute the complex conjugates of complex numbers in a, and store the results in dst using writemask k +/// (the element is copied from src when the corresponding mask bit is not set). Each complex number is composed of two +/// adjacent half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, +/// or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_conj_pch) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_mask_conj_pch(src: __m512h, k: __mmask16, a: __m512h) -> __m512h { + let r: __m512 = transmute(_mm512_conj_pch(a)); + transmute(simd_select_bitmask(k, r, transmute(src))) +} - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_set_ph() { - let r = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let e = _mm256_setr_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, - ); - assert_eq_m256h(r, e); - } +/// Compute the complex conjugates of complex numbers in a, and store the results in dst using zeromask k +/// (the element is zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent +/// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, +/// or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
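The masked `conj_pch` variants go through `simd_select_bitmask` on an f32 view, which is exactly what makes one mask bit cover one (re, im) pair. A scalar model of that select (hypothetical helper, illustrative only):

// Lane i comes from `r` when bit i of `k` is set, otherwise from `src`.
fn select_bitmask(k: u16, r: [f32; 16], src: [f32; 16]) -> [f32; 16] {
    core::array::from_fn(|i| if k & (1 << i) != 0 { r[i] } else { src[i] })
}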
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_conj_pch) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_maskz_conj_pch(k: __mmask16, a: __m512h) -> __m512h { + _mm512_mask_conj_pch(_mm512_setzero_ph(), k, a) +} - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_set_ph() { - let r = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let e = _mm512_setr_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - assert_eq_m512h(r, e); - } +/// Multiply packed complex numbers in a and b, accumulate to the corresponding complex numbers in c, +/// and store the results in dst. Each complex number is composed of two adjacent half-precision (16-bit) +/// floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmadd_pch) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vfmaddcph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_fmadd_pch(a: __m128h, b: __m128h, c: __m128h) -> __m128h { + _mm_mask3_fmadd_pch(a, b, c, 0xff) +} - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_set_sh() { - let r = _mm_set_sh(1.0); - let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0); - assert_eq_m128h(r, e); - } +/// Multiply packed complex numbers in a and b, accumulate to the corresponding complex numbers in c, +/// and store the results in dst using writemask k (the element is copied from a when the corresponding +/// mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) +/// floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmadd_pch) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vfmaddcph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mask_fmadd_pch(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h { + let r: __m128 = transmute(_mm_mask3_fmadd_pch(a, b, c, k)); // using `0xff` would have been fine here, but this is what Clang does + transmute(simd_select_bitmask(k, r, transmute(a))) +} - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_set1_ph() { - let r = _mm_set1_ph(1.0); - let e = _mm_set_ph(1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0); - assert_eq_m128h(r, e); - } +/// Multiply packed complex numbers in a and b, accumulate to the corresponding complex numbers in c, +/// and store the results in dst using writemask k (the element is copied from c when the corresponding +/// mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) +/// floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`.
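For reference, `fmadd_pch` is a complex multiply-accumulate, `dst = a * b + c` per complex element. One element in scalar form (f32 stand-ins, illustrative only):

fn complex_fmadd(a: (f32, f32), b: (f32, f32), c: (f32, f32)) -> (f32, f32) {
    // (a * b) + c over complex numbers
    (a.0 * b.0 - a.1 * b.1 + c.0, a.0 * b.1 + a.1 * b.0 + c.1)
}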
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fmadd_pch) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vfmaddcph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mask3_fmadd_pch(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h { + transmute(vfmaddcph_mask3_128( + transmute(a), + transmute(b), + transmute(c), + k, + )) +} - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_set1_ph() { - let r = _mm256_set1_ph(1.0); - let e = _mm256_set_ph( - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - ); - assert_eq_m256h(r, e); - } +/// Multiply packed complex numbers in a and b, accumulate to the corresponding complex numbers in c, +/// and store the results in dst using zeromask k (the element is zeroed out when the corresponding mask +/// bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point +/// elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmadd_pch) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vfmaddcph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_maskz_fmadd_pch(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h { + transmute(vfmaddcph_maskz_128( + transmute(a), + transmute(b), + transmute(c), + k, + )) +} - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_set1_ph() { - let r = _mm512_set1_ph(1.0); - let e = _mm512_set_ph( - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - ); - assert_eq_m512h(r, e); - } +/// Multiply packed complex numbers in a and b, accumulate to the corresponding complex numbers in c, +/// and store the results in dst. Each complex number is composed of two adjacent half-precision (16-bit) +/// floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fmadd_pch) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vfmaddcph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_fmadd_pch(a: __m256h, b: __m256h, c: __m256h) -> __m256h { + _mm256_mask3_fmadd_pch(a, b, c, 0xff) +} - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_setr_ph() { - let r = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let e = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - assert_eq_m128h(r, e); - } +/// Multiply packed complex numbers in a and b, accumulate to the corresponding complex numbers in c, +/// and store the results in dst using writemask k (the element is copied from a when the corresponding mask +/// bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point +/// elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`. 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fmadd_pch) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vfmaddcph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_mask_fmadd_pch(a: __m256h, k: __mmask8, b: __m256h, c: __m256h) -> __m256h { + let r: __m256 = transmute(_mm256_mask3_fmadd_pch(a, b, c, k)); // using `0xff` would have been fine here, but this is what Clang does + transmute(simd_select_bitmask(k, r, transmute(a))) +} +/// Multiply packed complex numbers in a and b, accumulate to the corresponding complex numbers in c, +/// and store the results in dst using writemask k (the element is copied from c when the corresponding +/// mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) +/// floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask3_fmadd_pch) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vfmaddcph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_mask3_fmadd_pch(a: __m256h, b: __m256h, c: __m256h, k: __mmask8) -> __m256h { + transmute(vfmaddcph_mask3_256( + transmute(a), + transmute(b), + transmute(c), + k, + )) +} + +/// Multiply packed complex numbers in a and b, accumulate to the corresponding complex numbers in c, +/// and store the results in dst using zeromask k (the element is zeroed out when the corresponding mask +/// bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point +/// elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fmadd_pch) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vfmaddcph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_maskz_fmadd_pch(k: __mmask8, a: __m256h, b: __m256h, c: __m256h) -> __m256h { + transmute(vfmaddcph_maskz_256( + transmute(a), + transmute(b), + transmute(c), + k, + )) +} + +/// Multiply packed complex numbers in a and b, accumulate to the corresponding complex numbers in c, +/// and store the results in dst. Each complex number is composed of two adjacent half-precision (16-bit) +/// floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`.
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmadd_pch) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vfmaddcph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_fmadd_pch(a: __m512h, b: __m512h, c: __m512h) -> __m512h { + _mm512_fmadd_round_pch::<_MM_FROUND_CUR_DIRECTION>(a, b, c) +} + +/// Multiply packed complex numbers in a and b, accumulate to the corresponding complex numbers in c, +/// and store the results in dst using writemask k (the element is copied from a when the corresponding mask +/// bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point +/// elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmadd_pch) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vfmaddcph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_mask_fmadd_pch(a: __m512h, k: __mmask16, b: __m512h, c: __m512h) -> __m512h { + _mm512_mask_fmadd_round_pch::<_MM_FROUND_CUR_DIRECTION>(a, k, b, c) +} + +/// Multiply packed complex numbers in a and b, accumulate to the corresponding complex numbers in c, +/// and store the results in dst using writemask k (the element is copied from c when the corresponding +/// mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) +/// floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fmadd_pch) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vfmaddcph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_mask3_fmadd_pch(a: __m512h, b: __m512h, c: __m512h, k: __mmask16) -> __m512h { + _mm512_mask3_fmadd_round_pch::<_MM_FROUND_CUR_DIRECTION>(a, b, c, k) +} + +/// Multiply packed complex numbers in a and b, accumulate to the corresponding complex numbers in c, +/// and store the results in dst using zeromask k (the element is zeroed out when the corresponding mask +/// bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point +/// elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmadd_pch) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vfmaddcph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_maskz_fmadd_pch(k: __mmask16, a: __m512h, b: __m512h, c: __m512h) -> __m512h { + _mm512_maskz_fmadd_round_pch::<_MM_FROUND_CUR_DIRECTION>(k, a, b, c) +} + +/// Multiply packed complex numbers in a and b, accumulate to the corresponding complex numbers in c, +/// and store the results in dst. Each complex number is composed of two adjacent half-precision (16-bit) +/// floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`. 
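A compact way to keep the four FMA flavors straight is by where a masked-off complex element comes from (scalar model, illustrative only):

// Plain `fmadd`: every element computed; no mask involved.
enum MaskKind { Mask, Mask3, Maskz }
fn masked_off_element(kind: MaskKind, a: (f32, f32), c: (f32, f32)) -> (f32, f32) {
    match kind {
        MaskKind::Mask => a,           // `mask_`: keep the element from a
        MaskKind::Mask3 => c,          // `mask3_`: keep the element from c
        MaskKind::Maskz => (0.0, 0.0), // `maskz_`: zero the element
    }
}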
+/// +/// Rounding is done according to the rounding parameter, which can be one of: +/// +/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions +/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions +/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions +/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions +/// _MM_FROUND_CUR_DIRECTION +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmadd_round_pch) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vfmaddcph, ROUNDING = 8))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_fmadd_round_pch( + a: __m512h, + b: __m512h, + c: __m512h, +) -> __m512h { + static_assert_rounding!(ROUNDING); + _mm512_mask3_fmadd_round_pch::(a, b, c, 0xffff) +} + +/// Multiply packed complex numbers in a and b, accumulate to the corresponding complex numbers in c, +/// and store the results in dst using writemask k (the element is copied from a when the corresponding mask +/// bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point +/// elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`. +/// +/// Rounding is done according to the rounding parameter, which can be one of: +/// +/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions +/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions +/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions +/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions +/// _MM_FROUND_CUR_DIRECTION +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmadd_round_pch) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vfmaddcph, ROUNDING = 8))] +#[rustc_legacy_const_generics(4)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_mask_fmadd_round_pch( + a: __m512h, + k: __mmask16, + b: __m512h, + c: __m512h, +) -> __m512h { + static_assert_rounding!(ROUNDING); + let r: __m512 = transmute(_mm512_mask3_fmadd_round_pch::(a, b, c, k)); // using `0xffff` would have been fine here, but this is what Clang does + transmute(simd_select_bitmask(k, r, transmute(a))) +} + +/// Multiply packed complex numbers in a and b, accumulate to the corresponding complex numbers in c, +/// and store the results in dst using writemask k (the element is copied from c when the corresponding +/// mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) +/// floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`.
+/// +/// Rounding is done according to the rounding parameter, which can be one of: +/// +/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions +/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions +/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions +/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions +/// _MM_FROUND_CUR_DIRECTION +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fmadd_round_pch) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vfmaddcph, ROUNDING = 8))] +#[rustc_legacy_const_generics(4)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_mask3_fmadd_round_pch( + a: __m512h, + b: __m512h, + c: __m512h, + k: __mmask16, +) -> __m512h { + static_assert_rounding!(ROUNDING); + transmute(vfmaddcph_mask3_512( + transmute(a), + transmute(b), + transmute(c), + k, + ROUNDING, + )) +} + +/// Multiply packed complex numbers in a and b, accumulate to the corresponding complex numbers in c, +/// and store the results in dst using zeromask k (the element is zeroed out when the corresponding mask +/// bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point +/// elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`. +/// +/// Rounding is done according to the rounding parameter, which can be one of: +/// +/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions +/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions +/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions +/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions +/// _MM_FROUND_CUR_DIRECTION +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmadd_round_pch) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vfmaddcph, ROUNDING = 8))] +#[rustc_legacy_const_generics(4)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_maskz_fmadd_round_pch( + k: __mmask16, + a: __m512h, + b: __m512h, + c: __m512h, +) -> __m512h { + static_assert_rounding!(ROUNDING); + transmute(vfmaddcph_maskz_512( + transmute(a), + transmute(b), + transmute(c), + k, + ROUNDING, + )) +} + +/// Multiply the lower complex numbers in a and b, accumulate to the lower complex number in c, and +/// store the result in the lower elements of dst, and copy the upper 6 packed elements from a to the +/// upper elements of dst. Each complex number is composed of two adjacent half-precision (16-bit) +/// floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`. 
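As with the multiply intrinsics, a hypothetical caller-side sketch of pinning the rounding mode for `_mm512_fmadd_round_pch` (nightly-only, assumes `core::arch::x86_64::*` in scope):

#[target_feature(enable = "avx512fp16")]
unsafe fn fmadd_round_trunc(a: __m512h, b: __m512h, c: __m512h) -> __m512h {
    // Truncate and suppress exceptions; any combination listed above is valid.
    _mm512_fmadd_round_pch::<{ _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC }>(a, b, c)
}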
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmadd_sch) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vfmaddcsh))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_fmadd_sch(a: __m128h, b: __m128h, c: __m128h) -> __m128h { + _mm_fmadd_round_sch::<_MM_FROUND_CUR_DIRECTION>(a, b, c) +} + +/// Multiply the lower complex numbers in a and b, accumulate to the lower complex number in c, and +/// store the result in the lower elements of dst using writemask k (elements are copied from a when +/// mask bit 0 is not set), and copy the upper 6 packed elements from a to the upper elements of dst. +/// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, +/// which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmadd_sch) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vfmaddcsh))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mask_fmadd_sch(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h { + _mm_mask_fmadd_round_sch::<_MM_FROUND_CUR_DIRECTION>(a, k, b, c) +} + +/// Multiply the lower complex numbers in a and b, accumulate to the lower complex number in c, and +/// store the result in the lower elements of dst using writemask k (elements are copied from c when +/// mask bit 0 is not set), and copy the upper 6 packed elements from a to the upper elements of dst. +/// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, +/// which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fmadd_sch) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vfmaddcsh))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mask3_fmadd_sch(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h { + _mm_mask3_fmadd_round_sch::<_MM_FROUND_CUR_DIRECTION>(a, b, c, k) +} + +/// Multiply the lower complex numbers in a and b, accumulate to the lower complex number in c, and +/// store the result in the lower elements of dst using zeromask k (elements are zeroed out when mask +/// bit 0 is not set), and copy the upper 6 packed elements from a to the upper elements of dst. Each +/// complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which +/// defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmadd_sch) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vfmaddcsh))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_maskz_fmadd_sch(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h { + _mm_maskz_fmadd_round_sch::<_MM_FROUND_CUR_DIRECTION>(k, a, b, c) +} + +/// Multiply the lower complex numbers in a and b, accumulate to the lower complex number in c, and +/// store the result in the lower elements of dst. 
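The scalar-complex (`sch`) forms compute only fp16[0..2]; the other six lanes pass through from `a`. A scalar model (f32 stand-ins, illustrative only):

fn fmadd_sch_model(a: [f32; 8], b: [f32; 8], c: [f32; 8]) -> [f32; 8] {
    let mut r = a; // upper 6 elements are copied from `a`
    r[0] = a[0] * b[0] - a[1] * b[1] + c[0];
    r[1] = a[0] * b[1] + a[1] * b[0] + c[1];
    r
}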
Each complex number is composed of two adjacent +/// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`. +/// +/// Rounding is done according to the rounding parameter, which can be one of: +/// +/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions +/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions +/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions +/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions +/// _MM_FROUND_CUR_DIRECTION +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmadd_round_sch) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vfmaddcsh, ROUNDING = 8))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_fmadd_round_sch( + a: __m128h, + b: __m128h, + c: __m128h, +) -> __m128h { + static_assert_rounding!(ROUNDING); + transmute(vfmaddcsh_mask( + transmute(a), + transmute(b), + transmute(c), + 0xff, + ROUNDING, + )) +} + +/// Multiply the lower complex numbers in a and b, accumulate to the lower complex number in c, and +/// store the result in the lower elements of dst using writemask k (elements are copied from a when +/// mask bit 0 is not set), and copy the upper 6 packed elements from a to the upper elements of dst. +/// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, +/// which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`. +/// +/// Rounding is done according to the rounding parameter, which can be one of: +/// +/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions +/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions +/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions +/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions +/// _MM_FROUND_CUR_DIRECTION +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmadd_round_sch) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vfmaddcsh, ROUNDING = 8))] +#[rustc_legacy_const_generics(4)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mask_fmadd_round_sch( + a: __m128h, + k: __mmask8, + b: __m128h, + c: __m128h, +) -> __m128h { + static_assert_rounding!(ROUNDING); + let a = transmute(a); + let r = vfmaddcsh_mask(a, transmute(b), transmute(c), k, ROUNDING); // using `0xff` would have been fine here, but this is what Clang does + transmute(_mm_mask_move_ss(a, k, a, r)) +} + +/// Multiply the lower complex numbers in a and b, accumulate to the lower complex number in c, and +/// store the result in the lower elements of dst using writemask k (elements are copied from c when +/// mask bit 0 is not set), and copy the upper 6 packed elements from a to the upper elements of dst. +/// Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, +/// which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`.
+/// +/// Rounding is done according to the rounding parameter, which can be one of: +/// +/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions +/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions +/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions +/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions +/// _MM_FROUND_CUR_DIRECTION +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fmadd_round_sch) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vfmaddcsh, ROUNDING = 8))] +#[rustc_legacy_const_generics(4)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mask3_fmadd_round_sch( + a: __m128h, + b: __m128h, + c: __m128h, + k: __mmask8, +) -> __m128h { + static_assert_rounding!(ROUNDING); + let c = transmute(c); + let r = vfmaddcsh_mask(transmute(a), transmute(b), c, k, ROUNDING); + transmute(_mm_move_ss(c, r)) +} + +/// Multiply the lower complex numbers in a and b, accumulate to the lower complex number in c, and +/// store the result in the lower elements of dst using zeromask k (elements are zeroed out when mask +/// bit 0 is not set), and copy the upper 6 packed elements from a to the upper elements of dst. Each +/// complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which +/// defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`. +/// +/// Rounding is done according to the rounding parameter, which can be one of: +/// +/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions +/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions +/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions +/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions +/// _MM_FROUND_CUR_DIRECTION +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmadd_round_sch) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vfmaddcsh, ROUNDING = 8))] +#[rustc_legacy_const_generics(4)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_maskz_fmadd_round_sch( + k: __mmask8, + a: __m128h, + b: __m128h, + c: __m128h, +) -> __m128h { + static_assert_rounding!(ROUNDING); + let a = transmute(a); + let r = vfmaddcsh_maskz(a, transmute(b), transmute(c), k, ROUNDING); + transmute(_mm_move_ss(a, r)) // FIXME: If `k == 0`, then LLVM optimizes `vfmaddcsh_maskz` to output an all-zero vector, which is incorrect +} + +/// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, accumulate +/// to the corresponding complex numbers in c, and store the results in dst. Each complex number is composed +/// of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number +/// `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fcmadd_pch) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vfcmaddcph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_fcmadd_pch(a: __m128h, b: __m128h, c: __m128h) -> __m128h { + _mm_mask3_fcmadd_pch(a, b, c, 0xff) +} + +/// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, accumulate +/// to the corresponding complex numbers in c, and store the results in dst using writemask k (the element is +/// copied from a when the corresponding mask bit is not set). Each complex number is composed of two adjacent +/// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, +/// or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fcmadd_pch) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vfcmaddcph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mask_fcmadd_pch(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h { + let r: __m128 = transmute(_mm_mask3_fcmadd_pch(a, b, c, k)); // using `0xff` would have been fine here, but this is what Clang does + transmute(simd_select_bitmask(k, r, transmute(a))) +} + +/// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, accumulate +/// to the corresponding complex numbers in c, and store the results in dst using writemask k (the element is +/// copied from c when the corresponding mask bit is not set). Each complex number is composed of two adjacent +/// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, +/// or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fcmadd_pch) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vfcmaddcph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mask3_fcmadd_pch(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h { + transmute(vfcmaddcph_mask3_128( + transmute(a), + transmute(b), + transmute(c), + k, + )) +} + +/// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, accumulate +/// to the corresponding complex numbers in c, and store the results in dst using zeromask k (the element is +/// zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent +/// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, +/// or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
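`fcmadd_pch` is the conjugate counterpart, `dst = a * conj(b) + c` per complex element. One element in scalar form (f32 stand-ins, illustrative only):

fn complex_fcmadd(a: (f32, f32), b: (f32, f32), c: (f32, f32)) -> (f32, f32) {
    (a.0 * b.0 + a.1 * b.1 + c.0, a.1 * b.0 - a.0 * b.1 + c.1)
}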
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fcmadd_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfcmaddcph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_fcmadd_pch(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h {
+    transmute(vfcmaddcph_maskz_128(
+        transmute(a),
+        transmute(b),
+        transmute(c),
+        k,
+    ))
+}
+
+/// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, accumulate
+/// to the corresponding complex numbers in c, and store the results in dst. Each complex number is composed
+/// of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number
+/// `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fcmadd_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfcmaddcph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_fcmadd_pch(a: __m256h, b: __m256h, c: __m256h) -> __m256h {
+    _mm256_mask3_fcmadd_pch(a, b, c, 0xff)
+}
+
+/// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, accumulate
+/// to the corresponding complex numbers in c, and store the results in dst using writemask k (the element is
+/// copied from a when the corresponding mask bit is not set). Each complex number is composed of two adjacent
+/// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`,
+/// or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fcmadd_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfcmaddcph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_mask_fcmadd_pch(a: __m256h, k: __mmask8, b: __m256h, c: __m256h) -> __m256h {
+    let r: __m256 = transmute(_mm256_mask3_fcmadd_pch(a, b, c, k)); // using `0xff` would have been fine here, but this is what Clang does
+    transmute(simd_select_bitmask(k, r, transmute(a)))
+}
+
+/// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, accumulate
+/// to the corresponding complex numbers in c, and store the results in dst using writemask k (the element is
+/// copied from c when the corresponding mask bit is not set). Each complex number is composed of two adjacent
+/// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`,
+/// or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask3_fcmadd_pch) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vfcmaddcph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_mask3_fcmadd_pch(a: __m256h, b: __m256h, c: __m256h, k: __mmask8) -> __m256h { + transmute(vfcmaddcph_mask3_256( + transmute(a), + transmute(b), + transmute(c), + k, + )) +} + +/// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, accumulate +/// to the corresponding complex numbers in c, and store the results in dst using zeromask k (the element is +/// zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent +/// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, +/// or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fcmadd_pch) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vfcmaddcph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_maskz_fcmadd_pch(k: __mmask8, a: __m256h, b: __m256h, c: __m256h) -> __m256h { + transmute(vfcmaddcph_maskz_256( + transmute(a), + transmute(b), + transmute(c), + k, + )) +} + +/// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, accumulate +/// to the corresponding complex numbers in c, and store the results in dst. Each complex number is composed +/// of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number +/// `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fcmadd_pch) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vfcmaddcph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_fcmadd_pch(a: __m512h, b: __m512h, c: __m512h) -> __m512h { + _mm512_fcmadd_round_pch::<_MM_FROUND_CUR_DIRECTION>(a, b, c) +} + +/// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, accumulate +/// to the corresponding complex numbers in c, and store the results in dst using writemask k (the element is +/// copied from a when the corresponding mask bit is not set). Each complex number is composed of two adjacent +/// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, +/// or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`. 
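+///
+/// Here too each mask bit selects a whole complex number (one adjacent pair of
+/// `f16` lanes). A rough sketch of the merge, where `fcmadd` denotes the per-pair
+/// `a * conj(b) + c` (illustrative pseudocode, not the implementation):
+///
+/// ```ignore
+/// for i in 0..16 {
+///     dst.pair[i] = if (k >> i) & 1 == 1 { fcmadd(a.pair[i], b.pair[i], c.pair[i]) } else { a.pair[i] };
+/// }
+/// ```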
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fcmadd_pch) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vfcmaddcph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_mask_fcmadd_pch(a: __m512h, k: __mmask16, b: __m512h, c: __m512h) -> __m512h { + _mm512_mask_fcmadd_round_pch::<_MM_FROUND_CUR_DIRECTION>(a, k, b, c) +} + +/// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, accumulate +/// to the corresponding complex numbers in c, and store the results in dst using writemask k (the element is +/// copied from c when the corresponding mask bit is not set). Each complex number is composed of two adjacent +/// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, +/// or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fcmadd_pch) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vfcmaddcph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_mask3_fcmadd_pch(a: __m512h, b: __m512h, c: __m512h, k: __mmask16) -> __m512h { + _mm512_mask3_fcmadd_round_pch::<_MM_FROUND_CUR_DIRECTION>(a, b, c, k) +} + +/// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, accumulate +/// to the corresponding complex numbers in c, and store the results in dst using zeromask k (the element is +/// zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent +/// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, +/// or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fcmadd_pch) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vfcmaddcph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_maskz_fcmadd_pch(k: __mmask16, a: __m512h, b: __m512h, c: __m512h) -> __m512h { + _mm512_maskz_fcmadd_round_pch::<_MM_FROUND_CUR_DIRECTION>(k, a, b, c) +} + +/// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, accumulate +/// to the corresponding complex numbers in c, and store the results in dst. Each complex number is composed +/// of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number +/// `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`. 
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fcmadd_round_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfcmaddcph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_fcmadd_round_pch<const ROUNDING: i32>(
+    a: __m512h,
+    b: __m512h,
+    c: __m512h,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    _mm512_mask3_fcmadd_round_pch::<ROUNDING>(a, b, c, 0xffff)
+}
+
+/// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, accumulate
+/// to the corresponding complex numbers in c, and store the results in dst using writemask k (the element is
+/// copied from a when the corresponding mask bit is not set). Each complex number is composed of two adjacent
+/// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`,
+/// or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fcmadd_round_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfcmaddcph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_fcmadd_round_pch<const ROUNDING: i32>(
+    a: __m512h,
+    k: __mmask16,
+    b: __m512h,
+    c: __m512h,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    let r: __m512 = transmute(_mm512_mask3_fcmadd_round_pch::<ROUNDING>(a, b, c, k)); // using `0xffff` would have been fine here, but this is what Clang does
+    transmute(simd_select_bitmask(k, r, transmute(a)))
+}
+
+/// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, accumulate
+/// to the corresponding complex numbers in c using writemask k (the element is copied from c when the corresponding
+/// mask bit is not set), and store the results in dst. Each complex number is composed of two adjacent half-precision
+/// (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex
+/// conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fcmadd_round_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfcmaddcph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask3_fcmadd_round_pch<const ROUNDING: i32>(
+    a: __m512h,
+    b: __m512h,
+    c: __m512h,
+    k: __mmask16,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    transmute(vfcmaddcph_mask3_512(
+        transmute(a),
+        transmute(b),
+        transmute(c),
+        k,
+        ROUNDING,
+    ))
+}
+
+/// Multiply packed complex numbers in a by the complex conjugates of packed complex numbers in b, accumulate
+/// to the corresponding complex numbers in c using zeromask k (the element is zeroed out when the corresponding
+/// mask bit is not set), and store the results in dst. Each complex number is composed of two adjacent half-precision
+/// (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex
+/// conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fcmadd_round_pch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfcmaddcph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_fcmadd_round_pch<const ROUNDING: i32>(
+    k: __mmask16,
+    a: __m512h,
+    b: __m512h,
+    c: __m512h,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    transmute(vfcmaddcph_maskz_512(
+        transmute(a),
+        transmute(b),
+        transmute(c),
+        k,
+        ROUNDING,
+    ))
+}
+
+/// Multiply the lower complex number in a by the complex conjugate of the lower complex number in b,
+/// accumulate to the lower complex number in c, and store the result in the lower elements of dst,
+/// and copy the upper 6 packed elements from a to the upper elements of dst. Each complex number is
+/// composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex
+/// number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
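+///
+/// Only lanes 0 and 1 (the lower complex number) take part in the computation; a
+/// rough sketch of the result layout, where `fcmadd` denotes the per-pair
+/// `a * conj(b) + c` (illustrative pseudocode, not the implementation):
+///
+/// ```ignore
+/// let (re, im) = fcmadd((a[0], a[1]), (b[0], b[1]), (c[0], c[1]));
+/// dst = [re, im, a[2], a[3], a[4], a[5], a[6], a[7]];
+/// ```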
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fcmadd_sch) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vfcmaddcsh))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_fcmadd_sch(a: __m128h, b: __m128h, c: __m128h) -> __m128h { + _mm_fcmadd_round_sch::<_MM_FROUND_CUR_DIRECTION>(a, b, c) +} + +/// Multiply the lower complex number in a by the complex conjugate of the lower complex number in b, +/// accumulate to the lower complex number in c, and store the result in the lower elements of dst using +/// writemask k (the element is copied from a when the corresponding mask bit is not set), and copy the upper +/// 6 packed elements from a to the upper elements of dst. Each complex number is composed of two adjacent +/// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, +/// or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fcmadd_sch) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vfcmaddcsh))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mask_fcmadd_sch(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h { + _mm_mask_fcmadd_round_sch::<_MM_FROUND_CUR_DIRECTION>(a, k, b, c) +} + +/// Multiply the lower complex number in a by the complex conjugate of the lower complex number in b, +/// accumulate to the lower complex number in c, and store the result in the lower elements of dst using +/// writemask k (the element is copied from c when the corresponding mask bit is not set), and copy the upper +/// 6 packed elements from a to the upper elements of dst. Each complex number is composed of two adjacent +/// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, +/// or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fcmadd_sch) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vfcmaddcsh))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mask3_fcmadd_sch(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h { + _mm_mask3_fcmadd_round_sch::<_MM_FROUND_CUR_DIRECTION>(a, b, c, k) +} + +/// Multiply the lower complex number in a by the complex conjugate of the lower complex number in b, +/// accumulate to the lower complex number in c, and store the result in the lower elements of dst using +/// zeromask k (the element is zeroed out when the corresponding mask bit is not set), and copy the upper +/// 6 packed elements from a to the upper elements of dst. Each complex number is composed of two adjacent +/// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, +/// or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`. 
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fcmadd_sch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfcmaddcsh))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_fcmadd_sch(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h {
+    _mm_maskz_fcmadd_round_sch::<_MM_FROUND_CUR_DIRECTION>(k, a, b, c)
+}
+
+/// Multiply the lower complex number in a by the complex conjugate of the lower complex number in b,
+/// accumulate to the lower complex number in c, and store the result in the lower elements of dst,
+/// and copy the upper 6 packed elements from a to the upper elements of dst. Each complex number is
+/// composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex
+/// number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fcmadd_round_sch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfcmaddcsh, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_fcmadd_round_sch<const ROUNDING: i32>(
+    a: __m128h,
+    b: __m128h,
+    c: __m128h,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    transmute(vfcmaddcsh_mask(
+        transmute(a),
+        transmute(b),
+        transmute(c),
+        0xff,
+        ROUNDING,
+    ))
+}
+
+/// Multiply the lower complex number in a by the complex conjugate of the lower complex number in b,
+/// accumulate to the lower complex number in c, and store the result in the lower elements of dst using
+/// writemask k (the element is copied from a when the corresponding mask bit is not set), and copy the upper
+/// 6 packed elements from a to the upper elements of dst. Each complex number is composed of two adjacent
+/// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`,
+/// or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fcmadd_round_sch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfcmaddcsh, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_fcmadd_round_sch<const ROUNDING: i32>(
+    a: __m128h,
+    k: __mmask8,
+    b: __m128h,
+    c: __m128h,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    let a = transmute(a);
+    let r = vfcmaddcsh_mask(a, transmute(b), transmute(c), k, ROUNDING);
+    transmute(_mm_mask_move_ss(a, k, a, r))
+}
+
+/// Multiply the lower complex number in a by the complex conjugate of the lower complex number in b,
+/// accumulate to the lower complex number in c, and store the result in the lower elements of dst using
+/// writemask k (the element is copied from c when the corresponding mask bit is not set), and copy the upper
+/// 6 packed elements from a to the upper elements of dst. Each complex number is composed of two adjacent
+/// half-precision (16-bit) floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`,
+/// or the complex conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fcmadd_round_sch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfcmaddcsh, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask3_fcmadd_round_sch<const ROUNDING: i32>(
+    a: __m128h,
+    b: __m128h,
+    c: __m128h,
+    k: __mmask8,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    let c = transmute(c);
+    let r = vfcmaddcsh_mask(transmute(a), transmute(b), c, k, ROUNDING);
+    transmute(_mm_move_ss(c, r))
+}
+
+/// Multiply the lower complex number in a by the complex conjugate of the lower complex number in b,
+/// accumulate to the lower complex number in c using zeromask k (the element is zeroed out when the corresponding
+/// mask bit is not set), and store the result in the lower elements of dst, and copy the upper 6 packed elements
+/// from a to the upper elements of dst. Each complex number is composed of two adjacent half-precision (16-bit)
+/// floating-point elements, which defines the complex number `complex = vec.fp16[0] + i * vec.fp16[1]`, or the complex
+/// conjugate `conjugate = vec.fp16[0] - i * vec.fp16[1]`.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fcmadd_round_sch)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfcmaddcsh, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_fcmadd_round_sch<const ROUNDING: i32>(
+    k: __mmask8,
+    a: __m128h,
+    b: __m128h,
+    c: __m128h,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    let a = transmute(a);
+    let r = vfcmaddcsh_maskz(a, transmute(b), transmute(c), k, ROUNDING);
+    transmute(_mm_move_ss(a, r)) // FIXME: If `k == 0`, then LLVM optimizes `vfcmaddcsh_maskz` to output an all-zero vector, which is incorrect
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, add the intermediate
+/// result to packed elements in c, and store the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmadd_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfmadd))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_fmadd_ph(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
+    simd_fma(a, b, c)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, add the intermediate
+/// result to packed elements in c, and store the results in dst using writemask k (the element is copied
+/// from a when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmadd_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfmadd))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_fmadd_ph(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h {
+    simd_select_bitmask(k, _mm_fmadd_ph(a, b, c), a)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, add the intermediate
+/// result to packed elements in c, and store the results in dst using writemask k (the element is copied
+/// from c when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fmadd_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfmadd))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask3_fmadd_ph(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h {
+    simd_select_bitmask(k, _mm_fmadd_ph(a, b, c), c)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, add the intermediate
+/// result to packed elements in c, and store the results in dst using zeromask k (the element is zeroed
+/// out when the corresponding mask bit is not set).
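+///
+/// A rough per-lane sketch of the zeromask behaviour, where the multiply-add is
+/// fused, i.e. rounded only once (illustrative pseudocode, not the implementation):
+///
+/// ```ignore
+/// for i in 0..8 {
+///     dst[i] = if (k >> i) & 1 == 1 { a[i] * b[i] + c[i] } else { 0.0 };
+/// }
+/// ```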
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmadd_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vfmadd))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_maskz_fmadd_ph(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h { + simd_select_bitmask(k, _mm_fmadd_ph(a, b, c), _mm_setzero_ph()) +} + +/// Multiply packed half-precision (16-bit) floating-point elements in a and b, add the intermediate +/// result to packed elements in c, and store the results in dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fmadd_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vfmadd))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_fmadd_ph(a: __m256h, b: __m256h, c: __m256h) -> __m256h { + simd_fma(a, b, c) +} + +/// Multiply packed half-precision (16-bit) floating-point elements in a and b, add the intermediate +/// result to packed elements in c, and store the results in dst using writemask k (the element is copied +/// from a when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fmadd_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vfmadd))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_mask_fmadd_ph(a: __m256h, k: __mmask16, b: __m256h, c: __m256h) -> __m256h { + simd_select_bitmask(k, _mm256_fmadd_ph(a, b, c), a) +} + +/// Multiply packed half-precision (16-bit) floating-point elements in a and b, add the intermediate +/// result to packed elements in c, and store the results in dst using writemask k (the element is copied +/// from c when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask3_fmadd_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vfmadd))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_mask3_fmadd_ph(a: __m256h, b: __m256h, c: __m256h, k: __mmask16) -> __m256h { + simd_select_bitmask(k, _mm256_fmadd_ph(a, b, c), c) +} + +/// Multiply packed half-precision (16-bit) floating-point elements in a and b, add the intermediate +/// result to packed elements in c, and store the results in dst using zeromask k (the element is zeroed +/// out when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fmadd_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vfmadd))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_maskz_fmadd_ph(k: __mmask16, a: __m256h, b: __m256h, c: __m256h) -> __m256h { + simd_select_bitmask(k, _mm256_fmadd_ph(a, b, c), _mm256_setzero_ph()) +} + +/// Multiply packed half-precision (16-bit) floating-point elements in a and b, add the intermediate +/// result to packed elements in c, and store the results in dst. 
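+///
+/// Each of the 32 lanes computes a fused multiply-add, i.e. `a[i] * b[i] + c[i]`
+/// with a single rounding step, akin to `mul_add` on the scalar float types
+/// (illustrative pseudocode, not the implementation):
+///
+/// ```ignore
+/// for i in 0..32 {
+///     dst[i] = a[i].mul_add(b[i], c[i]);
+/// }
+/// ```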
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmadd_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vfmadd))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_fmadd_ph(a: __m512h, b: __m512h, c: __m512h) -> __m512h { + simd_fma(a, b, c) +} + +/// Multiply packed half-precision (16-bit) floating-point elements in a and b, add the intermediate +/// result to packed elements in c, and store the results in dst using writemask k (the element is copied +/// from a when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmadd_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vfmadd))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_mask_fmadd_ph(a: __m512h, k: __mmask32, b: __m512h, c: __m512h) -> __m512h { + simd_select_bitmask(k, _mm512_fmadd_ph(a, b, c), a) +} + +/// Multiply packed half-precision (16-bit) floating-point elements in a and b, add the intermediate +/// result to packed elements in c, and store the results in dst using writemask k (the element is copied +/// from c when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fmadd_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vfmadd))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_mask3_fmadd_ph(a: __m512h, b: __m512h, c: __m512h, k: __mmask32) -> __m512h { + simd_select_bitmask(k, _mm512_fmadd_ph(a, b, c), c) +} + +/// Multiply packed half-precision (16-bit) floating-point elements in a and b, add the intermediate +/// result to packed elements in c, and store the results in dst using zeromask k (the element is zeroed +/// out when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmadd_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vfmadd))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_maskz_fmadd_ph(k: __mmask32, a: __m512h, b: __m512h, c: __m512h) -> __m512h { + simd_select_bitmask(k, _mm512_fmadd_ph(a, b, c), _mm512_setzero_ph()) +} + +/// Multiply packed half-precision (16-bit) floating-point elements in a and b, add the intermediate +/// result to packed elements in c, and store the results in dst. 
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmadd_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_fmadd_round_ph<const ROUNDING: i32>(
+    a: __m512h,
+    b: __m512h,
+    c: __m512h,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    vfmaddph_512(a, b, c, ROUNDING)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, add the intermediate
+/// result to packed elements in c, and store the results in dst using writemask k (the element is copied
+/// from a when the corresponding mask bit is not set).
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmadd_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_fmadd_round_ph<const ROUNDING: i32>(
+    a: __m512h,
+    k: __mmask32,
+    b: __m512h,
+    c: __m512h,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    simd_select_bitmask(k, _mm512_fmadd_round_ph::<ROUNDING>(a, b, c), a)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, add the intermediate
+/// result to packed elements in c, and store the results in dst using writemask k (the element is copied
+/// from c when the corresponding mask bit is not set).
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fmadd_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask3_fmadd_round_ph<const ROUNDING: i32>(
+    a: __m512h,
+    b: __m512h,
+    c: __m512h,
+    k: __mmask32,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    simd_select_bitmask(k, _mm512_fmadd_round_ph::<ROUNDING>(a, b, c), c)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, add the intermediate
+/// result to packed elements in c, and store the results in dst using zeromask k (the element is zeroed
+/// out when the corresponding mask bit is not set).
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmadd_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_fmadd_round_ph<const ROUNDING: i32>(
+    k: __mmask32,
+    a: __m512h,
+    b: __m512h,
+    c: __m512h,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    simd_select_bitmask(
+        k,
+        _mm512_fmadd_round_ph::<ROUNDING>(a, b, c),
+        _mm512_setzero_ph(),
+    )
+}
+
+/// Multiply the lower half-precision (16-bit) floating-point elements in a and b, and add the intermediate
+/// result to the lower element in c. Store the result in the lower element of dst, and copy the upper
+/// 7 packed elements from a to the upper elements of dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmadd_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmadd))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_fmadd_sh(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
+    let extracta: f16 = simd_extract!(a, 0);
+    let extractb: f16 = simd_extract!(b, 0);
+    let extractc: f16 = simd_extract!(c, 0);
+    let r = fmaf16(extracta, extractb, extractc);
+    simd_insert!(a, 0, r)
+}
+
+/// Multiply the lower half-precision (16-bit) floating-point elements in a and b, and add the intermediate
+/// result to the lower element in c. Store the result in the lower element of dst using writemask k (the element
+/// is copied from a when the mask bit 0 is not set), and copy the upper 7 packed elements from a to the
+/// upper elements of dst.
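+///
+/// Only mask bit 0 is consulted; a rough sketch of the fused computation
+/// (illustrative pseudocode, not the implementation):
+///
+/// ```ignore
+/// let lo = if k & 1 == 1 { a[0] * b[0] + c[0] } else { a[0] };
+/// dst = [lo, a[1], a[2], a[3], a[4], a[5], a[6], a[7]];
+/// ```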
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmadd_sh) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vfmadd))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mask_fmadd_sh(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h { + let mut fmadd: f16 = simd_extract!(a, 0); + if k & 1 != 0 { + let extractb: f16 = simd_extract!(b, 0); + let extractc: f16 = simd_extract!(c, 0); + fmadd = fmaf16(fmadd, extractb, extractc); + } + simd_insert!(a, 0, fmadd) +} + +/// Multiply the lower half-precision (16-bit) floating-point elements in a and b, and add the intermediate +/// result to the lower element in c. Store the result in the lower element of dst using writemask k (the element +/// is copied from c when the mask bit 0 is not set), and copy the upper 7 packed elements from c to the +/// upper elements of dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fmadd_sh) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vfmadd))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mask3_fmadd_sh(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h { + let mut fmadd: f16 = simd_extract!(c, 0); + if k & 1 != 0 { + let extracta: f16 = simd_extract!(a, 0); + let extractb: f16 = simd_extract!(b, 0); + fmadd = fmaf16(extracta, extractb, fmadd); + } + simd_insert!(c, 0, fmadd) +} + +/// Multiply the lower half-precision (16-bit) floating-point elements in a and b, and add the intermediate +/// result to the lower element in c. Store the result in the lower element of dst using zeromask k (the element +/// is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from a to the +/// upper elements of dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmadd_sh) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vfmadd))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_maskz_fmadd_sh(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h { + let mut fmadd: f16 = 0.0; + if k & 1 != 0 { + let extracta: f16 = simd_extract!(a, 0); + let extractb: f16 = simd_extract!(b, 0); + let extractc: f16 = simd_extract!(c, 0); + fmadd = fmaf16(extracta, extractb, extractc); + } + simd_insert!(a, 0, fmadd) +} + +/// Multiply the lower half-precision (16-bit) floating-point elements in a and b, and add the intermediate +/// result to the lower element in c. Store the result in the lower element of dst, and copy the upper +/// 7 packed elements from a to the upper elements of dst. 
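+///
+/// The rounding mode is passed as a const generic; an illustrative call using one
+/// of the modes listed below (hypothetical vectors `a`, `b`, `c`):
+///
+/// ```ignore
+/// let r = _mm_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c);
+/// ```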
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmadd_round_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_fmadd_round_sh<const ROUNDING: i32>(
+    a: __m128h,
+    b: __m128h,
+    c: __m128h,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    let extracta: f16 = simd_extract!(a, 0);
+    let extractb: f16 = simd_extract!(b, 0);
+    let extractc: f16 = simd_extract!(c, 0);
+    let r = vfmaddsh(extracta, extractb, extractc, ROUNDING);
+    simd_insert!(a, 0, r)
+}
+
+/// Multiply the lower half-precision (16-bit) floating-point elements in a and b, and add the intermediate
+/// result to the lower element in c. Store the result in the lower element of dst using writemask k (the element
+/// is copied from a when the mask bit 0 is not set), and copy the upper 7 packed elements from a to the
+/// upper elements of dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmadd_round_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_fmadd_round_sh<const ROUNDING: i32>(
+    a: __m128h,
+    k: __mmask8,
+    b: __m128h,
+    c: __m128h,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    let mut fmadd: f16 = simd_extract!(a, 0);
+    if k & 1 != 0 {
+        let extractb: f16 = simd_extract!(b, 0);
+        let extractc: f16 = simd_extract!(c, 0);
+        fmadd = vfmaddsh(fmadd, extractb, extractc, ROUNDING);
+    }
+    simd_insert!(a, 0, fmadd)
+}
+
+/// Multiply the lower half-precision (16-bit) floating-point elements in a and b, and add the intermediate
+/// result to the lower element in c. Store the result in the lower element of dst using writemask k (the element
+/// is copied from c when the mask bit 0 is not set), and copy the upper 7 packed elements from c to the
+/// upper elements of dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fmadd_round_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask3_fmadd_round_sh<const ROUNDING: i32>(
+    a: __m128h,
+    b: __m128h,
+    c: __m128h,
+    k: __mmask8,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    let mut fmadd: f16 = simd_extract!(c, 0);
+    if k & 1 != 0 {
+        let extracta: f16 = simd_extract!(a, 0);
+        let extractb: f16 = simd_extract!(b, 0);
+        fmadd = vfmaddsh(extracta, extractb, fmadd, ROUNDING);
+    }
+    simd_insert!(c, 0, fmadd)
+}
+
+/// Multiply the lower half-precision (16-bit) floating-point elements in a and b, and add the intermediate
+/// result to the lower element in c. Store the result in the lower element of dst using zeromask k (the element
+/// is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from a to the
+/// upper elements of dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmadd_round_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmadd, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_fmadd_round_sh<const ROUNDING: i32>(
+    k: __mmask8,
+    a: __m128h,
+    b: __m128h,
+    c: __m128h,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    let mut fmadd: f16 = 0.0;
+    if k & 1 != 0 {
+        let extracta: f16 = simd_extract!(a, 0);
+        let extractb: f16 = simd_extract!(b, 0);
+        let extractc: f16 = simd_extract!(c, 0);
+        fmadd = vfmaddsh(extracta, extractb, extractc, ROUNDING);
+    }
+    simd_insert!(a, 0, fmadd)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract packed elements
+/// in c from the intermediate result, and store the results in dst.
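+///
+/// Per lane this is the fused `a[i] * b[i] - c[i]`, i.e. the same FMA with `c`
+/// negated (illustrative pseudocode, not the implementation):
+///
+/// ```ignore
+/// for i in 0..8 {
+///     dst[i] = a[i].mul_add(b[i], -c[i]);
+/// }
+/// ```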
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmsub_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfmsub))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_fmsub_ph(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
+    simd_fma(a, b, simd_neg(c))
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract packed elements
+/// in c from the intermediate result, and store the results in dst using writemask k (the element is copied
+/// from a when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmsub_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfmsub))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_fmsub_ph(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h {
+    simd_select_bitmask(k, _mm_fmsub_ph(a, b, c), a)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract packed elements
+/// in c from the intermediate result, and store the results in dst using writemask k (the element is copied
+/// from c when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fmsub_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfmsub))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask3_fmsub_ph(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h {
+    simd_select_bitmask(k, _mm_fmsub_ph(a, b, c), c)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract packed elements
+/// in c from the intermediate result, and store the results in dst using zeromask k (the element is zeroed
+/// out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmsub_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfmsub))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_fmsub_ph(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h {
+    simd_select_bitmask(k, _mm_fmsub_ph(a, b, c), _mm_setzero_ph())
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract packed elements
+/// in c from the intermediate result, and store the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fmsub_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfmsub))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_fmsub_ph(a: __m256h, b: __m256h, c: __m256h) -> __m256h {
+    simd_fma(a, b, simd_neg(c))
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract packed elements
+/// in c from the intermediate result, and store the results in dst using writemask k (the element is copied
+/// from a when the corresponding mask bit is not set).
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fmsub_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vfmsub))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_mask_fmsub_ph(a: __m256h, k: __mmask16, b: __m256h, c: __m256h) -> __m256h { + simd_select_bitmask(k, _mm256_fmsub_ph(a, b, c), a) +} + +/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract packed elements +/// in c from the intermediate result, and store the results in dst using writemask k (the element is copied +/// from c when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask3_fmsub_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vfmsub))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_mask3_fmsub_ph(a: __m256h, b: __m256h, c: __m256h, k: __mmask16) -> __m256h { + simd_select_bitmask(k, _mm256_fmsub_ph(a, b, c), c) +} + +/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract packed elements +/// in c from the intermediate result, and store the results in dst using zeromask k (the element is zeroed +/// out when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fmsub_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vfmsub))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_maskz_fmsub_ph(k: __mmask16, a: __m256h, b: __m256h, c: __m256h) -> __m256h { + simd_select_bitmask(k, _mm256_fmsub_ph(a, b, c), _mm256_setzero_ph()) +} + +/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract packed elements +/// in c from the intermediate result, and store the results in dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmsub_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vfmsub))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_fmsub_ph(a: __m512h, b: __m512h, c: __m512h) -> __m512h { + simd_fma(a, b, simd_neg(c)) +} + +/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract packed elements +/// in c from the intermediate result, and store the results in dst using writemask k (the element is copied +/// from a when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmsub_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vfmsub))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_mask_fmsub_ph(a: __m512h, k: __mmask32, b: __m512h, c: __m512h) -> __m512h { + simd_select_bitmask(k, _mm512_fmsub_ph(a, b, c), a) +} + +/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract packed elements +/// in c from the intermediate result, and store the results in dst using writemask k (the element is copied +/// from c when the corresponding mask bit is not set). 
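+///
+/// A rough per-lane sketch of the `mask3` merge (illustrative pseudocode, not the
+/// implementation):
+///
+/// ```ignore
+/// for i in 0..32 {
+///     dst[i] = if (k >> i) & 1 == 1 { a[i] * b[i] - c[i] } else { c[i] };
+/// }
+/// ```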
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fmsub_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmsub))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask3_fmsub_ph(a: __m512h, b: __m512h, c: __m512h, k: __mmask32) -> __m512h {
+    simd_select_bitmask(k, _mm512_fmsub_ph(a, b, c), c)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract packed elements
+/// in c from the intermediate result, and store the results in dst using zeromask k (the element is zeroed
+/// out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmsub_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmsub))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_fmsub_ph(k: __mmask32, a: __m512h, b: __m512h, c: __m512h) -> __m512h {
+    simd_select_bitmask(k, _mm512_fmsub_ph(a, b, c), _mm512_setzero_ph())
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract packed elements
+/// in c from the intermediate result, and store the results in dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmsub_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmsub, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_fmsub_round_ph<const ROUNDING: i32>(
+    a: __m512h,
+    b: __m512h,
+    c: __m512h,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    vfmaddph_512(a, b, simd_neg(c), ROUNDING)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract packed elements
+/// in c from the intermediate result, and store the results in dst using writemask k (the element is copied
+/// from a when the corresponding mask bit is not set).
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract packed elements
+/// in c from the intermediate result, and store the results in dst using writemask k (the element is copied
+/// from a when the corresponding mask bit is not set).
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmsub_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmsub, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_fmsub_round_ph<const ROUNDING: i32>(
+    a: __m512h,
+    k: __mmask32,
+    b: __m512h,
+    c: __m512h,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    simd_select_bitmask(k, _mm512_fmsub_round_ph::<ROUNDING>(a, b, c), a)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract packed elements
+/// in c from the intermediate result, and store the results in dst using writemask k (the element is copied
+/// from c when the corresponding mask bit is not set).
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fmsub_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmsub, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask3_fmsub_round_ph<const ROUNDING: i32>(
+    a: __m512h,
+    b: __m512h,
+    c: __m512h,
+    k: __mmask32,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    simd_select_bitmask(k, _mm512_fmsub_round_ph::<ROUNDING>(a, b, c), c)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract packed elements
+/// in c from the intermediate result, and store the results in dst using zeromask k (the element is zeroed
+/// out when the corresponding mask bit is not set).
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmsub_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmsub, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_fmsub_round_ph<const ROUNDING: i32>(
+    k: __mmask32,
+    a: __m512h,
+    b: __m512h,
+    c: __m512h,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    simd_select_bitmask(
+        k,
+        _mm512_fmsub_round_ph::<ROUNDING>(a, b, c),
+        _mm512_setzero_ph(),
+    )
+}
+
+/// Multiply the lower half-precision (16-bit) floating-point elements in a and b, and subtract the lower
+/// element in c from the intermediate result. Store the result in the lower element of dst, and copy the upper
+/// 7 packed elements from a to the upper elements of dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmsub_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmsub))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_fmsub_sh(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
+    let extracta: f16 = simd_extract!(a, 0);
+    let extractb: f16 = simd_extract!(b, 0);
+    let extractc: f16 = simd_extract!(c, 0);
+    let r = fmaf16(extracta, extractb, -extractc);
+    simd_insert!(a, 0, r)
+}
+
+/// Multiply the lower half-precision (16-bit) floating-point elements in a and b, and subtract the lower
+/// element in c from the intermediate result. Store the result in the lower element of dst using writemask k
+/// (the element is copied from a when the mask bit 0 is not set), and copy the upper 7 packed elements from
+/// a to the upper elements of dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmsub_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmsub))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_fmsub_sh(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h {
+    let mut fmsub: f16 = simd_extract!(a, 0);
+    if k & 1 != 0 {
+        let extractb: f16 = simd_extract!(b, 0);
+        let extractc: f16 = simd_extract!(c, 0);
+        fmsub = fmaf16(fmsub, extractb, -extractc);
+    }
+    simd_insert!(a, 0, fmsub)
+}
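Lane behavior of the scalar `_sh` forms, modeled on plain arrays (`f32` standing in for `f16`; a sketch for intuition, not part of the patch): only lane 0 is computed, and lanes 1..8 pass through from `a`.

// Model of _mm_fmsub_sh: lower lane computes a*b - c, upper lanes copy a.
fn fmsub_sh_model(a: [f32; 8], b: [f32; 8], c: [f32; 8]) -> [f32; 8] {
    let mut dst = a;                    // upper 7 lanes come straight from a
    dst[0] = a[0].mul_add(b[0], -c[0]); // lower lane: fused a*b - c
    dst
}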
+
+/// Multiply the lower half-precision (16-bit) floating-point elements in a and b, and subtract the lower
+/// element in c from the intermediate result. Store the result in the lower element of dst using writemask k
+/// (the element is copied from c when the mask bit 0 is not set), and copy the upper 7 packed elements from
+/// c to the upper elements of dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fmsub_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmsub))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask3_fmsub_sh(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h {
+    let mut fmsub: f16 = simd_extract!(c, 0);
+    if k & 1 != 0 {
+        let extracta: f16 = simd_extract!(a, 0);
+        let extractb: f16 = simd_extract!(b, 0);
+        fmsub = fmaf16(extracta, extractb, -fmsub);
+    }
+    simd_insert!(c, 0, fmsub)
+}
+
+/// Multiply the lower half-precision (16-bit) floating-point elements in a and b, and subtract the lower
+/// element in c from the intermediate result. Store the result in the lower element of dst using zeromask k
+/// (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from a to the
+/// upper elements of dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmsub_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmsub))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_fmsub_sh(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h {
+    let mut fmsub: f16 = 0.0;
+    if k & 1 != 0 {
+        let extracta: f16 = simd_extract!(a, 0);
+        let extractb: f16 = simd_extract!(b, 0);
+        let extractc: f16 = simd_extract!(c, 0);
+        fmsub = fmaf16(extracta, extractb, -extractc);
+    }
+    simd_insert!(a, 0, fmsub)
+}
+
+/// Multiply the lower half-precision (16-bit) floating-point elements in a and b, and subtract the lower
+/// element in c from the intermediate result. Store the result in the lower element of dst, and copy the upper
+/// 7 packed elements from a to the upper elements of dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmsub_round_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmsub, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_fmsub_round_sh<const ROUNDING: i32>(
+    a: __m128h,
+    b: __m128h,
+    c: __m128h,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    let extracta: f16 = simd_extract!(a, 0);
+    let extractb: f16 = simd_extract!(b, 0);
+    let extractc: f16 = simd_extract!(c, 0);
+    let r = vfmaddsh(extracta, extractb, -extractc, ROUNDING);
+    simd_insert!(a, 0, r)
+}
+
+/// Multiply the lower half-precision (16-bit) floating-point elements in a and b, and subtract the lower
+/// element in c from the intermediate result. Store the result in the lower element of dst using writemask k
+/// (the element is copied from a when the mask bit 0 is not set), and copy the upper 7 packed elements from
+/// a to the upper elements of dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmsub_round_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmsub, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_fmsub_round_sh<const ROUNDING: i32>(
+    a: __m128h,
+    k: __mmask8,
+    b: __m128h,
+    c: __m128h,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    let mut fmsub: f16 = simd_extract!(a, 0);
+    if k & 1 != 0 {
+        let extractb: f16 = simd_extract!(b, 0);
+        let extractc: f16 = simd_extract!(c, 0);
+        fmsub = vfmaddsh(fmsub, extractb, -extractc, ROUNDING);
+    }
+    simd_insert!(a, 0, fmsub)
+}
+
+/// Multiply the lower half-precision (16-bit) floating-point elements in a and b, and subtract the lower
+/// element in c from the intermediate result. Store the result in the lower element of dst using writemask k
+/// (the element is copied from c when the mask bit 0 is not set), and copy the upper 7 packed elements from
+/// c to the upper elements of dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fmsub_round_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmsub, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask3_fmsub_round_sh<const ROUNDING: i32>(
+    a: __m128h,
+    b: __m128h,
+    c: __m128h,
+    k: __mmask8,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    let mut fmsub: f16 = simd_extract!(c, 0);
+    if k & 1 != 0 {
+        let extracta: f16 = simd_extract!(a, 0);
+        let extractb: f16 = simd_extract!(b, 0);
+        fmsub = vfmaddsh(extracta, extractb, -fmsub, ROUNDING);
+    }
+    simd_insert!(c, 0, fmsub)
+}
+
+/// Multiply the lower half-precision (16-bit) floating-point elements in a and b, and subtract the lower
+/// element in c from the intermediate result. Store the result in the lower element of dst using zeromask k
+/// (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from a to the
+/// upper elements of dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmsub_round_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmsub, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_fmsub_round_sh<const ROUNDING: i32>(
+    k: __mmask8,
+    a: __m128h,
+    b: __m128h,
+    c: __m128h,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    let mut fmsub: f16 = 0.0;
+    if k & 1 != 0 {
+        let extracta: f16 = simd_extract!(a, 0);
+        let extractb: f16 = simd_extract!(b, 0);
+        let extractc: f16 = simd_extract!(c, 0);
+        fmsub = vfmaddsh(extracta, extractb, -extractc, ROUNDING);
+    }
+    simd_insert!(a, 0, fmsub)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract the intermediate
+/// result from packed elements in c, and store the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fnmadd_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfnmadd))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_fnmadd_ph(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
+    simd_fma(simd_neg(a), b, c)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract the intermediate
+/// result from packed elements in c, and store the results in dst using writemask k (the element is copied
+/// from a when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fnmadd_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfnmadd))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_fnmadd_ph(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h {
+    simd_select_bitmask(k, _mm_fnmadd_ph(a, b, c), a)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract the intermediate
+/// result from packed elements in c, and store the results in dst using writemask k (the element is copied
+/// from c when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fnmadd_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfnmadd))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask3_fnmadd_ph(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h {
+    simd_select_bitmask(k, _mm_fnmadd_ph(a, b, c), c)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract the intermediate
+/// result from packed elements in c, and store the results in dst using zeromask k (the element is zeroed
+/// out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fnmadd_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfnmadd))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_fnmadd_ph(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h {
+    simd_select_bitmask(k, _mm_fnmadd_ph(a, b, c), _mm_setzero_ph())
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract the intermediate
+/// result from packed elements in c, and store the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fnmadd_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfnmadd))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_fnmadd_ph(a: __m256h, b: __m256h, c: __m256h) -> __m256h {
+    simd_fma(simd_neg(a), b, c)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract the intermediate
+/// result from packed elements in c, and store the results in dst using writemask k (the element is copied
+/// from a when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fnmadd_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfnmadd))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_mask_fnmadd_ph(a: __m256h, k: __mmask16, b: __m256h, c: __m256h) -> __m256h {
+    simd_select_bitmask(k, _mm256_fnmadd_ph(a, b, c), a)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract the intermediate
+/// result from packed elements in c, and store the results in dst using writemask k (the element is copied
+/// from c when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask3_fnmadd_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfnmadd))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_mask3_fnmadd_ph(a: __m256h, b: __m256h, c: __m256h, k: __mmask16) -> __m256h {
+    simd_select_bitmask(k, _mm256_fnmadd_ph(a, b, c), c)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract the intermediate
+/// result from packed elements in c, and store the results in dst using zeromask k (the element is zeroed
+/// out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fnmadd_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfnmadd))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_maskz_fnmadd_ph(k: __mmask16, a: __m256h, b: __m256h, c: __m256h) -> __m256h {
+    simd_select_bitmask(k, _mm256_fnmadd_ph(a, b, c), _mm256_setzero_ph())
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract the intermediate
+/// result from packed elements in c, and store the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fnmadd_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfnmadd))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_fnmadd_ph(a: __m512h, b: __m512h, c: __m512h) -> __m512h {
+    simd_fma(simd_neg(a), b, c)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract the intermediate
+/// result from packed elements in c, and store the results in dst using writemask k (the element is copied
+/// from a when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fnmadd_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfnmadd))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_fnmadd_ph(a: __m512h, k: __mmask32, b: __m512h, c: __m512h) -> __m512h {
+    simd_select_bitmask(k, _mm512_fnmadd_ph(a, b, c), a)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract the intermediate
+/// result from packed elements in c, and store the results in dst using writemask k (the element is copied
+/// from c when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fnmadd_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfnmadd))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask3_fnmadd_ph(a: __m512h, b: __m512h, c: __m512h, k: __mmask32) -> __m512h {
+    simd_select_bitmask(k, _mm512_fnmadd_ph(a, b, c), c)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract the intermediate
+/// result from packed elements in c, and store the results in dst using zeromask k (the element is zeroed
+/// out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fnmadd_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfnmadd))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_fnmadd_ph(k: __mmask32, a: __m512h, b: __m512h, c: __m512h) -> __m512h {
+    simd_select_bitmask(k, _mm512_fnmadd_ph(a, b, c), _mm512_setzero_ph())
+}
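For reference, the sign conventions of the four FMA families implemented in this file, written out on scalars with `mul_add` as the fused operation (illustrative sketch, not part of the patch):

// fmadd/fmsub negate c; fnmadd/fnmsub additionally negate the product.
fn fma_family(a: f32, b: f32, c: f32) -> (f32, f32, f32, f32) {
    let fmadd = a.mul_add(b, c);      //  (a * b) + c
    let fmsub = a.mul_add(b, -c);     //  (a * b) - c
    let fnmadd = (-a).mul_add(b, c);  // -(a * b) + c, i.e. c - a*b
    let fnmsub = (-a).mul_add(b, -c); // -(a * b) - c
    (fmadd, fmsub, fnmadd, fnmsub)
}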
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract the intermediate
+/// result from packed elements in c, and store the results in dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fnmadd_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfnmadd, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_fnmadd_round_ph<const ROUNDING: i32>(
+    a: __m512h,
+    b: __m512h,
+    c: __m512h,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    vfmaddph_512(simd_neg(a), b, c, ROUNDING)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract the intermediate
+/// result from packed elements in c, and store the results in dst using writemask k (the element is copied
+/// from a when the corresponding mask bit is not set).
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fnmadd_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfnmadd, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_fnmadd_round_ph<const ROUNDING: i32>(
+    a: __m512h,
+    k: __mmask32,
+    b: __m512h,
+    c: __m512h,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    simd_select_bitmask(k, _mm512_fnmadd_round_ph::<ROUNDING>(a, b, c), a)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract the intermediate
+/// result from packed elements in c, and store the results in dst using writemask k (the element is copied
+/// from c when the corresponding mask bit is not set).
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fnmadd_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfnmadd, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask3_fnmadd_round_ph<const ROUNDING: i32>(
+    a: __m512h,
+    b: __m512h,
+    c: __m512h,
+    k: __mmask32,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    simd_select_bitmask(k, _mm512_fnmadd_round_ph::<ROUNDING>(a, b, c), c)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract the intermediate
+/// result from packed elements in c, and store the results in dst using zeromask k (the element is zeroed
+/// out when the corresponding mask bit is not set).
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fnmadd_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfnmadd, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_fnmadd_round_ph<const ROUNDING: i32>(
+    k: __mmask32,
+    a: __m512h,
+    b: __m512h,
+    c: __m512h,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    simd_select_bitmask(
+        k,
+        _mm512_fnmadd_round_ph::<ROUNDING>(a, b, c),
+        _mm512_setzero_ph(),
+    )
+}
+
+/// Multiply the lower half-precision (16-bit) floating-point elements in a and b, and subtract the intermediate
+/// result from the lower element in c. Store the result in the lower element of dst, and copy the upper 7 packed
+/// elements from a to the upper elements of dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fnmadd_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfnmadd))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_fnmadd_sh(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
+    let extracta: f16 = simd_extract!(a, 0);
+    let extractb: f16 = simd_extract!(b, 0);
+    let extractc: f16 = simd_extract!(c, 0);
+    let r = fmaf16(-extracta, extractb, extractc);
+    simd_insert!(a, 0, r)
+}
+
+/// Multiply the lower half-precision (16-bit) floating-point elements in a and b, and subtract the intermediate
+/// result from the lower element in c. Store the result in the lower element of dst using writemask k (the element
+/// is copied from a when the mask bit 0 is not set), and copy the upper 7 packed elements from a to the upper
+/// elements of dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fnmadd_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfnmadd))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_fnmadd_sh(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h {
+    let mut fnmadd: f16 = simd_extract!(a, 0);
+    if k & 1 != 0 {
+        let extractb: f16 = simd_extract!(b, 0);
+        let extractc: f16 = simd_extract!(c, 0);
+        fnmadd = fmaf16(-fnmadd, extractb, extractc);
+    }
+    simd_insert!(a, 0, fnmadd)
+}
+
+/// Multiply the lower half-precision (16-bit) floating-point elements in a and b, and subtract the intermediate
+/// result from the lower element in c. Store the result in the lower element of dst using writemask k (the element
+/// is copied from c when the mask bit 0 is not set), and copy the upper 7 packed elements from c to the upper
+/// elements of dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fnmadd_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfnmadd))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask3_fnmadd_sh(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h {
+    let mut fnmadd: f16 = simd_extract!(c, 0);
+    if k & 1 != 0 {
+        let extracta: f16 = simd_extract!(a, 0);
+        let extractb: f16 = simd_extract!(b, 0);
+        fnmadd = fmaf16(-extracta, extractb, fnmadd);
+    }
+    simd_insert!(c, 0, fnmadd)
+}
+
+/// Multiply the lower half-precision (16-bit) floating-point elements in a and b, and subtract the intermediate
+/// result from the lower element in c. Store the result in the lower element of dst using zeromask k (the element
+/// is zeroed out when the mask bit 0 is not set), and copy the upper 7 packed elements from a to the upper
+/// elements of dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fnmadd_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfnmadd))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_fnmadd_sh(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h {
+    let mut fnmadd: f16 = 0.0;
+    if k & 1 != 0 {
+        let extracta: f16 = simd_extract!(a, 0);
+        let extractb: f16 = simd_extract!(b, 0);
+        let extractc: f16 = simd_extract!(c, 0);
+        fnmadd = fmaf16(-extracta, extractb, extractc);
+    }
+    simd_insert!(a, 0, fnmadd)
+}
+
+/// Multiply the lower half-precision (16-bit) floating-point elements in a and b, and subtract the intermediate
+/// result from the lower element in c. Store the result in the lower element of dst, and copy the upper 7 packed
+/// elements from a to the upper elements of dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fnmadd_round_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfnmadd, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_fnmadd_round_sh<const ROUNDING: i32>(
+    a: __m128h,
+    b: __m128h,
+    c: __m128h,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    let extracta: f16 = simd_extract!(a, 0);
+    let extractb: f16 = simd_extract!(b, 0);
+    let extractc: f16 = simd_extract!(c, 0);
+    let r = vfmaddsh(-extracta, extractb, extractc, ROUNDING);
+    simd_insert!(a, 0, r)
+}
+
+/// Multiply the lower half-precision (16-bit) floating-point elements in a and b, and subtract the intermediate
+/// result from the lower element in c. Store the result in the lower element of dst using writemask k (the element
+/// is copied from a when the mask bit 0 is not set), and copy the upper 7 packed elements from a to the upper
+/// elements of dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fnmadd_round_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfnmadd, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_fnmadd_round_sh<const ROUNDING: i32>(
+    a: __m128h,
+    k: __mmask8,
+    b: __m128h,
+    c: __m128h,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    let mut fnmadd: f16 = simd_extract!(a, 0);
+    if k & 1 != 0 {
+        let extractb: f16 = simd_extract!(b, 0);
+        let extractc: f16 = simd_extract!(c, 0);
+        fnmadd = vfmaddsh(-fnmadd, extractb, extractc, ROUNDING);
+    }
+    simd_insert!(a, 0, fnmadd)
+}
+
+/// Multiply the lower half-precision (16-bit) floating-point elements in a and b, and subtract the intermediate
+/// result from the lower element in c. Store the result in the lower element of dst using writemask k (the element
+/// is copied from c when the mask bit 0 is not set), and copy the upper 7 packed elements from c to the upper
+/// elements of dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fnmadd_round_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfnmadd, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask3_fnmadd_round_sh<const ROUNDING: i32>(
+    a: __m128h,
+    b: __m128h,
+    c: __m128h,
+    k: __mmask8,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    let mut fnmadd: f16 = simd_extract!(c, 0);
+    if k & 1 != 0 {
+        let extracta: f16 = simd_extract!(a, 0);
+        let extractb: f16 = simd_extract!(b, 0);
+        fnmadd = vfmaddsh(-extracta, extractb, fnmadd, ROUNDING);
+    }
+    simd_insert!(c, 0, fnmadd)
+}
+
+/// Multiply the lower half-precision (16-bit) floating-point elements in a and b, and subtract the intermediate
+/// result from the lower element in c. Store the result in the lower element of dst using zeromask k (the element
+/// is zeroed out when the mask bit 0 is not set), and copy the upper 7 packed elements from a to the upper
+/// elements of dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fnmadd_round_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfnmadd, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_fnmadd_round_sh<const ROUNDING: i32>(
+    k: __mmask8,
+    a: __m128h,
+    b: __m128h,
+    c: __m128h,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    let mut fnmadd: f16 = 0.0;
+    if k & 1 != 0 {
+        let extracta: f16 = simd_extract!(a, 0);
+        let extractb: f16 = simd_extract!(b, 0);
+        let extractc: f16 = simd_extract!(c, 0);
+        fnmadd = vfmaddsh(-extracta, extractb, extractc, ROUNDING);
+    }
+    simd_insert!(a, 0, fnmadd)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract packed elements
+/// in c from the negated intermediate result, and store the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fnmsub_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfnmsub))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_fnmsub_ph(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
+    simd_fma(simd_neg(a), b, simd_neg(c))
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract packed elements
+/// in c from the negated intermediate result, and store the results in dst using writemask k (the element is
+/// copied from a when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fnmsub_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfnmsub))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_fnmsub_ph(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h {
+    simd_select_bitmask(k, _mm_fnmsub_ph(a, b, c), a)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract packed elements
+/// in c from the negated intermediate result, and store the results in dst using writemask k (the element is
+/// copied from c when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fnmsub_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfnmsub))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask3_fnmsub_ph(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h {
+    simd_select_bitmask(k, _mm_fnmsub_ph(a, b, c), c)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract packed elements
+/// in c from the negated intermediate result, and store the results in dst using zeromask k (the element is
+/// zeroed out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fnmsub_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfnmsub))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_fnmsub_ph(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h {
+    simd_select_bitmask(k, _mm_fnmsub_ph(a, b, c), _mm_setzero_ph())
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract packed elements
+/// in c from the negated intermediate result, and store the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fnmsub_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfnmsub))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_fnmsub_ph(a: __m256h, b: __m256h, c: __m256h) -> __m256h {
+    simd_fma(simd_neg(a), b, simd_neg(c))
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract packed elements
+/// in c from the negated intermediate result, and store the results in dst using writemask k (the element is
+/// copied from a when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fnmsub_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfnmsub))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_mask_fnmsub_ph(a: __m256h, k: __mmask16, b: __m256h, c: __m256h) -> __m256h {
+    simd_select_bitmask(k, _mm256_fnmsub_ph(a, b, c), a)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract packed elements
+/// in c from the negated intermediate result, and store the results in dst using writemask k (the element is
+/// copied from c when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask3_fnmsub_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfnmsub))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_mask3_fnmsub_ph(a: __m256h, b: __m256h, c: __m256h, k: __mmask16) -> __m256h {
+    simd_select_bitmask(k, _mm256_fnmsub_ph(a, b, c), c)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract packed elements
+/// in c from the negated intermediate result, and store the results in dst using zeromask k (the element is
+/// zeroed out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fnmsub_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfnmsub))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_maskz_fnmsub_ph(k: __mmask16, a: __m256h, b: __m256h, c: __m256h) -> __m256h {
+    simd_select_bitmask(k, _mm256_fnmsub_ph(a, b, c), _mm256_setzero_ph())
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract packed elements
+/// in c from the negated intermediate result, and store the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fnmsub_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfnmsub))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_fnmsub_ph(a: __m512h, b: __m512h, c: __m512h) -> __m512h {
+    simd_fma(simd_neg(a), b, simd_neg(c))
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract packed elements
+/// in c from the negated intermediate result, and store the results in dst using writemask k (the element is
+/// copied from a when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fnmsub_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfnmsub))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_fnmsub_ph(a: __m512h, k: __mmask32, b: __m512h, c: __m512h) -> __m512h {
+    simd_select_bitmask(k, _mm512_fnmsub_ph(a, b, c), a)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract packed elements
+/// in c from the negated intermediate result, and store the results in dst using writemask k (the element is
+/// copied from c when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fnmsub_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfnmsub))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask3_fnmsub_ph(a: __m512h, b: __m512h, c: __m512h, k: __mmask32) -> __m512h {
+    simd_select_bitmask(k, _mm512_fnmsub_ph(a, b, c), c)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract packed elements
+/// in c from the negated intermediate result, and store the results in dst using zeromask k (the element is
+/// zeroed out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fnmsub_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfnmsub))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_fnmsub_ph(k: __mmask32, a: __m512h, b: __m512h, c: __m512h) -> __m512h {
+    simd_select_bitmask(k, _mm512_fnmsub_ph(a, b, c), _mm512_setzero_ph())
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract packed elements
+/// in c from the negated intermediate result, and store the results in dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fnmsub_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfnmsub, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_fnmsub_round_ph<const ROUNDING: i32>(
+    a: __m512h,
+    b: __m512h,
+    c: __m512h,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    vfmaddph_512(simd_neg(a), b, simd_neg(c), ROUNDING)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract packed elements
+/// in c from the negated intermediate result, and store the results in dst using writemask k (the element is
+/// copied from a when the corresponding mask bit is not set).
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fnmsub_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfnmsub, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_fnmsub_round_ph<const ROUNDING: i32>(
+    a: __m512h,
+    k: __mmask32,
+    b: __m512h,
+    c: __m512h,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    simd_select_bitmask(k, _mm512_fnmsub_round_ph::<ROUNDING>(a, b, c), a)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract packed elements
+/// in c from the negated intermediate result, and store the results in dst using writemask k (the element is
+/// copied from c when the corresponding mask bit is not set).
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fnmsub_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfnmsub, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask3_fnmsub_round_ph<const ROUNDING: i32>(
+    a: __m512h,
+    b: __m512h,
+    c: __m512h,
+    k: __mmask32,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    simd_select_bitmask(k, _mm512_fnmsub_round_ph::<ROUNDING>(a, b, c), c)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, subtract packed elements
+/// in c from the negated intermediate result, and store the results in dst using zeromask k (the element is
+/// zeroed out when the corresponding mask bit is not set).
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fnmsub_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfnmsub, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_fnmsub_round_ph<const ROUNDING: i32>(
+    k: __mmask32,
+    a: __m512h,
+    b: __m512h,
+    c: __m512h,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    simd_select_bitmask(
+        k,
+        _mm512_fnmsub_round_ph::<ROUNDING>(a, b, c),
+        _mm512_setzero_ph(),
+    )
+}
+
+/// Multiply the lower half-precision (16-bit) floating-point elements in a and b, and subtract the lower
+/// element in c from the negated intermediate result. Store the result in the lower element of dst, and copy
+/// the upper 7 packed elements from a to the upper elements of dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fnmsub_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfnmsub))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_fnmsub_sh(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
+    let extracta: f16 = simd_extract!(a, 0);
+    let extractb: f16 = simd_extract!(b, 0);
+    let extractc: f16 = simd_extract!(c, 0);
+    let r = fmaf16(-extracta, extractb, -extractc);
+    simd_insert!(a, 0, r)
+}
+
+/// Multiply the lower half-precision (16-bit) floating-point elements in a and b, and subtract the lower
+/// element in c from the negated intermediate result. Store the result in the lower element of dst using
+/// writemask k (the element is copied from a when the mask bit 0 is not set), and copy the upper 7 packed
+/// elements from a to the upper elements of dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fnmsub_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfnmsub))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_fnmsub_sh(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h {
+    let mut fnmsub: f16 = simd_extract!(a, 0);
+    if k & 1 != 0 {
+        let extractb: f16 = simd_extract!(b, 0);
+        let extractc: f16 = simd_extract!(c, 0);
+        fnmsub = fmaf16(-fnmsub, extractb, -extractc);
+    }
+    simd_insert!(a, 0, fnmsub)
+}
+
+/// Multiply the lower half-precision (16-bit) floating-point elements in a and b, and subtract the lower
+/// element in c from the negated intermediate result. Store the result in the lower element of dst using
+/// writemask k (the element is copied from c when the mask bit 0 is not set), and copy the upper 7 packed
+/// elements from c to the upper elements of dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fnmsub_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfnmsub))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask3_fnmsub_sh(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h {
+    let mut fnmsub: f16 = simd_extract!(c, 0);
+    if k & 1 != 0 {
+        let extracta: f16 = simd_extract!(a, 0);
+        let extractb: f16 = simd_extract!(b, 0);
+        fnmsub = fmaf16(-extracta, extractb, -fnmsub);
+    }
+    simd_insert!(c, 0, fnmsub)
+}
+
+/// Multiply the lower half-precision (16-bit) floating-point elements in a and b, and subtract the lower
+/// element in c from the negated intermediate result. Store the result in the lower element of dst using
+/// zeromask k (the element is zeroed out when the mask bit 0 is not set), and copy the upper 7 packed
+/// elements from a to the upper elements of dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fnmsub_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfnmsub))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_fnmsub_sh(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h {
+    let mut fnmsub: f16 = 0.0;
+    if k & 1 != 0 {
+        let extracta: f16 = simd_extract!(a, 0);
+        let extractb: f16 = simd_extract!(b, 0);
+        let extractc: f16 = simd_extract!(c, 0);
+        fnmsub = fmaf16(-extracta, extractb, -extractc);
+    }
+    simd_insert!(a, 0, fnmsub)
+}
+
+/// Multiply the lower half-precision (16-bit) floating-point elements in a and b, and subtract the lower
+/// element in c from the negated intermediate result. Store the result in the lower element of dst, and copy
+/// the upper 7 packed elements from a to the upper elements of dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fnmsub_round_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfnmsub, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_fnmsub_round_sh<const ROUNDING: i32>(
+    a: __m128h,
+    b: __m128h,
+    c: __m128h,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    let extracta: f16 = simd_extract!(a, 0);
+    let extractb: f16 = simd_extract!(b, 0);
+    let extractc: f16 = simd_extract!(c, 0);
+    let r = vfmaddsh(-extracta, extractb, -extractc, ROUNDING);
+    simd_insert!(a, 0, r)
+}
+
+/// Multiply the lower half-precision (16-bit) floating-point elements in a and b, and subtract the lower
+/// element in c from the negated intermediate result. Store the result in the lower element of dst using
+/// writemask k (the element is copied from a when the mask bit 0 is not set), and copy the upper 7 packed
+/// elements from a to the upper elements of dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fnmsub_round_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfnmsub, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_fnmsub_round_sh<const ROUNDING: i32>(
+    a: __m128h,
+    k: __mmask8,
+    b: __m128h,
+    c: __m128h,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    let mut fnmsub: f16 = simd_extract!(a, 0);
+    if k & 1 != 0 {
+        let extractb: f16 = simd_extract!(b, 0);
+        let extractc: f16 = simd_extract!(c, 0);
+        fnmsub = vfmaddsh(-fnmsub, extractb, -extractc, ROUNDING);
+    }
+    simd_insert!(a, 0, fnmsub)
+}
+
+/// Multiply the lower half-precision (16-bit) floating-point elements in a and b, and subtract the lower
+/// element in c from the negated intermediate result. Store the result in the lower element of dst using
+/// writemask k (the element is copied from c when the mask bit 0 is not set), and copy the upper 7 packed
+/// elements from c to the upper elements of dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fnmsub_round_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfnmsub, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask3_fnmsub_round_sh<const ROUNDING: i32>(
+    a: __m128h,
+    b: __m128h,
+    c: __m128h,
+    k: __mmask8,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    let mut fnmsub: f16 = simd_extract!(c, 0);
+    if k & 1 != 0 {
+        let extracta: f16 = simd_extract!(a, 0);
+        let extractb: f16 = simd_extract!(b, 0);
+        fnmsub = vfmaddsh(-extracta, extractb, -fnmsub, ROUNDING);
+    }
+    simd_insert!(c, 0, fnmsub)
+}
+
+/// Multiply the lower half-precision (16-bit) floating-point elements in a and b, and subtract the lower
+/// element in c from the negated intermediate result. Store the result in the lower element of dst using
+/// zeromask k (the element is zeroed out when the mask bit 0 is not set), and copy the upper 7 packed
+/// elements from a to the upper elements of dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fnmsub_round_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfnmsub, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_fnmsub_round_sh<const ROUNDING: i32>(
+    k: __mmask8,
+    a: __m128h,
+    b: __m128h,
+    c: __m128h,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    let mut fnmsub: f16 = 0.0;
+    if k & 1 != 0 {
+        let extracta: f16 = simd_extract!(a, 0);
+        let extractb: f16 = simd_extract!(b, 0);
+        let extractc: f16 = simd_extract!(c, 0);
+        fnmsub = vfmaddsh(-extracta, extractb, -extractc, ROUNDING);
+    }
+    simd_insert!(a, 0, fnmsub)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, alternatively add and
+/// subtract packed elements in c to/from the intermediate result, and store the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmaddsub_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfmaddsub))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_fmaddsub_ph(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
+    vfmaddsubph_128(a, b, c)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, alternatively add and
+/// subtract packed elements in c to/from the intermediate result, and store the results in dst using writemask k
+/// (the element is copied from a when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmaddsub_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfmaddsub))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_fmaddsub_ph(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h {
+    simd_select_bitmask(k, _mm_fmaddsub_ph(a, b, c), a)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, alternatively add and
+/// subtract packed elements in c to/from the intermediate result, and store the results in dst using writemask k
+/// (the element is copied from c when the corresponding mask bit is not set).
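+///
+/// A minimal usage sketch (illustrative values only, not exercised as a doc-test):
+/// even-indexed lanes subtract `c`, odd-indexed lanes add it, and lanes whose
+/// mask bit is clear keep the value from `c`.
+///
+/// ```ignore
+/// let a = _mm_set1_ph(2.0);
+/// let b = _mm_set1_ph(3.0);
+/// let c = _mm_set1_ph(1.0);
+/// let r = _mm_mask3_fmaddsub_ph(a, b, c, 0b0000_1111);
+/// // r == [5.0, 7.0, 5.0, 7.0, 1.0, 1.0, 1.0, 1.0]
+/// ```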
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fmaddsub_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vfmaddsub))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mask3_fmaddsub_ph(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h { + simd_select_bitmask(k, _mm_fmaddsub_ph(a, b, c), c) +} + +/// Multiply packed half-precision (16-bit) floating-point elements in a and b, alternatively add and +/// subtract packed elements in c to/from the intermediate result, and store the results in dst using zeromask k +/// (the element is zeroed out when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmaddsub_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vfmaddsub))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_maskz_fmaddsub_ph(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h { + simd_select_bitmask(k, _mm_fmaddsub_ph(a, b, c), _mm_setzero_ph()) +} + +/// Multiply packed half-precision (16-bit) floating-point elements in a and b, alternatively add and +/// subtract packed elements in c to/from the intermediate result, and store the results in dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fmaddsub_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vfmaddsub))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_fmaddsub_ph(a: __m256h, b: __m256h, c: __m256h) -> __m256h { + vfmaddsubph_256(a, b, c) +} + +/// Multiply packed half-precision (16-bit) floating-point elements in a and b, alternatively add and +/// subtract packed elements in c to/from the intermediate result, and store the results in dst using writemask k +/// (the element is copied from a when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fmaddsub_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vfmaddsub))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_mask_fmaddsub_ph(a: __m256h, k: __mmask16, b: __m256h, c: __m256h) -> __m256h { + simd_select_bitmask(k, _mm256_fmaddsub_ph(a, b, c), a) +} + +/// Multiply packed half-precision (16-bit) floating-point elements in a and b, alternatively add and +/// subtract packed elements in c to/from the intermediate result, and store the results in dst using writemask k +/// (the element is copied from c when the corresponding mask bit is not set). 
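+///
+/// A minimal usage sketch (illustrative values only, not exercised as a doc-test):
+///
+/// ```ignore
+/// let (a, b, c) = (_mm256_set1_ph(2.0), _mm256_set1_ph(3.0), _mm256_set1_ph(1.0));
+/// let r = _mm256_mask3_fmaddsub_ph(a, b, c, 0x00ff);
+/// // Lanes 0..8 alternate 5.0 / 7.0; lanes 8..16 keep 1.0 from `c`.
+/// ```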
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask3_fmaddsub_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vfmaddsub))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_mask3_fmaddsub_ph( + a: __m256h, + b: __m256h, + c: __m256h, + k: __mmask16, +) -> __m256h { + simd_select_bitmask(k, _mm256_fmaddsub_ph(a, b, c), c) +} + +/// Multiply packed half-precision (16-bit) floating-point elements in a and b, alternatively add and +/// subtract packed elements in c to/from the intermediate result, and store the results in dst using zeromask k +/// (the element is zeroed out when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fmaddsub_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vfmaddsub))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_maskz_fmaddsub_ph( + k: __mmask16, + a: __m256h, + b: __m256h, + c: __m256h, +) -> __m256h { + simd_select_bitmask(k, _mm256_fmaddsub_ph(a, b, c), _mm256_setzero_ph()) +} + +/// Multiply packed half-precision (16-bit) floating-point elements in a and b, alternatively add and +/// subtract packed elements in c to/from the intermediate result, and store the results in dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmaddsub_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vfmaddsub))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_fmaddsub_ph(a: __m512h, b: __m512h, c: __m512h) -> __m512h { + _mm512_fmaddsub_round_ph::<_MM_FROUND_CUR_DIRECTION>(a, b, c) +} + +/// Multiply packed half-precision (16-bit) floating-point elements in a and b, alternatively add and +/// subtract packed elements in c to/from the intermediate result, and store the results in dst using writemask k +/// (the element is copied from a when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmaddsub_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vfmaddsub))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_mask_fmaddsub_ph(a: __m512h, k: __mmask32, b: __m512h, c: __m512h) -> __m512h { + simd_select_bitmask(k, _mm512_fmaddsub_ph(a, b, c), a) +} + +/// Multiply packed half-precision (16-bit) floating-point elements in a and b, alternatively add and +/// subtract packed elements in c to/from the intermediate result, and store the results in dst using writemask k +/// (the element is copied from c when the corresponding mask bit is not set). 
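+///
+/// A minimal usage sketch (illustrative values only, not exercised as a doc-test):
+///
+/// ```ignore
+/// let (a, b, c) = (_mm512_set1_ph(2.0), _mm512_set1_ph(3.0), _mm512_set1_ph(1.0));
+/// // All 32 mask bits set: even lanes hold 2.0 * 3.0 - 1.0 = 5.0, odd lanes 2.0 * 3.0 + 1.0 = 7.0.
+/// let r = _mm512_mask3_fmaddsub_ph(a, b, c, !0);
+/// ```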
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fmaddsub_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmaddsub))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask3_fmaddsub_ph(
+    a: __m512h,
+    b: __m512h,
+    c: __m512h,
+    k: __mmask32,
+) -> __m512h {
+    simd_select_bitmask(k, _mm512_fmaddsub_ph(a, b, c), c)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, alternatively add and
+/// subtract packed elements in c to/from the intermediate result, and store the results in dst using zeromask k
+/// (the element is zeroed out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmaddsub_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmaddsub))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_fmaddsub_ph(
+    k: __mmask32,
+    a: __m512h,
+    b: __m512h,
+    c: __m512h,
+) -> __m512h {
+    simd_select_bitmask(k, _mm512_fmaddsub_ph(a, b, c), _mm512_setzero_ph())
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, alternatively add and
+/// subtract packed elements in c to/from the intermediate result, and store the results in dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmaddsub_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_fmaddsub_round_ph<const ROUNDING: i32>(
+    a: __m512h,
+    b: __m512h,
+    c: __m512h,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    vfmaddsubph_512(a, b, c, ROUNDING)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, alternatively add and
+/// subtract packed elements in c to/from the intermediate result, and store the results in dst using writemask k
+/// (the element is copied from a when the corresponding mask bit is not set).
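+///
+/// A minimal usage sketch (illustrative values only, not exercised as a doc-test):
+///
+/// ```ignore
+/// let (a, b, c) = (_mm512_set1_ph(2.0), _mm512_set1_ph(3.0), _mm512_set1_ph(1.0));
+/// // Round to nearest with exceptions suppressed; every mask bit set.
+/// let r = _mm512_mask_fmaddsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, !0, b, c);
+/// // Even lanes hold 5.0, odd lanes hold 7.0.
+/// ```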
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmaddsub_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_fmaddsub_round_ph<const ROUNDING: i32>(
+    a: __m512h,
+    k: __mmask32,
+    b: __m512h,
+    c: __m512h,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    simd_select_bitmask(k, _mm512_fmaddsub_round_ph::<ROUNDING>(a, b, c), a)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, alternatively add and
+/// subtract packed elements in c to/from the intermediate result, and store the results in dst using writemask k
+/// (the element is copied from c when the corresponding mask bit is not set).
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fmaddsub_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask3_fmaddsub_round_ph<const ROUNDING: i32>(
+    a: __m512h,
+    b: __m512h,
+    c: __m512h,
+    k: __mmask32,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    simd_select_bitmask(k, _mm512_fmaddsub_round_ph::<ROUNDING>(a, b, c), c)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, alternatively add and
+/// subtract packed elements in c to/from the intermediate result, and store the results in dst using zeromask k
+/// (the element is zeroed out when the corresponding mask bit is not set).
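+///
+/// A minimal usage sketch (illustrative values only, not exercised as a doc-test):
+///
+/// ```ignore
+/// let (a, b, c) = (_mm512_set1_ph(2.0), _mm512_set1_ph(3.0), _mm512_set1_ph(1.0));
+/// // Low 16 mask bits set: lanes 0..16 alternate 5.0 / 7.0, lanes 16..32 are zeroed.
+/// let r = _mm512_maskz_fmaddsub_round_ph::<_MM_FROUND_CUR_DIRECTION>(0x0000_ffff, a, b, c);
+/// ```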
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmaddsub_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmaddsub, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_fmaddsub_round_ph<const ROUNDING: i32>(
+    k: __mmask32,
+    a: __m512h,
+    b: __m512h,
+    c: __m512h,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    simd_select_bitmask(
+        k,
+        _mm512_fmaddsub_round_ph::<ROUNDING>(a, b, c),
+        _mm512_setzero_ph(),
+    )
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, alternatively subtract
+/// and add packed elements in c to/from the intermediate result, and store the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmsubadd_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfmsubadd))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_fmsubadd_ph(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
+    vfmaddsubph_128(a, b, simd_neg(c))
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, alternatively subtract
+/// and add packed elements in c to/from the intermediate result, and store the results in dst using writemask k
+/// (the element is copied from a when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fmsubadd_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfmsubadd))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_fmsubadd_ph(a: __m128h, k: __mmask8, b: __m128h, c: __m128h) -> __m128h {
+    simd_select_bitmask(k, _mm_fmsubadd_ph(a, b, c), a)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, alternatively subtract
+/// and add packed elements in c to/from the intermediate result, and store the results in dst using writemask k
+/// (the element is copied from c when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask3_fmsubadd_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vfmsubadd))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask3_fmsubadd_ph(a: __m128h, b: __m128h, c: __m128h, k: __mmask8) -> __m128h {
+    simd_select_bitmask(k, _mm_fmsubadd_ph(a, b, c), c)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, alternatively subtract
+/// and add packed elements in c to/from the intermediate result, and store the results in dst using zeromask k
+/// (the element is zeroed out when the corresponding mask bit is not set).
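+///
+/// A minimal usage sketch (illustrative values only, not exercised as a doc-test):
+/// `fmsubadd` flips the `fmaddsub` pattern, adding `c` in even-indexed lanes and
+/// subtracting it in odd-indexed lanes.
+///
+/// ```ignore
+/// let r = _mm_maskz_fmsubadd_ph(0xff, _mm_set1_ph(2.0), _mm_set1_ph(3.0), _mm_set1_ph(1.0));
+/// // r == [7.0, 5.0, 7.0, 5.0, 7.0, 5.0, 7.0, 5.0]
+/// ```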
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_fmsubadd_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vfmsubadd))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_maskz_fmsubadd_ph(k: __mmask8, a: __m128h, b: __m128h, c: __m128h) -> __m128h { + simd_select_bitmask(k, _mm_fmsubadd_ph(a, b, c), _mm_setzero_ph()) +} + +/// Multiply packed half-precision (16-bit) floating-point elements in a and b, alternatively subtract +/// and add packed elements in c to/from the intermediate result, and store the results in dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fmsubadd_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vfmsubadd))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_fmsubadd_ph(a: __m256h, b: __m256h, c: __m256h) -> __m256h { + vfmaddsubph_256(a, b, simd_neg(c)) +} + +/// Multiply packed half-precision (16-bit) floating-point elements in a and b, alternatively subtract +/// and add packed elements in c to/from the intermediate result, and store the results in dst using writemask k +/// (the element is copied from a when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fmsubadd_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vfmsubadd))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_mask_fmsubadd_ph(a: __m256h, k: __mmask16, b: __m256h, c: __m256h) -> __m256h { + simd_select_bitmask(k, _mm256_fmsubadd_ph(a, b, c), a) +} + +/// Multiply packed half-precision (16-bit) floating-point elements in a and b, alternatively subtract +/// and add packed elements in c to/from the intermediate result, and store the results in dst using writemask k +/// (the element is copied from c when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask3_fmsubadd_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vfmsubadd))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_mask3_fmsubadd_ph( + a: __m256h, + b: __m256h, + c: __m256h, + k: __mmask16, +) -> __m256h { + simd_select_bitmask(k, _mm256_fmsubadd_ph(a, b, c), c) +} + +/// Multiply packed half-precision (16-bit) floating-point elements in a and b, alternatively subtract +/// and add packed elements in c to/from the intermediate result, and store the results in dst using zeromask k +/// (the element is zeroed out when the corresponding mask bit is not set). 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_fmsubadd_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vfmsubadd))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_maskz_fmsubadd_ph( + k: __mmask16, + a: __m256h, + b: __m256h, + c: __m256h, +) -> __m256h { + simd_select_bitmask(k, _mm256_fmsubadd_ph(a, b, c), _mm256_setzero_ph()) +} + +/// Multiply packed half-precision (16-bit) floating-point elements in a and b, alternatively subtract +/// and add packed elements in c to/from the intermediate result, and store the results in dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmsubadd_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vfmsubadd))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_fmsubadd_ph(a: __m512h, b: __m512h, c: __m512h) -> __m512h { + _mm512_fmsubadd_round_ph::<_MM_FROUND_CUR_DIRECTION>(a, b, c) +} + +/// Multiply packed half-precision (16-bit) floating-point elements in a and b, alternatively subtract +/// and add packed elements in c to/from the intermediate result, and store the results in dst using writemask k +/// (the element is copied from a when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmsubadd_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vfmsubadd))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_mask_fmsubadd_ph(a: __m512h, k: __mmask32, b: __m512h, c: __m512h) -> __m512h { + simd_select_bitmask(k, _mm512_fmsubadd_ph(a, b, c), a) +} + +/// Multiply packed half-precision (16-bit) floating-point elements in a and b, alternatively subtract +/// and add packed elements in c to/from the intermediate result, and store the results in dst using writemask k +/// (the element is copied from c when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fmsubadd_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vfmsubadd))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_mask3_fmsubadd_ph( + a: __m512h, + b: __m512h, + c: __m512h, + k: __mmask32, +) -> __m512h { + simd_select_bitmask(k, _mm512_fmsubadd_ph(a, b, c), c) +} + +/// Multiply packed half-precision (16-bit) floating-point elements in a and b, alternatively subtract +/// and add packed elements in c to/from the intermediate result, and store the results in dst using zeromask k +/// (the element is zeroed out when the corresponding mask bit is not set). 
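+///
+/// A minimal usage sketch (illustrative values only, not exercised as a doc-test):
+///
+/// ```ignore
+/// let r = _mm512_maskz_fmsubadd_ph(!0, _mm512_set1_ph(2.0), _mm512_set1_ph(3.0), _mm512_set1_ph(1.0));
+/// // Even lanes: 2.0 * 3.0 + 1.0 = 7.0; odd lanes: 2.0 * 3.0 - 1.0 = 5.0.
+/// ```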
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmsubadd_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmsubadd))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_fmsubadd_ph(
+    k: __mmask32,
+    a: __m512h,
+    b: __m512h,
+    c: __m512h,
+) -> __m512h {
+    simd_select_bitmask(k, _mm512_fmsubadd_ph(a, b, c), _mm512_setzero_ph())
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, alternatively subtract
+/// and add packed elements in c to/from the intermediate result, and store the results in dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fmsubadd_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmsubadd, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_fmsubadd_round_ph<const ROUNDING: i32>(
+    a: __m512h,
+    b: __m512h,
+    c: __m512h,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    vfmaddsubph_512(a, b, simd_neg(c), ROUNDING)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, alternatively subtract
+/// and add packed elements in c to/from the intermediate result, and store the results in dst using writemask k
+/// (the element is copied from a when the corresponding mask bit is not set).
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fmsubadd_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmsubadd, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_fmsubadd_round_ph<const ROUNDING: i32>(
+    a: __m512h,
+    k: __mmask32,
+    b: __m512h,
+    c: __m512h,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    simd_select_bitmask(k, _mm512_fmsubadd_round_ph::<ROUNDING>(a, b, c), a)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, alternatively subtract
+/// and add packed elements in c to/from the intermediate result, and store the results in dst using writemask k
+/// (the element is copied from c when the corresponding mask bit is not set).
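+///
+/// A minimal usage sketch (illustrative values only, not exercised as a doc-test):
+///
+/// ```ignore
+/// let (a, b, c) = (_mm512_set1_ph(2.0), _mm512_set1_ph(3.0), _mm512_set1_ph(1.0));
+/// // No mask bits set: every lane keeps the corresponding element of `c`, i.e. 1.0.
+/// let r = _mm512_mask3_fmsubadd_round_ph::<_MM_FROUND_CUR_DIRECTION>(a, b, c, 0);
+/// ```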
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask3_fmsubadd_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmsubadd, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask3_fmsubadd_round_ph<const ROUNDING: i32>(
+    a: __m512h,
+    b: __m512h,
+    c: __m512h,
+    k: __mmask32,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    simd_select_bitmask(k, _mm512_fmsubadd_round_ph::<ROUNDING>(a, b, c), c)
+}
+
+/// Multiply packed half-precision (16-bit) floating-point elements in a and b, alternatively subtract
+/// and add packed elements in c to/from the intermediate result, and store the results in dst using zeromask k
+/// (the element is zeroed out when the corresponding mask bit is not set).
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_fmsubadd_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vfmsubadd, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_fmsubadd_round_ph<const ROUNDING: i32>(
+    k: __mmask32,
+    a: __m512h,
+    b: __m512h,
+    c: __m512h,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    simd_select_bitmask(
+        k,
+        _mm512_fmsubadd_round_ph::<ROUNDING>(a, b, c),
+        _mm512_setzero_ph(),
+    )
+}
+
+#[allow(improper_ctypes)]
+extern "C" {
+    #[link_name = "llvm.x86.avx512fp16.mask.cmp.sh"]
+    fn vcmpsh(a: __m128h, b: __m128h, imm8: i32, mask: __mmask8, sae: i32) -> __mmask8;
+    #[link_name = "llvm.x86.avx512fp16.vcomi.sh"]
+    fn vcomish(a: __m128h, b: __m128h, imm8: i32, sae: i32) -> i32;
+
+    #[link_name = "llvm.x86.avx512fp16.add.ph.512"]
+    fn vaddph(a: __m512h, b: __m512h, rounding: i32) -> __m512h;
+    #[link_name = "llvm.x86.avx512fp16.sub.ph.512"]
+    fn vsubph(a: __m512h, b: __m512h, rounding: i32) -> __m512h;
+    #[link_name = "llvm.x86.avx512fp16.mul.ph.512"]
+    fn vmulph(a: __m512h, b: __m512h, rounding: i32) -> __m512h;
+    #[link_name = "llvm.x86.avx512fp16.div.ph.512"]
+    fn vdivph(a: __m512h, b: __m512h, rounding: i32) -> __m512h;
+
+    #[link_name = "llvm.x86.avx512fp16.mask.add.sh.round"]
+    fn vaddsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h;
+    #[link_name = "llvm.x86.avx512fp16.mask.sub.sh.round"]
+    fn vsubsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h;
+    #[link_name = "llvm.x86.avx512fp16.mask.mul.sh.round"]
+    fn vmulsh(a: __m128h, b: __m128h, src: 
__m128h, k: __mmask8, rounding: i32) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.div.sh.round"] + fn vdivsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; + + #[link_name = "llvm.x86.avx512fp16.mask.vfmul.cph.128"] + fn vfmulcph_128(a: __m128, b: __m128, src: __m128, k: __mmask8) -> __m128; + #[link_name = "llvm.x86.avx512fp16.mask.vfmul.cph.256"] + fn vfmulcph_256(a: __m256, b: __m256, src: __m256, k: __mmask8) -> __m256; + #[link_name = "llvm.x86.avx512fp16.mask.vfmul.cph.512"] + fn vfmulcph_512(a: __m512, b: __m512, src: __m512, k: __mmask16, rounding: i32) -> __m512; + #[link_name = "llvm.x86.avx512fp16.mask.vfmul.csh"] + fn vfmulcsh(a: __m128, b: __m128, src: __m128, k: __mmask8, rounding: i32) -> __m128; + + #[link_name = "llvm.x86.avx512fp16.mask.vfcmul.cph.128"] + fn vfcmulcph_128(a: __m128, b: __m128, src: __m128, k: __mmask8) -> __m128; + #[link_name = "llvm.x86.avx512fp16.mask.vfcmul.cph.256"] + fn vfcmulcph_256(a: __m256, b: __m256, src: __m256, k: __mmask8) -> __m256; + #[link_name = "llvm.x86.avx512fp16.mask.vfcmul.cph.512"] + fn vfcmulcph_512(a: __m512, b: __m512, src: __m512, k: __mmask16, rounding: i32) -> __m512; + #[link_name = "llvm.x86.avx512fp16.mask.vfcmul.csh"] + fn vfcmulcsh(a: __m128, b: __m128, src: __m128, k: __mmask8, rounding: i32) -> __m128; + + #[link_name = "llvm.x86.avx512fp16.mask.vfmadd.cph.128"] + fn vfmaddcph_mask3_128(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128; + #[link_name = "llvm.x86.avx512fp16.maskz.vfmadd.cph.128"] + fn vfmaddcph_maskz_128(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128; + #[link_name = "llvm.x86.avx512fp16.mask.vfmadd.cph.256"] + fn vfmaddcph_mask3_256(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256; + #[link_name = "llvm.x86.avx512fp16.maskz.vfmadd.cph.256"] + fn vfmaddcph_maskz_256(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256; + #[link_name = "llvm.x86.avx512fp16.mask.vfmadd.cph.512"] + fn vfmaddcph_mask3_512(a: __m512, b: __m512, c: __m512, k: __mmask16, rounding: i32) -> __m512; + #[link_name = "llvm.x86.avx512fp16.maskz.vfmadd.cph.512"] + fn vfmaddcph_maskz_512(a: __m512, b: __m512, c: __m512, k: __mmask16, rounding: i32) -> __m512; + #[link_name = "llvm.x86.avx512fp16.mask.vfmadd.csh"] + fn vfmaddcsh_mask(a: __m128, b: __m128, c: __m128, k: __mmask8, rounding: i32) -> __m128; + #[link_name = "llvm.x86.avx512fp16.maskz.vfmadd.csh"] + fn vfmaddcsh_maskz(a: __m128, b: __m128, c: __m128, k: __mmask8, rounding: i32) -> __m128; + + #[link_name = "llvm.x86.avx512fp16.mask.vfcmadd.cph.128"] + fn vfcmaddcph_mask3_128(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128; + #[link_name = "llvm.x86.avx512fp16.maskz.vfcmadd.cph.128"] + fn vfcmaddcph_maskz_128(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128; + #[link_name = "llvm.x86.avx512fp16.mask.vfcmadd.cph.256"] + fn vfcmaddcph_mask3_256(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256; + #[link_name = "llvm.x86.avx512fp16.maskz.vfcmadd.cph.256"] + fn vfcmaddcph_maskz_256(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256; + #[link_name = "llvm.x86.avx512fp16.mask.vfcmadd.cph.512"] + fn vfcmaddcph_mask3_512(a: __m512, b: __m512, c: __m512, k: __mmask16, rounding: i32) + -> __m512; + #[link_name = "llvm.x86.avx512fp16.maskz.vfcmadd.cph.512"] + fn vfcmaddcph_maskz_512(a: __m512, b: __m512, c: __m512, k: __mmask16, rounding: i32) + -> __m512; + #[link_name = "llvm.x86.avx512fp16.mask.vfcmadd.csh"] + fn vfcmaddcsh_mask(a: __m128, b: __m128, c: __m128, k: __mmask8, rounding: i32) -> 
__m128; + #[link_name = "llvm.x86.avx512fp16.maskz.vfcmadd.csh"] + fn vfcmaddcsh_maskz(a: __m128, b: __m128, c: __m128, k: __mmask8, rounding: i32) -> __m128; + + #[link_name = "llvm.x86.avx512fp16.vfmadd.ph.512"] + fn vfmaddph_512(a: __m512h, b: __m512h, c: __m512h, rounding: i32) -> __m512h; + #[link_name = "llvm.fma.f16"] + fn fmaf16(a: f16, b: f16, c: f16) -> f16; // TODO: use `crate::intrinsics::fmaf16` when it's available + #[link_name = "llvm.x86.avx512fp16.vfmadd.f16"] + fn vfmaddsh(a: f16, b: f16, c: f16, rounding: i32) -> f16; + + #[link_name = "llvm.x86.avx512fp16.vfmaddsub.ph.128"] + fn vfmaddsubph_128(a: __m128h, b: __m128h, c: __m128h) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.vfmaddsub.ph.256"] + fn vfmaddsubph_256(a: __m256h, b: __m256h, c: __m256h) -> __m256h; + #[link_name = "llvm.x86.avx512fp16.vfmaddsub.ph.512"] + fn vfmaddsubph_512(a: __m512h, b: __m512h, c: __m512h, rounding: i32) -> __m512h; + +} + +#[cfg(test)] +mod tests { + use crate::core_arch::x86::*; + use crate::mem::transmute; + use crate::ptr::{addr_of, addr_of_mut}; + use stdarch_test::simd_test; + + #[target_feature(enable = "avx512fp16")] + unsafe fn _mm_set1_pch(re: f16, im: f16) -> __m128h { + _mm_setr_ph(re, im, re, im, re, im, re, im) + } + + #[target_feature(enable = "avx512fp16")] + unsafe fn _mm256_set1_pch(re: f16, im: f16) -> __m256h { + _mm256_setr_ph( + re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, + ) + } + + #[target_feature(enable = "avx512fp16")] + unsafe fn _mm512_set1_pch(re: f16, im: f16) -> __m512h { + _mm512_setr_ph( + re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, + re, im, re, im, re, im, re, im, re, im, + ) + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_set_ph() { + let r = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let e = _mm_setr_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_set_ph() { + let r = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let e = _mm256_setr_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_set_ph() { + let r = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let e = _mm512_setr_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_set_sh() { + let r = _mm_set_sh(1.0); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_set1_ph() { + let r = _mm_set1_ph(1.0); + let e = _mm_set_ph(1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_set1_ph() { + let r = _mm256_set1_ph(1.0); + let e = _mm256_set_ph( + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_set1_ph() { + let r = _mm512_set1_ph(1.0); 
+ let e = _mm512_set_ph( + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_setr_ph() { + let r = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let e = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_setr_ph() { + let r = _mm256_setr_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let e = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_setr_ph() { + let r = _mm512_setr_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let e = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_setzero_ph() { + let r = _mm_setzero_ph(); + let e = _mm_set1_ph(0.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_setzero_ph() { + let r = _mm256_setzero_ph(); + let e = _mm256_set1_ph(0.0); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_setzero_ph() { + let r = _mm512_setzero_ph(); + let e = _mm512_set1_ph(0.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_castsi128_ph() { + let a = _mm_set1_epi16(0x3c00); + let r = _mm_castsi128_ph(a); + let e = _mm_set1_ph(1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castsi256_ph() { + let a = _mm256_set1_epi16(0x3c00); + let r = _mm256_castsi256_ph(a); + let e = _mm256_set1_ph(1.0); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castsi512_ph() { + let a = _mm512_set1_epi16(0x3c00); + let r = _mm512_castsi512_ph(a); + let e = _mm512_set1_ph(1.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_castph_si128() { + let a = _mm_set1_ph(1.0); + let r = _mm_castph_si128(a); + let e = _mm_set1_epi16(0x3c00); + assert_eq_m128i(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castph_si256() { + let a = _mm256_set1_ph(1.0); + let r = _mm256_castph_si256(a); + let e = _mm256_set1_epi16(0x3c00); + assert_eq_m256i(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph_si512() { + let a = _mm512_set1_ph(1.0); + let r = _mm512_castph_si512(a); + let e = _mm512_set1_epi16(0x3c00); + assert_eq_m512i(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_castps_ph() { + let a = _mm_castsi128_ps(_mm_set1_epi16(0x3c00)); + let r = _mm_castps_ph(a); + let e = _mm_set1_ph(1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castps_ph() { + let a = _mm256_castsi256_ps(_mm256_set1_epi16(0x3c00)); + let r = _mm256_castps_ph(a); + let e = _mm256_set1_ph(1.0); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = 
"avx512fp16")] + unsafe fn test_mm512_castps_ph() { + let a = _mm512_castsi512_ps(_mm512_set1_epi16(0x3c00)); + let r = _mm512_castps_ph(a); + let e = _mm512_set1_ph(1.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_castph_ps() { + let a = _mm_castsi128_ph(_mm_set1_epi32(0x3f800000)); + let r = _mm_castph_ps(a); + let e = _mm_set1_ps(1.0); + assert_eq_m128(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castph_ps() { + let a = _mm256_castsi256_ph(_mm256_set1_epi32(0x3f800000)); + let r = _mm256_castph_ps(a); + let e = _mm256_set1_ps(1.0); + assert_eq_m256(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph_ps() { + let a = _mm512_castsi512_ph(_mm512_set1_epi32(0x3f800000)); + let r = _mm512_castph_ps(a); + let e = _mm512_set1_ps(1.0); + assert_eq_m512(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_castpd_ph() { + let a = _mm_castsi128_pd(_mm_set1_epi16(0x3c00)); + let r = _mm_castpd_ph(a); + let e = _mm_set1_ph(1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castpd_ph() { + let a = _mm256_castsi256_pd(_mm256_set1_epi16(0x3c00)); + let r = _mm256_castpd_ph(a); + let e = _mm256_set1_ph(1.0); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castpd_ph() { + let a = _mm512_castsi512_pd(_mm512_set1_epi16(0x3c00)); + let r = _mm512_castpd_ph(a); + let e = _mm512_set1_ph(1.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_castph_pd() { + let a = _mm_castsi128_ph(_mm_set1_epi64x(0x3ff0000000000000)); + let r = _mm_castph_pd(a); + let e = _mm_set1_pd(1.0); + assert_eq_m128d(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castph_pd() { + let a = _mm256_castsi256_ph(_mm256_set1_epi64x(0x3ff0000000000000)); + let r = _mm256_castph_pd(a); + let e = _mm256_set1_pd(1.0); + assert_eq_m256d(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph_pd() { + let a = _mm512_castsi512_ph(_mm512_set1_epi64(0x3ff0000000000000)); + let r = _mm512_castph_pd(a); + let e = _mm512_set1_pd(1.0); + assert_eq_m512d(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castph256_ph128() { + let a = _mm256_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., + ); + let r = _mm256_castph256_ph128(a); + let e = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph512_ph128() { + let a = _mm512_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., + ); + let r = _mm512_castph512_ph128(a); + let e = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph512_ph256() { + let a = _mm512_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., + ); + let r = _mm512_castph512_ph256(a); + let e = _mm256_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castph128_ph256() { + let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + let r = _mm256_castph128_ph256(a); + 
assert_eq_m128h(_mm256_castph256_ph128(r), a); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph128_ph512() { + let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + let r = _mm512_castph128_ph512(a); + assert_eq_m128h(_mm512_castph512_ph128(r), a); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph256_ph512() { + let a = _mm256_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., + ); + let r = _mm512_castph256_ph512(a); + assert_eq_m256h(_mm512_castph512_ph256(r), a); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_zextph128_ph256() { + let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + let r = _mm256_zextph128_ph256(a); + let e = _mm256_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 0., 0., 0., 0., 0., 0., 0., 0., + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_zextph128_ph512() { + let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + let r = _mm512_zextph128_ph512(a); + let e = _mm512_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_zextph256_ph512() { + let a = _mm256_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., + ); + let r = _mm512_zextph256_ph512(a); + let e = _mm512_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_cmp_round_sh_mask() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_cmp_round_sh_mask::<_CMP_EQ_OQ, _MM_FROUND_NO_EXC>(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_cmp_round_sh_mask() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_mask_cmp_round_sh_mask::<_CMP_EQ_OQ, _MM_FROUND_NO_EXC>(0, a, b); + assert_eq!(r, 0); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_cmp_sh_mask() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_cmp_sh_mask::<_CMP_EQ_OQ>(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_cmp_sh_mask() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_mask_cmp_sh_mask::<_CMP_EQ_OQ>(0, a, b); + assert_eq!(r, 0); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comi_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_comi_round_sh::<_CMP_EQ_OQ, _MM_FROUND_NO_EXC>(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comi_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_comi_sh::<_CMP_EQ_OQ>(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comieq_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_comieq_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comige_sh() { + let a = _mm_set_sh(2.0); + let b = _mm_set_sh(1.0); + let r = _mm_comige_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comigt_sh() { + let a = _mm_set_sh(2.0); + let b = _mm_set_sh(1.0); + let r = _mm_comigt_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe 
fn test_mm_comile_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_comile_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comilt_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_comilt_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comineq_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_comineq_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_ucomieq_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_ucomieq_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_ucomige_sh() { + let a = _mm_set_sh(2.0); + let b = _mm_set_sh(1.0); + let r = _mm_ucomige_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_ucomigt_sh() { + let a = _mm_set_sh(2.0); + let b = _mm_set_sh(1.0); + let r = _mm_ucomigt_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_ucomile_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_ucomile_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_ucomilt_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_ucomilt_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_ucomineq_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_ucomineq_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_load_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_load_ph(addr_of!(a).cast()); + assert_eq_m128h(a, b); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_load_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_load_ph(addr_of!(a).cast()); + assert_eq_m256h(a, b); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_load_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_load_ph(addr_of!(a).cast()); + assert_eq_m512h(a, b); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_load_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_load_sh(addr_of!(a).cast()); + assert_eq_m128h(a, b); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_load_sh() { + let a = _mm_set_sh(1.0); + let src = _mm_set_sh(2.); + let b = _mm_mask_load_sh(src, 1, addr_of!(a).cast()); + assert_eq_m128h(a, b); + let b = _mm_mask_load_sh(src, 0, addr_of!(a).cast()); + assert_eq_m128h(src, b); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_load_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_maskz_load_sh(1, addr_of!(a).cast()); + assert_eq_m128h(a, b); + let b = _mm_maskz_load_sh(0, addr_of!(a).cast()); + assert_eq_m128h(_mm_setzero_ph(), b); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_loadu_ph() { + let array = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]; + let r = _mm_loadu_ph(array.as_ptr()); + let e = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe 
fn test_mm256_loadu_ph() { + let array = [ + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ]; + let r = _mm256_loadu_ph(array.as_ptr()); + let e = _mm256_setr_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_loadu_ph() { + let array = [ + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ]; + let r = _mm512_loadu_ph(array.as_ptr()); + let e = _mm512_setr_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_move_sh() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_sh(9.0); + let r = _mm_move_sh(a, b); + let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 9.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_move_sh() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_sh(9.0); + let src = _mm_set_sh(10.0); + let r = _mm_mask_move_sh(src, 0, a, b); + let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 10.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_move_sh() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_sh(9.0); + let r = _mm_maskz_move_sh(0, a, b); + let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 0.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_store_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let mut b = _mm_setzero_ph(); + _mm_store_ph(addr_of_mut!(b).cast(), a); + assert_eq_m128h(a, b); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_store_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let mut b = _mm256_setzero_ph(); + _mm256_store_ph(addr_of_mut!(b).cast(), a); + assert_eq_m256h(a, b); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_store_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let mut b = _mm512_setzero_ph(); + _mm512_store_ph(addr_of_mut!(b).cast(), a); + assert_eq_m512h(a, b); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_store_sh() { + let a = _mm_set_sh(1.0); + let mut b = _mm_setzero_ph(); + _mm_store_sh(addr_of_mut!(b).cast(), a); + assert_eq_m128h(a, b); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_store_sh() { + let a = _mm_set_sh(1.0); + let mut b = _mm_setzero_ph(); + _mm_mask_store_sh(addr_of_mut!(b).cast(), 0, a); + assert_eq_m128h(_mm_setzero_ph(), b); + _mm_mask_store_sh(addr_of_mut!(b).cast(), 1, a); + assert_eq_m128h(a, b); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_storeu_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let mut array = [0.0; 8]; + _mm_storeu_ph(array.as_mut_ptr(), a); + assert_eq_m128h(a, 
_mm_loadu_ph(array.as_ptr())); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_storeu_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let mut array = [0.0; 16]; + _mm256_storeu_ph(array.as_mut_ptr(), a); + assert_eq_m256h(a, _mm256_loadu_ph(array.as_ptr())); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_storeu_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let mut array = [0.0; 32]; + _mm512_storeu_ph(array.as_mut_ptr(), a); + assert_eq_m512h(a, _mm512_loadu_ph(array.as_ptr())); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_add_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let r = _mm_add_ph(a, b); + let e = _mm_set1_ph(9.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_add_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm_mask_add_ph(src, 0b01010101, a, b); + let e = _mm_set_ph(10., 9., 12., 9., 14., 9., 16., 9.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_add_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let r = _mm_maskz_add_ph(0b01010101, a, b); + let e = _mm_set_ph(0., 9., 0., 9., 0., 9., 0., 9.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_add_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let r = _mm256_add_ph(a, b); + let e = _mm256_set1_ph(17.0); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_add_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let src = _mm256_set_ph( + 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., + ); + let r = _mm256_mask_add_ph(src, 0b0101010101010101, a, b); + let e = _mm256_set_ph( + 18., 17., 20., 17., 22., 17., 24., 17., 26., 17., 28., 17., 30., 17., 32., 17., + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_add_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let r = _mm256_maskz_add_ph(0b0101010101010101, a, b); + let e = _mm256_set_ph( + 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_add_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 
10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_add_ph(a, b); + let e = _mm512_set1_ph(33.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_add_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let src = _mm512_set_ph( + 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., + 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + ); + let r = _mm512_mask_add_ph(src, 0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 34., 33., 36., 33., 38., 33., 40., 33., 42., 33., 44., 33., 46., 33., 48., 33., 50., + 33., 52., 33., 54., 33., 56., 33., 58., 33., 60., 33., 62., 33., 64., 33., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_add_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_maskz_add_ph(0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., + 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_add_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_add_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set1_ph(33.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_add_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let src = _mm512_set_ph( + 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 
47., 48., 49., 50., + 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + ); + let r = _mm512_mask_add_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 34., 33., 36., 33., 38., 33., 40., 33., 42., 33., 44., 33., 46., 33., 48., 33., 50., + 33., 52., 33., 54., 33., 56., 33., 58., 33., 60., 33., 62., 33., 64., 33., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_add_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_maskz_add_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., + 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_add_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_set_sh(3.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_add_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let src = _mm_set_sh(4.0); + let r = _mm_mask_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, + ); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 1, a, b, + ); + let e = _mm_set_sh(3.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_add_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = + _mm_maskz_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = + _mm_maskz_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_set_sh(3.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_add_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_add_sh(a, b); + let e = _mm_set_sh(3.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_add_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let src = _mm_set_sh(4.0); + let r = _mm_mask_add_sh(src, 0, a, b); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_add_sh(src, 1, a, b); + let e = _mm_set_sh(3.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_add_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_maskz_add_sh(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = _mm_maskz_add_sh(1, a, b); + let e = _mm_set_sh(3.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_sub_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 
7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let r = _mm_sub_ph(a, b); + let e = _mm_set_ph(-7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_sub_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm_mask_sub_ph(src, 0b01010101, a, b); + let e = _mm_set_ph(10., -5., 12., -1., 14., 3., 16., 7.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_sub_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let r = _mm_maskz_sub_ph(0b01010101, a, b); + let e = _mm_set_ph(0., -5., 0., -1., 0., 3., 0., 7.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_sub_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let r = _mm256_sub_ph(a, b); + let e = _mm256_set_ph( + -15.0, -13.0, -11.0, -9.0, -7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, + 15.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_sub_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let src = _mm256_set_ph( + 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., + ); + let r = _mm256_mask_sub_ph(src, 0b0101010101010101, a, b); + let e = _mm256_set_ph( + 18., -13., 20., -9., 22., -5., 24., -1., 26., 3., 28., 7., 30., 11., 32., 15., + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_sub_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let r = _mm256_maskz_sub_ph(0b0101010101010101, a, b); + let e = _mm256_set_ph( + 0., -13., 0., -9., 0., -5., 0., -1., 0., 3., 0., 7., 0., 11., 0., 15., + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_sub_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_sub_ph(a, b); + let e = _mm512_set_ph( + -31.0, -29.0, -27.0, -25.0, -23.0, -21.0, -19.0, -17.0, -15.0, -13.0, -11.0, -9.0, + -7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, + 23.0, 25.0, 27.0, 29.0, 31.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_sub_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 
6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let src = _mm512_set_ph( + 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., + 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + ); + let r = _mm512_mask_sub_ph(src, 0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 34., -29., 36., -25., 38., -21., 40., -17., 42., -13., 44., -9., 46., -5., 48., -1., + 50., 3., 52., 7., 54., 11., 56., 15., 58., 19., 60., 23., 62., 27., 64., 31., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_sub_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_maskz_sub_ph(0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 0., -29., 0., -25., 0., -21., 0., -17., 0., -13., 0., -9., 0., -5., 0., -1., 0., 3., + 0., 7., 0., 11., 0., 15., 0., 19., 0., 23., 0., 27., 0., 31., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_sub_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_sub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set_ph( + -31.0, -29.0, -27.0, -25.0, -23.0, -21.0, -19.0, -17.0, -15.0, -13.0, -11.0, -9.0, + -7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, + 23.0, 25.0, 27.0, 29.0, 31.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_sub_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let src = _mm512_set_ph( + 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., + 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + ); + let r = _mm512_mask_sub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 34., -29., 36., -25., 38., -21., 40., -17., 42., -13., 44., -9., 46., -5., 48., -1., + 50., 3., 52., 
7., 54., 11., 56., 15., 58., 19., 60., 23., 62., 27., 64., 31., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_sub_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_maskz_sub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 0., -29., 0., -25., 0., -21., 0., -17., 0., -13., 0., -9., 0., -5., 0., -1., 0., 3., + 0., 7., 0., 11., 0., 15., 0., 19., 0., 23., 0., 27., 0., 31., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_sub_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_set_sh(-1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_sub_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let src = _mm_set_sh(4.0); + let r = _mm_mask_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, + ); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 1, a, b, + ); + let e = _mm_set_sh(-1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_sub_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = + _mm_maskz_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = + _mm_maskz_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_set_sh(-1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_sub_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_sub_sh(a, b); + let e = _mm_set_sh(-1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_sub_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let src = _mm_set_sh(4.0); + let r = _mm_mask_sub_sh(src, 0, a, b); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_sub_sh(src, 1, a, b); + let e = _mm_set_sh(-1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_sub_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_maskz_sub_sh(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = _mm_maskz_sub_sh(1, a, b); + let e = _mm_set_sh(-1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mul_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let r = _mm_mul_ph(a, b); + let e = _mm_set_ph(8.0, 14.0, 18.0, 20.0, 20.0, 18.0, 14.0, 8.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_mul_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = 
_mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm_mask_mul_ph(src, 0b01010101, a, b); + let e = _mm_set_ph(10., 14., 12., 20., 14., 18., 16., 8.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_mul_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let r = _mm_maskz_mul_ph(0b01010101, a, b); + let e = _mm_set_ph(0., 14., 0., 20., 0., 18., 0., 8.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mul_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let r = _mm256_mul_ph(a, b); + let e = _mm256_set_ph( + 16.0, 30.0, 42.0, 52.0, 60.0, 66.0, 70.0, 72.0, 72.0, 70.0, 66.0, 60.0, 52.0, 42.0, + 30.0, 16.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_mul_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let src = _mm256_set_ph( + 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., + ); + let r = _mm256_mask_mul_ph(src, 0b0101010101010101, a, b); + let e = _mm256_set_ph( + 18., 30., 20., 52., 22., 66., 24., 72., 26., 70., 28., 60., 30., 42., 32., 16., + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_mul_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let r = _mm256_maskz_mul_ph(0b0101010101010101, a, b); + let e = _mm256_set_ph( + 0., 30., 0., 52., 0., 66., 0., 72., 0., 70., 0., 60., 0., 42., 0., 16., + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mul_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_mul_ph(a, b); + let e = _mm512_set_ph( + 32.0, 62.0, 90.0, 116.0, 140.0, 162.0, 182.0, 200.0, 216.0, 230.0, 242.0, 252.0, 260.0, + 266.0, 270.0, 272.0, 272.0, 270.0, 266.0, 260.0, 252.0, 242.0, 230.0, 216.0, 200.0, + 182.0, 162.0, 140.0, 116.0, 90.0, 62.0, 32.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_mul_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 
10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let src = _mm512_set_ph( + 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., + 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + ); + let r = _mm512_mask_mul_ph(src, 0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 34., 62., 36., 116., 38., 162., 40., 200., 42., 230., 44., 252., 46., 266., 48., 272., + 50., 270., 52., 260., 54., 242., 56., 216., 58., 182., 60., 140., 62., 90., 64., 32., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_mul_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_maskz_mul_ph(0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 0., 62., 0., 116., 0., 162., 0., 200., 0., 230., 0., 252., 0., 266., 0., 272., 0., + 270., 0., 260., 0., 242., 0., 216., 0., 182., 0., 140., 0., 90., 0., 32., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mul_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_mul_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set_ph( + 32.0, 62.0, 90.0, 116.0, 140.0, 162.0, 182.0, 200.0, 216.0, 230.0, 242.0, 252.0, 260.0, + 266.0, 270.0, 272.0, 272.0, 270.0, 266.0, 260.0, 252.0, 242.0, 230.0, 216.0, 200.0, + 182.0, 162.0, 140.0, 116.0, 90.0, 62.0, 32.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_mul_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let src = _mm512_set_ph( + 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., + 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + ); + let r = _mm512_mask_mul_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 34., 62., 36., 116., 38., 162., 40., 200., 42., 230., 44., 252., 46., 266., 48., 272., + 50., 270., 52., 260., 54., 242., 56., 216., 58., 182., 60., 140., 62., 90., 64., 32., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_mul_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 
14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_maskz_mul_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 0., 62., 0., 116., 0., 162., 0., 200., 0., 230., 0., 252., 0., 266., 0., 272., 0., + 270., 0., 260., 0., 242., 0., 216., 0., 182., 0., 140., 0., 90., 0., 32., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mul_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_set_sh(2.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_mul_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let src = _mm_set_sh(4.0); + let r = _mm_mask_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, + ); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 1, a, b, + ); + let e = _mm_set_sh(2.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_mul_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = + _mm_maskz_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = + _mm_maskz_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_set_sh(2.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mul_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_mul_sh(a, b); + let e = _mm_set_sh(2.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_mul_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let src = _mm_set_sh(4.0); + let r = _mm_mask_mul_sh(src, 0, a, b); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_mul_sh(src, 1, a, b); + let e = _mm_set_sh(2.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_mul_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_maskz_mul_sh(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = _mm_maskz_mul_sh(1, a, b); + let e = _mm_set_sh(2.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_div_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let r = _mm_div_ph(a, b); + let e = _mm_set1_ph(0.5); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_div_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let src = _mm_set_ph(4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0); + let r = _mm_mask_div_ph(src, 0b01010101, a, b); + let e = _mm_set_ph(4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_div_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let r = _mm_maskz_div_ph(0b01010101, a, b); + let e = 
_mm_set_ph(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_div_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let r = _mm256_div_ph(a, b); + let e = _mm256_set1_ph(0.5); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_div_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let src = _mm256_set_ph( + 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, + 19.0, + ); + let r = _mm256_mask_div_ph(src, 0b0101010101010101, a, b); + let e = _mm256_set_ph( + 4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5, 12.0, 0.5, 14.0, 0.5, 16.0, 0.5, 18.0, 0.5, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_div_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let r = _mm256_maskz_div_ph(0b0101010101010101, a, b); + let e = _mm256_set_ph( + 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_div_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let r = _mm512_div_ph(a, b); + let e = _mm512_set1_ph(0.5); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_div_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let src = _mm512_set_ph( + 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, + 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, + 33.0, 34.0, 35.0, + ); + let r = _mm512_mask_div_ph(src, 0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5, 12.0, 0.5, 14.0, 0.5, 16.0, 0.5, 18.0, 0.5, + 20.0, 0.5, 22.0, 0.5, 24.0, 0.5, 26.0, 0.5, 28.0, 0.5, 30.0, 0.5, 32.0, 0.5, 34.0, 0.5, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_div_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let r = _mm512_maskz_div_ph(0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, + 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_div_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let r = _mm512_div_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set1_ph(0.5); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_div_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let src = _mm512_set_ph( + 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, + 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, + 33.0, 34.0, 35.0, + ); + let r = _mm512_mask_div_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5, 12.0, 0.5, 14.0, 0.5, 16.0, 0.5, 18.0, 0.5, + 20.0, 0.5, 22.0, 0.5, 24.0, 0.5, 26.0, 0.5, 28.0, 0.5, 30.0, 0.5, 32.0, 0.5, 34.0, 0.5, + ); + assert_eq_m512h(r, e); + } + + 
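+ // NOTE: the explicit-rounding (`_round`) tests in this module all pass
+ // `_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC` (round-to-nearest-even with
+ // exceptions suppressed), so their expected values are identical to those of
+ // the corresponding non-`_round` intrinsics.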
#[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_div_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let r = _mm512_maskz_div_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, + 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_div_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_set_sh(0.5); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_div_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let src = _mm_set_sh(4.0); + let r = _mm_mask_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, + ); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 1, a, b, + ); + let e = _mm_set_sh(0.5); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_div_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = + _mm_maskz_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = + _mm_maskz_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_set_sh(0.5); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_div_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_div_sh(a, b); + let e = _mm_set_sh(0.5); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_div_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let src = _mm_set_sh(4.0); + let r = _mm_mask_div_sh(src, 0, a, b); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_div_sh(src, 1, a, b); + let e = _mm_set_sh(0.5); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_div_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_maskz_div_sh(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = _mm_maskz_div_sh(1, a, b); + let e = _mm_set_sh(0.5); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 1.0); + let r = _mm_mul_pch(a, b); + let e = _mm_set1_pch(-1.0, 0.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_mul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 1.0); + let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0); + let r = _mm_mask_mul_pch(src, 0b0101, a, b); + let e = _mm_setr_ph(-1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_mul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 1.0); + let r = _mm_maskz_mul_pch(0b0101, a, b); + let e = _mm_setr_ph(-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn 
test_mm256_mul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 1.0); + let r = _mm256_mul_pch(a, b); + let e = _mm256_set1_pch(-1.0, 0.0); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_mul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 1.0); + let src = _mm256_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + ); + let r = _mm256_mask_mul_pch(src, 0b01010101, a, b); + let e = _mm256_setr_ph( + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_mul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 1.0); + let r = _mm256_maskz_mul_pch(0b01010101, a, b); + let e = _mm256_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + ); + assert_eq_m256h(r, e); + } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_setr_ph() { - let r = _mm512_setr_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, + unsafe fn test_mm512_mul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let r = _mm512_mul_pch(a, b); + let e = _mm512_set1_pch(-1.0, 0.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_mul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let src = _mm512_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, + 32.0, 33.0, ); - let e = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, + let r = _mm512_mask_mul_pch(src, 0b0101010101010101, a, b); + let e = _mm512_setr_ph( + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, + 33.0, ); assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_setzero_ph() { - let r = _mm_setzero_ph(); - let e = _mm_set1_ph(0.0); - assert_eq_m128h(r, e); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_mul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let r = _mm512_maskz_mul_pch(0b0101010101010101, a, b); + let e = _mm512_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + ); + assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_setzero_ph() { - let r = _mm256_setzero_ph(); - let e = _mm256_set1_ph(0.0); - assert_eq_m256h(r, e); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let r = _mm512_mul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set1_pch(-1.0, 0.0); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - 
unsafe fn test_mm512_setzero_ph() { - let r = _mm512_setzero_ph(); - let e = _mm512_set1_ph(0.0); + unsafe fn test_mm512_mask_mul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let src = _mm512_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, + 32.0, 33.0, + ); + let r = _mm512_mask_mul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b0101010101010101, + a, + b, + ); + let e = _mm512_setr_ph( + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, + 33.0, + ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_castsi128_ph() { - let a = _mm_set1_epi16(0x3c00); - let r = _mm_castsi128_ph(a); - let e = _mm_set1_ph(1.0); + unsafe fn test_mm512_maskz_mul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let r = _mm512_maskz_mul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b0101010101010101, + a, + b, + ); + let e = _mm512_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let r = _mm_mul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castsi256_ph() { - let a = _mm256_set1_epi16(0x3c00); - let r = _mm256_castsi256_ph(a); - let e = _mm256_set1_ph(1.0); - assert_eq_m256h(r, e); + unsafe fn test_mm_mask_mul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); + let r = _mm_mask_mul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, + ); + let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castsi512_ph() { - let a = _mm512_set1_epi16(0x3c00); - let r = _mm512_castsi512_ph(a); - let e = _mm512_set1_ph(1.0); - assert_eq_m512h(r, e); + unsafe fn test_mm_maskz_mul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let r = + _mm_maskz_mul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_castph_si128() { - let a = _mm_set1_ph(1.0); - let r = _mm_castph_si128(a); - let e = _mm_set1_epi16(0x3c00); - assert_eq_m128i(r, e); + unsafe fn test_mm_mul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let r = _mm_mul_sch(a, b); + let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } 
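+ // NOTE: the `pch`/`sch` intrinsics operate on packed complex numbers: each
+ // adjacent pair of `f16` lanes holds one value as (real, imaginary), so one
+ // mask bit covers two lanes. These tests rely on (0 + 1i) * (0 + 1i) = -1 + 0i.
+ // The `fmul` variants below are aliases of `mul`, while `cmul`/`fcmul`
+ // multiply `a` by the complex conjugate of `b`, e.g.
+ // (0 + 1i) * conj(0 - 1i) = -1 + 0i.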
#[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castph_si256() { - let a = _mm256_set1_ph(1.0); - let r = _mm256_castph_si256(a); - let e = _mm256_set1_epi16(0x3c00); - assert_eq_m256i(r, e); + unsafe fn test_mm_mask_mul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); + let r = _mm_mask_mul_sch(src, 0, a, b); + let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castph_si512() { - let a = _mm512_set1_ph(1.0); - let r = _mm512_castph_si512(a); - let e = _mm512_set1_epi16(0x3c00); - assert_eq_m512i(r, e); + unsafe fn test_mm_maskz_mul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let r = _mm_maskz_mul_sch(0, a, b); + let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_fmul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 1.0); + let r = _mm_fmul_pch(a, b); + let e = _mm_set1_pch(-1.0, 0.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_fmul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 1.0); + let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0); + let r = _mm_mask_fmul_pch(src, 0b0101, a, b); + let e = _mm_setr_ph(-1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_fmul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 1.0); + let r = _mm_maskz_fmul_pch(0b0101, a, b); + let e = _mm_setr_ph(-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_fmul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 1.0); + let r = _mm256_fmul_pch(a, b); + let e = _mm256_set1_pch(-1.0, 0.0); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_fmul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 1.0); + let src = _mm256_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + ); + let r = _mm256_mask_fmul_pch(src, 0b01010101, a, b); + let e = _mm256_setr_ph( + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_fmul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 1.0); + let r = _mm256_maskz_fmul_pch(0b01010101, a, b); + let e = _mm256_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_fmul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let r = _mm512_fmul_pch(a, b); + let e = _mm512_set1_pch(-1.0, 0.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_fmul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let src = _mm512_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 
7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, + 32.0, 33.0, + ); + let r = _mm512_mask_fmul_pch(src, 0b0101010101010101, a, b); + let e = _mm512_setr_ph( + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, + 33.0, + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_castps_ph() { - let a = _mm_castsi128_ps(_mm_set1_epi16(0x3c00)); - let r = _mm_castps_ph(a); - let e = _mm_set1_ph(1.0); - assert_eq_m128h(r, e); + unsafe fn test_mm512_maskz_fmul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let r = _mm512_maskz_fmul_pch(0b0101010101010101, a, b); + let e = _mm512_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castps_ph() { - let a = _mm256_castsi256_ps(_mm256_set1_epi16(0x3c00)); - let r = _mm256_castps_ph(a); - let e = _mm256_set1_ph(1.0); - assert_eq_m256h(r, e); + unsafe fn test_mm512_fmul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let r = _mm512_fmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set1_pch(-1.0, 0.0); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castps_ph() { - let a = _mm512_castsi512_ps(_mm512_set1_epi16(0x3c00)); - let r = _mm512_castps_ph(a); - let e = _mm512_set1_ph(1.0); + unsafe fn test_mm512_mask_fmul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let src = _mm512_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, + 32.0, 33.0, + ); + let r = _mm512_mask_fmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b0101010101010101, + a, + b, + ); + let e = _mm512_setr_ph( + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, + 33.0, + ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_castph_ps() { - let a = _mm_castsi128_ph(_mm_set1_epi32(0x3f800000)); - let r = _mm_castph_ps(a); - let e = _mm_set1_ps(1.0); - assert_eq_m128(r, e); + unsafe fn test_mm512_maskz_fmul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let r = _mm512_maskz_fmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b0101010101010101, + a, + b, + ); + let e = _mm512_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castph_ps() { - let a = _mm256_castsi256_ph(_mm256_set1_epi32(0x3f800000)); - let r = _mm256_castph_ps(a); - let e = _mm256_set1_ps(1.0); - assert_eq_m256(r, e); + unsafe fn test_mm_fmul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 
12.0, 13.0); + let r = _mm_fmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castph_ps() { - let a = _mm512_castsi512_ph(_mm512_set1_epi32(0x3f800000)); - let r = _mm512_castph_ps(a); - let e = _mm512_set1_ps(1.0); - assert_eq_m512(r, e); + unsafe fn test_mm_mask_fmul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); + let r = _mm_mask_fmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, + ); + let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_castpd_ph() { - let a = _mm_castsi128_pd(_mm_set1_epi16(0x3c00)); - let r = _mm_castpd_ph(a); - let e = _mm_set1_ph(1.0); + unsafe fn test_mm_maskz_fmul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let r = + _mm_maskz_fmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castpd_ph() { - let a = _mm256_castsi256_pd(_mm256_set1_epi16(0x3c00)); - let r = _mm256_castpd_ph(a); - let e = _mm256_set1_ph(1.0); - assert_eq_m256h(r, e); + unsafe fn test_mm_fmul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let r = _mm_fmul_sch(a, b); + let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castpd_ph() { - let a = _mm512_castsi512_pd(_mm512_set1_epi16(0x3c00)); - let r = _mm512_castpd_ph(a); - let e = _mm512_set1_ph(1.0); - assert_eq_m512h(r, e); + unsafe fn test_mm_mask_fmul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); + let r = _mm_mask_fmul_sch(src, 0, a, b); + let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_castph_pd() { - let a = _mm_castsi128_ph(_mm_set1_epi64x(0x3ff0000000000000)); - let r = _mm_castph_pd(a); - let e = _mm_set1_pd(1.0); - assert_eq_m128d(r, e); + unsafe fn test_mm_maskz_fmul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let r = _mm_maskz_fmul_sch(0, a, b); + let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castph_pd() { - let a = _mm256_castsi256_ph(_mm256_set1_epi64x(0x3ff0000000000000)); - let r = _mm256_castph_pd(a); - let e = _mm256_set1_pd(1.0); - assert_eq_m256d(r, e); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_cmul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, -1.0); + let r = _mm_cmul_pch(a, b); + let e = _mm_set1_pch(-1.0, 0.0); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn 
test_mm512_castph_pd() { - let a = _mm512_castsi512_ph(_mm512_set1_epi64(0x3ff0000000000000)); - let r = _mm512_castph_pd(a); - let e = _mm512_set1_pd(1.0); - assert_eq_m512d(r, e); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_cmul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, -1.0); + let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0); + let r = _mm_mask_cmul_pch(src, 0b0101, a, b); + let e = _mm_setr_ph(-1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castph256_ph128() { - let a = _mm256_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., - ); - let r = _mm256_castph256_ph128(a); - let e = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_cmul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, -1.0); + let r = _mm_maskz_cmul_pch(0b0101, a, b); + let e = _mm_setr_ph(-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castph512_ph128() { - let a = _mm512_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., - 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., - ); - let r = _mm512_castph512_ph128(a); - let e = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); - assert_eq_m128h(r, e); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_cmul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, -1.0); + let r = _mm256_cmul_pch(a, b); + let e = _mm256_set1_pch(-1.0, 0.0); + assert_eq_m256h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castph512_ph256() { - let a = _mm512_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., - 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_cmul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, -1.0); + let src = _mm256_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, ); - let r = _mm512_castph512_ph256(a); + let r = _mm256_mask_cmul_pch(src, 0b01010101, a, b); let e = _mm256_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_cmul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, -1.0); + let r = _mm256_maskz_cmul_pch(0b01010101, a, b); + let e = _mm256_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castph128_ph256() { - let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); - let r = _mm256_castph128_ph256(a); - assert_eq_m128h(_mm256_castph256_ph128(r), a); + unsafe fn test_mm512_cmul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, -1.0); + let r = _mm512_cmul_pch(a, b); + let e = _mm512_set1_pch(-1.0, 0.0); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castph128_ph512() { - let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); - let r = 
_mm512_castph128_ph512(a); - assert_eq_m128h(_mm512_castph512_ph128(r), a); + unsafe fn test_mm512_mask_cmul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, -1.0); + let src = _mm512_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, + 32.0, 33.0, + ); + let r = _mm512_mask_cmul_pch(src, 0b0101010101010101, a, b); + let e = _mm512_setr_ph( + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, + 33.0, + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castph256_ph512() { - let a = _mm256_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., + unsafe fn test_mm512_maskz_cmul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, -1.0); + let r = _mm512_maskz_cmul_pch(0b0101010101010101, a, b); + let e = _mm512_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, ); - let r = _mm512_castph256_ph512(a); - assert_eq_m256h(_mm512_castph512_ph256(r), a); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_zextph128_ph256() { - let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); - let r = _mm256_zextph128_ph256(a); - let e = _mm256_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 0., 0., 0., 0., 0., 0., 0., 0., - ); - assert_eq_m256h(r, e); + unsafe fn test_mm512_cmul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, -1.0); + let r = _mm512_cmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set1_pch(-1.0, 0.0); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_zextph128_ph512() { - let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); - let r = _mm512_zextph128_ph512(a); + unsafe fn test_mm512_mask_cmul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, -1.0); + let src = _mm512_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, + 32.0, 33.0, + ); + let r = _mm512_mask_cmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b0101010101010101, + a, + b, + ); let e = _mm512_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., - 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, + 33.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_zextph256_ph512() { - let a = _mm256_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., + unsafe fn test_mm512_maskz_cmul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, -1.0); + let r = _mm512_maskz_cmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b0101010101010101, + a, + b, ); - let r = _mm512_zextph256_ph512(a); let e = _mm512_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 0., 0., 0., 
0., - 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_cmp_round_sh_mask() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = _mm_cmp_round_sh_mask::<_CMP_EQ_OQ, _MM_FROUND_NO_EXC>(a, b); - assert_eq!(r, 1); + unsafe fn test_mm_cmul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let r = _mm_cmul_sch(a, b); + let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_cmp_round_sh_mask() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = _mm_mask_cmp_round_sh_mask::<_CMP_EQ_OQ, _MM_FROUND_NO_EXC>(0, a, b); - assert_eq!(r, 0); + unsafe fn test_mm_mask_cmul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); + let r = _mm_mask_cmul_sch(src, 0, a, b); + let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_cmp_sh_mask() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = _mm_cmp_sh_mask::<_CMP_EQ_OQ>(a, b); - assert_eq!(r, 1); + unsafe fn test_mm_maskz_cmul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let r = _mm_maskz_cmul_sch(0, a, b); + let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_cmp_sh_mask() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = _mm_mask_cmp_sh_mask::<_CMP_EQ_OQ>(0, a, b); - assert_eq!(r, 0); + unsafe fn test_mm_cmul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let r = _mm_cmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comi_round_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = _mm_comi_round_sh::<_CMP_EQ_OQ, _MM_FROUND_NO_EXC>(a, b); - assert_eq!(r, 1); + unsafe fn test_mm_mask_cmul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); + let r = _mm_mask_cmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, + ); + let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comi_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = _mm_comi_sh::<_CMP_EQ_OQ>(a, b); - assert_eq!(r, 1); + unsafe fn test_mm_maskz_cmul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let r = + _mm_maskz_cmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | 
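+ // A brief note on the const argument (a sketch of the documented Intel
+ // semantics, not part of the test logic): `_MM_FROUND_TO_NEAREST_INT |
+ // _MM_FROUND_NO_EXC` selects round-to-nearest-even with exception
+ // reporting suppressed (SAE). With the single mask bit clear, `maskz`
+ // zeroes the low complex pair and copies elements 2..7 from `a`.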
_MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comieq_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = _mm_comieq_sh(a, b); - assert_eq!(r, 1); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_fcmul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, -1.0); + let r = _mm_fcmul_pch(a, b); + let e = _mm_set1_pch(-1.0, 0.0); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comige_sh() { - let a = _mm_set_sh(2.0); - let b = _mm_set_sh(1.0); - let r = _mm_comige_sh(a, b); - assert_eq!(r, 1); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_fcmul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, -1.0); + let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0); + let r = _mm_mask_fcmul_pch(src, 0b0101, a, b); + let e = _mm_setr_ph(-1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comigt_sh() { - let a = _mm_set_sh(2.0); - let b = _mm_set_sh(1.0); - let r = _mm_comigt_sh(a, b); - assert_eq!(r, 1); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_fcmul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, -1.0); + let r = _mm_maskz_fcmul_pch(0b0101, a, b); + let e = _mm_setr_ph(-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comile_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_comile_sh(a, b); - assert_eq!(r, 1); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_fcmul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, -1.0); + let r = _mm256_fcmul_pch(a, b); + let e = _mm256_set1_pch(-1.0, 0.0); + assert_eq_m256h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comilt_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_comilt_sh(a, b); - assert_eq!(r, 1); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_fcmul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, -1.0); + let src = _mm256_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + ); + let r = _mm256_mask_fcmul_pch(src, 0b01010101, a, b); + let e = _mm256_setr_ph( + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_fcmul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, -1.0); + let r = _mm256_maskz_fcmul_pch(0b01010101, a, b); + let e = _mm256_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + ); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comineq_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_comineq_sh(a, b); - assert_eq!(r, 1); + unsafe fn test_mm512_fcmul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, -1.0); + let r = _mm512_fcmul_pch(a, b); + let e = _mm512_set1_pch(-1.0, 0.0); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_ucomieq_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = 
_mm_ucomieq_sh(a, b); - assert_eq!(r, 1); + unsafe fn test_mm512_mask_fcmul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, -1.0); + let src = _mm512_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, + 32.0, 33.0, + ); + let r = _mm512_mask_fcmul_pch(src, 0b0101010101010101, a, b); + let e = _mm512_setr_ph( + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, + 33.0, + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_ucomige_sh() { - let a = _mm_set_sh(2.0); - let b = _mm_set_sh(1.0); - let r = _mm_ucomige_sh(a, b); - assert_eq!(r, 1); + unsafe fn test_mm512_maskz_fcmul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, -1.0); + let r = _mm512_maskz_fcmul_pch(0b0101010101010101, a, b); + let e = _mm512_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_ucomigt_sh() { - let a = _mm_set_sh(2.0); - let b = _mm_set_sh(1.0); - let r = _mm_ucomigt_sh(a, b); - assert_eq!(r, 1); + unsafe fn test_mm512_fcmul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, -1.0); + let r = _mm512_fcmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set1_pch(-1.0, 0.0); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_ucomile_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_ucomile_sh(a, b); - assert_eq!(r, 1); + unsafe fn test_mm512_mask_fcmul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, -1.0); + let src = _mm512_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, + 32.0, 33.0, + ); + let r = _mm512_mask_fcmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b0101010101010101, + a, + b, + ); + let e = _mm512_setr_ph( + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, + 33.0, + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_ucomilt_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_ucomilt_sh(a, b); - assert_eq!(r, 1); + unsafe fn test_mm512_maskz_fcmul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, -1.0); + let r = _mm512_maskz_fcmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b0101010101010101, + a, + b, + ); + let e = _mm512_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_ucomineq_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_ucomineq_sh(a, b); - assert_eq!(r, 1); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_load_ph() 
{ - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_load_ph(addr_of!(a).cast()); - assert_eq_m128h(a, b); + unsafe fn test_mm_fcmul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let r = _mm_fcmul_sch(a, b); + let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_load_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let b = _mm256_load_ph(addr_of!(a).cast()); - assert_eq_m256h(a, b); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_fcmul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); + let r = _mm_mask_fcmul_sch(src, 0, a, b); + let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_load_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_load_ph(addr_of!(a).cast()); - assert_eq_m512h(a, b); + unsafe fn test_mm_maskz_fcmul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let r = _mm_maskz_fcmul_sch(0, a, b); + let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_load_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_load_sh(addr_of!(a).cast()); - assert_eq_m128h(a, b); + unsafe fn test_mm_fcmul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let r = _mm_fcmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_load_sh() { - let a = _mm_set_sh(1.0); - let src = _mm_set_sh(2.); - let b = _mm_mask_load_sh(src, 1, addr_of!(a).cast()); - assert_eq_m128h(a, b); - let b = _mm_mask_load_sh(src, 0, addr_of!(a).cast()); - assert_eq_m128h(src, b); + unsafe fn test_mm_mask_fcmul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); + let r = _mm_mask_fcmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, + ); + let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_load_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_maskz_load_sh(1, addr_of!(a).cast()); - assert_eq_m128h(a, b); - let b = _mm_maskz_load_sh(0, addr_of!(a).cast()); - assert_eq_m128h(_mm_setzero_ph(), b); + unsafe fn test_mm_maskz_fcmul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let r = + _mm_maskz_fcmul_round_sch::<{ 
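+ // `fcmul` and `cmul` name the same conjugate multiply (both correspond
+ // to VFCMULCSH for the scalar form); the `_round` variant only adds
+ // explicit rounding/SAE control. As in the `cmul` test above, a clear
+ // mask bit zeroes the low complex pair while elements 2..7 come from `a`.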
_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_loadu_ph() { - let array = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]; - let r = _mm_loadu_ph(array.as_ptr()); - let e = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + unsafe fn test_mm_abs_ph() { + let a = _mm_set_ph(-1.0, 0.0, 1.0, -2.0, 3.0, -4.0, 5.0, -6.0); + let r = _mm_abs_ph(a); + let e = _mm_set_ph(1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_loadu_ph() { - let array = [ - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ]; - let r = _mm256_loadu_ph(array.as_ptr()); - let e = _mm256_setr_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + unsafe fn test_mm256_abs_ph() { + let a = _mm256_set_ph( + -1.0, 0.0, 1.0, -2.0, 3.0, -4.0, 5.0, -6.0, 7.0, -8.0, 9.0, -10.0, 11.0, -12.0, 13.0, + -14.0, + ); + let r = _mm256_abs_ph(a); + let e = _mm256_set_ph( + 1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_loadu_ph() { - let array = [ - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ]; - let r = _mm512_loadu_ph(array.as_ptr()); - let e = _mm512_setr_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, + unsafe fn test_mm512_abs_ph() { + let a = _mm512_set_ph( + -1.0, 0.0, 1.0, -2.0, 3.0, -4.0, 5.0, -6.0, 7.0, -8.0, 9.0, -10.0, 11.0, -12.0, 13.0, + -14.0, 15.0, -16.0, 17.0, -18.0, 19.0, -20.0, 21.0, -22.0, 23.0, -24.0, 25.0, -26.0, + 27.0, -28.0, 29.0, -30.0, + ); + let r = _mm512_abs_ph(a); + let e = _mm512_set_ph( + 1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, + 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, + 29.0, 30.0, ); assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_move_sh() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_sh(9.0); - let r = _mm_move_sh(a, b); - let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 9.0); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_conj_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let r = _mm_conj_pch(a); + let e = _mm_set1_pch(0.0, -1.0); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_move_sh() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_sh(9.0); - let src = _mm_set_sh(10.0); - let r = _mm_mask_move_sh(src, 0, a, b); - let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 10.0); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_conj_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0); + let r = _mm_mask_conj_pch(src, 0b0101, a); + let e = _mm_setr_ph(0.0, -1.0, 4.0, 5.0, 0.0, -1.0, 8.0, 9.0); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_move_sh() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); 
- let b = _mm_set_sh(9.0); - let r = _mm_maskz_move_sh(0, a, b); - let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 0.0); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_conj_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let r = _mm_maskz_conj_pch(0b0101, a); + let e = _mm_setr_ph(0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_store_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let mut b = _mm_setzero_ph(); - _mm_store_ph(addr_of_mut!(b).cast(), a); - assert_eq_m128h(a, b); + unsafe fn test_mm256_conj_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let r = _mm256_conj_pch(a); + let e = _mm256_set1_pch(0.0, -1.0); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_store_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + unsafe fn test_mm256_mask_conj_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let src = _mm256_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, ); - let mut b = _mm256_setzero_ph(); - _mm256_store_ph(addr_of_mut!(b).cast(), a); - assert_eq_m256h(a, b); + let r = _mm256_mask_conj_pch(src, 0b01010101, a); + let e = _mm256_setr_ph( + 0.0, -1.0, 4.0, 5.0, 0.0, -1.0, 8.0, 9.0, 0.0, -1.0, 12.0, 13.0, 0.0, -1.0, 16.0, 17.0, + ); + assert_eq_m256h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_store_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_conj_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let r = _mm256_maskz_conj_pch(0b01010101, a); + let e = _mm256_setr_ph( + 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, ); - let mut b = _mm512_setzero_ph(); - _mm512_store_ph(addr_of_mut!(b).cast(), a); - assert_eq_m512h(a, b); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_store_sh() { - let a = _mm_set_sh(1.0); - let mut b = _mm_setzero_ph(); - _mm_store_sh(addr_of_mut!(b).cast(), a); - assert_eq_m128h(a, b); + unsafe fn test_mm512_conj_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let r = _mm512_conj_pch(a); + let e = _mm512_set1_pch(0.0, -1.0); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_store_sh() { - let a = _mm_set_sh(1.0); - let mut b = _mm_setzero_ph(); - _mm_mask_store_sh(addr_of_mut!(b).cast(), 0, a); - assert_eq_m128h(_mm_setzero_ph(), b); - _mm_mask_store_sh(addr_of_mut!(b).cast(), 1, a); - assert_eq_m128h(a, b); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_storeu_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let mut array = [0.0; 8]; - _mm_storeu_ph(array.as_mut_ptr(), a); - assert_eq_m128h(a, _mm_loadu_ph(array.as_ptr())); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_storeu_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + unsafe fn test_mm512_mask_conj_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let src = _mm512_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + 18.0, 19.0, 20.0, 21.0, 
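+ // (`src` continues with distinct fill values so that pairs masked off by
+ // `_mm512_mask_conj_pch` can be seen passing through unchanged; selected
+ // pairs get the conjugate of `a`, i.e. the sign of each imaginary `f16`
+ // is flipped, turning 0+1i into 0-1i)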
22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, + 32.0, 33.0, ); - let mut array = [0.0; 16]; - _mm256_storeu_ph(array.as_mut_ptr(), a); - assert_eq_m256h(a, _mm256_loadu_ph(array.as_ptr())); + let r = _mm512_mask_conj_pch(src, 0b0101010101010101, a); + let e = _mm512_setr_ph( + 0.0, -1.0, 4.0, 5.0, 0.0, -1.0, 8.0, 9.0, 0.0, -1.0, 12.0, 13.0, 0.0, -1.0, 16.0, 17.0, + 0.0, -1.0, 20.0, 21.0, 0.0, -1.0, 24.0, 25.0, 0.0, -1.0, 28.0, 29.0, 0.0, -1.0, 32.0, + 33.0, + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_storeu_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, + unsafe fn test_mm512_maskz_conj_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let r = _mm512_maskz_conj_pch(0b0101010101010101, a); + let e = _mm512_setr_ph( + 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, + 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, ); - let mut array = [0.0; 32]; - _mm512_storeu_ph(array.as_mut_ptr(), a); - assert_eq_m512h(a, _mm512_loadu_ph(array.as_ptr())); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_add_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - let r = _mm_add_ph(a, b); - let e = _mm_set1_ph(9.0); + unsafe fn test_mm_fmadd_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 2.0); + let c = _mm_set1_pch(0.0, 3.0); + let r = _mm_fmadd_pch(a, b, c); + let e = _mm_set1_pch(-2.0, 3.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_add_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let r = _mm_mask_add_ph(src, 0b01010101, a, b); - let e = _mm_set_ph(10., 9., 12., 9., 14., 9., 16., 9.); + unsafe fn test_mm_mask_fmadd_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 2.0); + let c = _mm_set1_pch(0.0, 3.0); + let r = _mm_mask_fmadd_pch(a, 0b0101, b, c); + let e = _mm_setr_ph(-2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_add_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - let r = _mm_maskz_add_ph(0b01010101, a, b); - let e = _mm_set_ph(0., 9., 0., 9., 0., 9., 0., 9.); + unsafe fn test_mm_mask3_fmadd_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 2.0); + let c = _mm_set1_pch(0.0, 3.0); + let r = _mm_mask3_fmadd_pch(a, b, c, 0b0101); + let e = _mm_setr_ph(-2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_add_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let b = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, - ); - let r = _mm256_add_ph(a, b); - let e = _mm256_set1_ph(17.0); + unsafe fn test_mm_maskz_fmadd_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 2.0); + let c = _mm_set1_pch(0.0, 3.0); + let r = 
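+ // Complex FMA over `f16` pairs: each pair computes a*b + c, here
+ // (0+1i)*(0+2i) + (0+3i) = -2+3i. `maskz` takes one mask bit per
+ // complex pair and zeroes both halves of every pair whose bit is clear.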
_mm_maskz_fmadd_pch(0b0101, a, b, c); + let e = _mm_setr_ph(-2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_fmadd_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 2.0); + let c = _mm256_set1_pch(0.0, 3.0); + let r = _mm256_fmadd_pch(a, b, c); + let e = _mm256_set1_pch(-2.0, 3.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_add_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let b = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, - ); - let src = _mm256_set_ph( - 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., - ); - let r = _mm256_mask_add_ph(src, 0b0101010101010101, a, b); - let e = _mm256_set_ph( - 18., 17., 20., 17., 22., 17., 24., 17., 26., 17., 28., 17., 30., 17., 32., 17., + unsafe fn test_mm256_mask_fmadd_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 2.0); + let c = _mm256_set1_pch(0.0, 3.0); + let r = _mm256_mask_fmadd_pch(a, 0b01010101, b, c); + let e = _mm256_setr_ph( + -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_add_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let b = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + unsafe fn test_mm256_mask3_fmadd_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 2.0); + let c = _mm256_set1_pch(0.0, 3.0); + let r = _mm256_mask3_fmadd_pch(a, b, c, 0b01010101); + let e = _mm256_setr_ph( + -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, ); - let r = _mm256_maskz_add_ph(0b0101010101010101, a, b); - let e = _mm256_set_ph( - 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_fmadd_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 2.0); + let c = _mm256_set1_pch(0.0, 3.0); + let r = _mm256_maskz_fmadd_pch(0b01010101, a, b, c); + let e = _mm256_setr_ph( + -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_add_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let r = _mm512_add_ph(a, b); - let e = _mm512_set1_ph(33.0); + unsafe fn test_mm512_fmadd_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_fmadd_pch(a, b, c); + let e = _mm512_set1_pch(-2.0, 3.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_add_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 
6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let src = _mm512_set_ph( - 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., - 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., - ); - let r = _mm512_mask_add_ph(src, 0b01010101010101010101010101010101, a, b); - let e = _mm512_set_ph( - 34., 33., 36., 33., 38., 33., 40., 33., 42., 33., 44., 33., 46., 33., 48., 33., 50., - 33., 52., 33., 54., 33., 56., 33., 58., 33., 60., 33., 62., 33., 64., 33., + unsafe fn test_mm512_mask_fmadd_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_mask_fmadd_pch(a, 0b0101010101010101, b, c); + let e = _mm512_setr_ph( + -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, + -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_add_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let r = _mm512_maskz_add_ph(0b01010101010101010101010101010101, a, b); - let e = _mm512_set_ph( - 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., - 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., + unsafe fn test_mm512_mask3_fmadd_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_mask3_fmadd_pch(a, b, c, 0b0101010101010101); + let e = _mm512_setr_ph( + -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, + -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_add_round_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, + unsafe fn test_mm512_maskz_fmadd_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_maskz_fmadd_pch(0b0101010101010101, a, b, c); + let e = _mm512_setr_ph( + -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, + -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, ); - let r = _mm512_add_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm512_set1_ph(33.0); 
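+ // The 16-bit mask covers the 16 complex pairs (32 `f16`s) of a 512-bit
+ // vector: even pairs compute (0+1i)*(0+2i) + (0+3i) = -2+3i, odd pairs
+ // are zeroed.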
assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_add_round_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let src = _mm512_set_ph( - 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., - 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., - ); - let r = _mm512_mask_add_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, - 0b01010101010101010101010101010101, + unsafe fn test_mm512_fmadd_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = + _mm512_fmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm512_set1_pch(-2.0, 3.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_fmadd_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_mask_fmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, + 0b0101010101010101, b, + c, ); - let e = _mm512_set_ph( - 34., 33., 36., 33., 38., 33., 40., 33., 42., 33., 44., 33., 46., 33., 48., 33., 50., - 33., 52., 33., 54., 33., 56., 33., 58., 33., 60., 33., 62., 33., 64., 33., + let e = _mm512_setr_ph( + -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, + -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_add_round_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, + unsafe fn test_mm512_mask3_fmadd_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_mask3_fmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, + b, + c, + 0b0101010101010101, ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, + let e = _mm512_setr_ph( + -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, + -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, ); - let r = _mm512_maskz_add_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b01010101010101010101010101010101, + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_fmadd_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_maskz_fmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b0101010101010101, a, b, + c, ); - let e = _mm512_set_ph( - 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., - 33., 0., 33., 
0., 33., 0., 33., 0., 33., 0., 33., 0., 33., + let e = _mm512_setr_ph( + -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, + -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_add_round_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm_set_sh(3.0); + unsafe fn test_mm_fmadd_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_fmadd_sch(a, b, c); + let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_add_round_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let src = _mm_set_sh(4.0); - let r = _mm_mask_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 0, a, b, - ); - let e = _mm_set_sh(4.0); + unsafe fn test_mm_mask_fmadd_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_mask_fmadd_sch(a, 0, b, c); + let e = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); - let r = _mm_mask_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 1, a, b, - ); - let e = _mm_set_sh(3.0); + let r = _mm_mask_fmadd_sch(a, 1, b, c); + let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_add_round_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = - _mm_maskz_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); - let e = _mm_set_sh(0.0); + unsafe fn test_mm_mask3_fmadd_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_mask3_fmadd_sch(a, b, c, 0); + let e = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); assert_eq_m128h(r, e); - let r = - _mm_maskz_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); - let e = _mm_set_sh(3.0); + let r = _mm_mask3_fmadd_sch(a, b, c, 1); + let e = _mm_setr_ph(-2.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_add_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_add_sh(a, b); - let e = _mm_set_sh(3.0); + unsafe fn test_mm_maskz_fmadd_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_maskz_fmadd_sch(0, a, b, c); + let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); + let r = _mm_maskz_fmadd_sch(1, a, b, c); + let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_fmadd_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 
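+ // (elements 2..7 of `b` and `c` are arbitrary fill values: the scalar
+ // `sch` forms read only the low complex pair of `b` and `c`, and the
+ // upper elements of the result are copied from `a`)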
9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_fmadd_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_mask_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, 0, b, c, + ); + let e = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); + let r = _mm_mask_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, 1, b, c, + ); + let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_add_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let src = _mm_set_sh(4.0); - let r = _mm_mask_add_sh(src, 0, a, b); - let e = _mm_set_sh(4.0); + unsafe fn test_mm_mask3_fmadd_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_mask3_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, b, c, 0, + ); + let e = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); assert_eq_m128h(r, e); - let r = _mm_mask_add_sh(src, 1, a, b); - let e = _mm_set_sh(3.0); + let r = _mm_mask3_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, b, c, 1, + ); + let e = _mm_setr_ph(-2.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_add_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_maskz_add_sh(0, a, b); - let e = _mm_set_sh(0.0); + unsafe fn test_mm_maskz_fmadd_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_maskz_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0, a, b, c, + ); + let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); - let r = _mm_maskz_add_sh(1, a, b); - let e = _mm_set_sh(3.0); + let r = _mm_maskz_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 1, a, b, c, + ); + let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_sub_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - let r = _mm_sub_ph(a, b); - let e = _mm_set_ph(-7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0); + unsafe fn test_mm_fcmadd_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 2.0); + let c = _mm_set1_pch(0.0, 3.0); + let r = _mm_fcmadd_pch(a, b, c); + let e = _mm_set1_pch(2.0, 3.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_sub_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - let src = _mm_set_ph(10., 
11., 12., 13., 14., 15., 16., 17.); - let r = _mm_mask_sub_ph(src, 0b01010101, a, b); - let e = _mm_set_ph(10., -5., 12., -1., 14., 3., 16., 7.); + unsafe fn test_mm_mask_fcmadd_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 2.0); + let c = _mm_set1_pch(0.0, 3.0); + let r = _mm_mask_fcmadd_pch(a, 0b0101, b, c); + let e = _mm_setr_ph(2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_sub_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - let r = _mm_maskz_sub_ph(0b01010101, a, b); - let e = _mm_set_ph(0., -5., 0., -1., 0., 3., 0., 7.); + unsafe fn test_mm_mask3_fcmadd_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 2.0); + let c = _mm_set1_pch(0.0, 3.0); + let r = _mm_mask3_fcmadd_pch(a, b, c, 0b0101); + let e = _mm_setr_ph(2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_sub_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let b = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, - ); - let r = _mm256_sub_ph(a, b); - let e = _mm256_set_ph( - -15.0, -13.0, -11.0, -9.0, -7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, - 15.0, - ); + unsafe fn test_mm_maskz_fcmadd_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 2.0); + let c = _mm_set1_pch(0.0, 3.0); + let r = _mm_maskz_fcmadd_pch(0b0101, a, b, c); + let e = _mm_setr_ph(2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_fcmadd_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 2.0); + let c = _mm256_set1_pch(0.0, 3.0); + let r = _mm256_fcmadd_pch(a, b, c); + let e = _mm256_set1_pch(2.0, 3.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_sub_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let b = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, - ); - let src = _mm256_set_ph( - 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., - ); - let r = _mm256_mask_sub_ph(src, 0b0101010101010101, a, b); - let e = _mm256_set_ph( - 18., -13., 20., -9., 22., -5., 24., -1., 26., 3., 28., 7., 30., 11., 32., 15., + unsafe fn test_mm256_mask_fcmadd_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 2.0); + let c = _mm256_set1_pch(0.0, 3.0); + let r = _mm256_mask_fcmadd_pch(a, 0b01010101, b, c); + let e = _mm256_setr_ph( + 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_sub_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let b = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + unsafe fn test_mm256_mask3_fcmadd_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 2.0); + let c = _mm256_set1_pch(0.0, 3.0); + let r = _mm256_mask3_fcmadd_pch(a, b, c, 0b01010101); + let e 
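+ // `fcmadd` multiplies by the conjugate of `b`: a*conj(b) + c
+ // = (0+1i)*(0-2i) + (0+3i) = 2+3i. The `mask3` form keeps `c` (0+3i)
+ // in unselected pairs, hence the repeating (2.0, 3.0, 0.0, 3.0) pattern.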
= _mm256_setr_ph( + 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, ); - let r = _mm256_maskz_sub_ph(0b0101010101010101, a, b); - let e = _mm256_set_ph( - 0., -13., 0., -9., 0., -5., 0., -1., 0., 3., 0., 7., 0., 11., 0., 15., + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_fcmadd_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 2.0); + let c = _mm256_set1_pch(0.0, 3.0); + let r = _mm256_maskz_fcmadd_pch(0b01010101, a, b, c); + let e = _mm256_setr_ph( + 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_sub_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let r = _mm512_sub_ph(a, b); - let e = _mm512_set_ph( - -31.0, -29.0, -27.0, -25.0, -23.0, -21.0, -19.0, -17.0, -15.0, -13.0, -11.0, -9.0, - -7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, - 23.0, 25.0, 27.0, 29.0, 31.0, - ); + unsafe fn test_mm512_fcmadd_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_fcmadd_pch(a, b, c); + let e = _mm512_set1_pch(2.0, 3.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_sub_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let src = _mm512_set_ph( - 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., - 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., - ); - let r = _mm512_mask_sub_ph(src, 0b01010101010101010101010101010101, a, b); - let e = _mm512_set_ph( - 34., -29., 36., -25., 38., -21., 40., -17., 42., -13., 44., -9., 46., -5., 48., -1., - 50., 3., 52., 7., 54., 11., 56., 15., 58., 19., 60., 23., 62., 27., 64., 31., + unsafe fn test_mm512_mask_fcmadd_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_mask_fcmadd_pch(a, 0b0101010101010101, b, c); + let e = _mm512_setr_ph( + 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, + 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_sub_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 
14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let r = _mm512_maskz_sub_ph(0b01010101010101010101010101010101, a, b); - let e = _mm512_set_ph( - 0., -29., 0., -25., 0., -21., 0., -17., 0., -13., 0., -9., 0., -5., 0., -1., 0., 3., - 0., 7., 0., 11., 0., 15., 0., 19., 0., 23., 0., 27., 0., 31., + unsafe fn test_mm512_mask3_fcmadd_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_mask3_fcmadd_pch(a, b, c, 0b0101010101010101); + let e = _mm512_setr_ph( + 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, + 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_sub_round_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let r = _mm512_sub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm512_set_ph( - -31.0, -29.0, -27.0, -25.0, -23.0, -21.0, -19.0, -17.0, -15.0, -13.0, -11.0, -9.0, - -7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, - 23.0, 25.0, 27.0, 29.0, 31.0, + unsafe fn test_mm512_maskz_fcmadd_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_maskz_fcmadd_pch(0b0101010101010101, a, b, c); + let e = _mm512_setr_ph( + 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, + 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_sub_round_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let src = _mm512_set_ph( - 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., - 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., - ); - let r = _mm512_mask_sub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, - 0b01010101010101010101010101010101, + unsafe fn test_mm512_fcmadd_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = + _mm512_fcmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm512_set1_pch(2.0, 3.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_fcmadd_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_mask_fcmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, + 0b0101010101010101, b, + c, ); - let e = 
_mm512_set_ph( - 34., -29., 36., -25., 38., -21., 40., -17., 42., -13., 44., -9., 46., -5., 48., -1., - 50., 3., 52., 7., 54., 11., 56., 15., 58., 19., 60., 23., 62., 27., 64., 31., + let e = _mm512_setr_ph( + 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, + 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_sub_round_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, + unsafe fn test_mm512_mask3_fcmadd_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_mask3_fcmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, + b, + c, + 0b0101010101010101, ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, + let e = _mm512_setr_ph( + 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, + 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, ); - let r = _mm512_maskz_sub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b01010101010101010101010101010101, + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_fcmadd_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_maskz_fcmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b0101010101010101, a, b, + c, ); - let e = _mm512_set_ph( - 0., -29., 0., -25., 0., -21., 0., -17., 0., -13., 0., -9., 0., -5., 0., -1., 0., 3., - 0., 7., 0., 11., 0., 15., 0., 19., 0., 23., 0., 27., 0., 31., + let e = _mm512_setr_ph( + 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, + 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_sub_round_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm_set_sh(-1.0); + unsafe fn test_mm_fcmadd_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_fcmadd_sch(a, b, c); + let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_sub_round_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let src = _mm_set_sh(4.0); - let r = _mm_mask_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 0, a, b, - ); - let e = _mm_set_sh(4.0); + unsafe fn test_mm_mask_fcmadd_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_mask_fcmadd_sch(a, 0, b, c); + let e = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); - let r = 
_mm_mask_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 1, a, b, - ); - let e = _mm_set_sh(-1.0); + let r = _mm_mask_fcmadd_sch(a, 1, b, c); + let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask3_fcmadd_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_mask3_fcmadd_sch(a, b, c, 0); + let e = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + assert_eq_m128h(r, e); + let r = _mm_mask3_fcmadd_sch(a, b, c, 1); + let e = _mm_setr_ph(2.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_fcmadd_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_maskz_fcmadd_sch(0, a, b, c); + let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); + let r = _mm_maskz_fcmadd_sch(1, a, b, c); + let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_sub_round_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = - _mm_maskz_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); - let e = _mm_set_sh(0.0); - assert_eq_m128h(r, e); - let r = - _mm_maskz_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); - let e = _mm_set_sh(-1.0); + unsafe fn test_mm_fcmadd_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_sub_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_sub_sh(a, b); - let e = _mm_set_sh(-1.0); + unsafe fn test_mm_mask_fcmadd_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_mask_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, 0, b, c, + ); + let e = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); + let r = _mm_mask_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, 1, b, c, + ); + let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_sub_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let src = _mm_set_sh(4.0); - let r = _mm_mask_sub_sh(src, 0, a, b); - let e = _mm_set_sh(4.0); + unsafe fn test_mm_mask3_fcmadd_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_mask3_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | 
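+ // Same conjugate FMA with explicit rounding; in the scalar `mask3` form
+ // the upper elements of the result come from `c`, and a clear mask bit
+ // leaves even the low complex pair equal to `c`'s low pair.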
_MM_FROUND_NO_EXC }>( + a, b, c, 0, + ); + let e = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); assert_eq_m128h(r, e); - let r = _mm_mask_sub_sh(src, 1, a, b); - let e = _mm_set_sh(-1.0); + let r = _mm_mask3_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, b, c, 1, + ); + let e = _mm_setr_ph(2.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_sub_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_maskz_sub_sh(0, a, b); - let e = _mm_set_sh(0.0); + unsafe fn test_mm_maskz_fcmadd_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_maskz_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0, a, b, c, + ); + let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); - let r = _mm_maskz_sub_sh(1, a, b); - let e = _mm_set_sh(-1.0); + let r = _mm_maskz_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 1, a, b, c, + ); + let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mul_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - let r = _mm_mul_ph(a, b); - let e = _mm_set_ph(8.0, 14.0, 18.0, 20.0, 20.0, 18.0, 14.0, 8.0); + unsafe fn test_mm_fmadd_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_fmadd_ph(a, b, c); + let e = _mm_set1_ph(5.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_mul_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let r = _mm_mask_mul_ph(src, 0b01010101, a, b); - let e = _mm_set_ph(10., 14., 12., 20., 14., 18., 16., 8.); + unsafe fn test_mm_mask_fmadd_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_mask_fmadd_ph(a, 0b01010101, b, c); + let e = _mm_set_ph(1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_mul_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - let r = _mm_maskz_mul_ph(0b01010101, a, b); - let e = _mm_set_ph(0., 14., 0., 20., 0., 18., 0., 8.); + unsafe fn test_mm_mask3_fmadd_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_mask3_fmadd_ph(a, b, c, 0b01010101); + let e = _mm_set_ph(3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mul_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let b = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, - ); - let r = _mm256_mul_ph(a, b); + unsafe fn test_mm_maskz_fmadd_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_maskz_fmadd_ph(0b01010101, a, b, c); + let e = 
_mm_set_ph(0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_fmadd_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_fmadd_ph(a, b, c); + let e = _mm256_set1_ph(5.0); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_fmadd_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_mask_fmadd_ph(a, 0b0101010101010101, b, c); let e = _mm256_set_ph( - 16.0, 30.0, 42.0, 52.0, 60.0, 66.0, 70.0, 72.0, 72.0, 70.0, 66.0, 60.0, 52.0, 42.0, - 30.0, 16.0, + 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_mul_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let b = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, - ); - let src = _mm256_set_ph( - 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., - ); - let r = _mm256_mask_mul_ph(src, 0b0101010101010101, a, b); + unsafe fn test_mm256_mask3_fmadd_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_mask3_fmadd_ph(a, b, c, 0b0101010101010101); let e = _mm256_set_ph( - 18., 30., 20., 52., 22., 66., 24., 72., 26., 70., 28., 60., 30., 42., 32., 16., + 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_mul_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let b = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, - ); - let r = _mm256_maskz_mul_ph(0b0101010101010101, a, b); + unsafe fn test_mm256_maskz_fmadd_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_maskz_fmadd_ph(0b0101010101010101, a, b, c); let e = _mm256_set_ph( - 0., 30., 0., 52., 0., 66., 0., 72., 0., 70., 0., 60., 0., 42., 0., 16., + 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mul_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let r = _mm512_mul_ph(a, b); + unsafe fn test_mm512_fmadd_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_fmadd_ph(a, b, c); + let e = _mm512_set1_ph(5.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_fmadd_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask_fmadd_ph(a, 0b01010101010101010101010101010101, b, c); let e = 
_mm512_set_ph( - 32.0, 62.0, 90.0, 116.0, 140.0, 162.0, 182.0, 200.0, 216.0, 230.0, 242.0, 252.0, 260.0, - 266.0, 270.0, 272.0, 272.0, 270.0, 266.0, 260.0, 252.0, 242.0, 230.0, 216.0, 200.0, - 182.0, 162.0, 140.0, 116.0, 90.0, 62.0, 32.0, + 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, + 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_mul_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let src = _mm512_set_ph( - 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., - 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., - ); - let r = _mm512_mask_mul_ph(src, 0b01010101010101010101010101010101, a, b); + unsafe fn test_mm512_mask3_fmadd_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask3_fmadd_ph(a, b, c, 0b01010101010101010101010101010101); let e = _mm512_set_ph( - 34., 62., 36., 116., 38., 162., 40., 200., 42., 230., 44., 252., 46., 266., 48., 272., - 50., 270., 52., 260., 54., 242., 56., 216., 58., 182., 60., 140., 62., 90., 64., 32., + 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, + 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_mul_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let r = _mm512_maskz_mul_ph(0b01010101010101010101010101010101, a, b); + unsafe fn test_mm512_maskz_fmadd_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_maskz_fmadd_ph(0b01010101010101010101010101010101, a, b, c); let e = _mm512_set_ph( - 0., 62., 0., 116., 0., 162., 0., 200., 0., 230., 0., 252., 0., 266., 0., 272., 0., - 270., 0., 260., 0., 242., 0., 216., 0., 182., 0., 140., 0., 90., 0., 32., + 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, + 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mul_round_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, + unsafe fn test_mm512_fmadd_round_ph() { + let 
a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_fmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm512_set1_ph(5.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_fmadd_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask_fmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, + 0b01010101010101010101010101010101, + b, + c, ); - let r = _mm512_mul_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); let e = _mm512_set_ph( - 32.0, 62.0, 90.0, 116.0, 140.0, 162.0, 182.0, 200.0, 216.0, 230.0, 242.0, 252.0, 260.0, - 266.0, 270.0, 272.0, 272.0, 270.0, 266.0, 260.0, 252.0, 242.0, 230.0, 216.0, 200.0, - 182.0, 162.0, 140.0, 116.0, 90.0, 62.0, 32.0, + 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, + 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_mul_round_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let src = _mm512_set_ph( - 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., - 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., - ); - let r = _mm512_mask_mul_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, - 0b01010101010101010101010101010101, + unsafe fn test_mm512_mask3_fmadd_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask3_fmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, b, + c, + 0b01010101010101010101010101010101, ); let e = _mm512_set_ph( - 34., 62., 36., 116., 38., 162., 40., 200., 42., 230., 44., 252., 46., 266., 48., 272., - 50., 270., 52., 260., 54., 242., 56., 216., 58., 182., 60., 140., 62., 90., 64., 32., + 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, + 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_mul_round_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let r = _mm512_maskz_mul_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_maskz_fmadd_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_maskz_fmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( 0b01010101010101010101010101010101, a, b, + c, ); let e = _mm512_set_ph( - 0., 62., 0., 
116., 0., 162., 0., 200., 0., 230., 0., 252., 0., 266., 0., 272., 0., - 270., 0., 260., 0., 242., 0., 216., 0., 182., 0., 140., 0., 90., 0., 32., + 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, + 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mul_round_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm_set_sh(2.0); + unsafe fn test_mm_fmadd_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_fmadd_sh(a, b, c); + let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_mul_round_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let src = _mm_set_sh(4.0); - let r = _mm_mask_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 0, a, b, - ); - let e = _mm_set_sh(4.0); + unsafe fn test_mm_mask_fmadd_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_fmadd_sh(a, 0, b, c); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_mask_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 1, a, b, - ); - let e = _mm_set_sh(2.0); + let r = _mm_mask_fmadd_sh(a, 1, b, c); + let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask3_fmadd_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask3_fmadd_sh(a, b, c, 0); + let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + assert_eq_m128h(r, e); + let r = _mm_mask3_fmadd_sh(a, b, c, 1); + let e = _mm_setr_ph(5.0, 30., 31., 32., 33., 34., 35., 36.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_mul_round_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = - _mm_maskz_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); - let e = _mm_set_sh(0.0); + unsafe fn test_mm_maskz_fmadd_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_maskz_fmadd_sh(0, a, b, c); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = - _mm_maskz_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); - let e = _mm_set_sh(2.0); + let r = _mm_maskz_fmadd_sh(1, a, b, c); + let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mul_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_mul_sh(a, b); - let e = _mm_set_sh(2.0); + unsafe fn test_mm_fmadd_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 
23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_mul_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let src = _mm_set_sh(4.0); - let r = _mm_mask_mul_sh(src, 0, a, b); - let e = _mm_set_sh(4.0); + unsafe fn test_mm_mask_fmadd_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, 0, b, c, + ); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_mask_mul_sh(src, 1, a, b); - let e = _mm_set_sh(2.0); + let r = _mm_mask_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, 1, b, c, + ); + let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_mul_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_maskz_mul_sh(0, a, b); - let e = _mm_set_sh(0.0); + unsafe fn test_mm_mask3_fmadd_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask3_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, b, c, 0, + ); + let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); assert_eq_m128h(r, e); - let r = _mm_maskz_mul_sh(1, a, b); - let e = _mm_set_sh(2.0); + let r = _mm_mask3_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, b, c, 1, + ); + let e = _mm_setr_ph(5.0, 30., 31., 32., 33., 34., 35., 36.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_fmadd_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_maskz_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0, a, b, c, + ); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + let r = _mm_maskz_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 1, a, b, c, + ); + let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_div_ph() { + unsafe fn test_mm_fmsub_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); - let r = _mm_div_ph(a, b); - let e = _mm_set1_ph(0.5); + let c = _mm_set1_ph(3.0); + let r = _mm_fmsub_ph(a, b, c); + let e = _mm_set1_ph(-1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_div_ph() { + unsafe fn test_mm_mask_fmsub_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); - let src = _mm_set_ph(4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0); - let r = _mm_mask_div_ph(src, 0b01010101, a, b); - let e = _mm_set_ph(4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5); + let c = _mm_set1_ph(3.0); + let r = _mm_mask_fmsub_ph(a, 0b01010101, b, c); + let e = _mm_set_ph(1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0); 
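+        // fmsub computes a * b - c = 1.0 * 2.0 - 3.0 = -1.0; lanes whose mask bit is
+        // clear keep the corresponding element of `a` (1.0).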
assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_div_ph() { + unsafe fn test_mm_mask3_fmsub_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); - let r = _mm_maskz_div_ph(0b01010101, a, b); - let e = _mm_set_ph(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5); + let c = _mm_set1_ph(3.0); + let r = _mm_mask3_fmsub_ph(a, b, c, 0b01010101); + let e = _mm_set_ph(3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_div_ph() { + unsafe fn test_mm_maskz_fmsub_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_maskz_fmsub_ph(0b01010101, a, b, c); + let e = _mm_set_ph(0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_fmsub_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); - let r = _mm256_div_ph(a, b); - let e = _mm256_set1_ph(0.5); + let c = _mm256_set1_ph(3.0); + let r = _mm256_fmsub_ph(a, b, c); + let e = _mm256_set1_ph(-1.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_div_ph() { + unsafe fn test_mm256_mask_fmsub_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); - let src = _mm256_set_ph( - 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, - 19.0, + let c = _mm256_set1_ph(3.0); + let r = _mm256_mask_fmsub_ph(a, 0b0101010101010101, b, c); + let e = _mm256_set_ph( + 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, ); - let r = _mm256_mask_div_ph(src, 0b0101010101010101, a, b); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask3_fmsub_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_mask3_fmsub_ph(a, b, c, 0b0101010101010101); let e = _mm256_set_ph( - 4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5, 12.0, 0.5, 14.0, 0.5, 16.0, 0.5, 18.0, 0.5, + 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_div_ph() { + unsafe fn test_mm256_maskz_fmsub_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); - let r = _mm256_maskz_div_ph(0b0101010101010101, a, b); + let c = _mm256_set1_ph(3.0); + let r = _mm256_maskz_fmsub_ph(0b0101010101010101, a, b, c); let e = _mm256_set_ph( - 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, + 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_div_ph() { + unsafe fn test_mm512_fmsub_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); - let r = _mm512_div_ph(a, b); - let e = _mm512_set1_ph(0.5); + let c = _mm512_set1_ph(3.0); + let r = _mm512_fmsub_ph(a, b, c); + let e = _mm512_set1_ph(-1.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_div_ph() { + unsafe fn test_mm512_mask_fmsub_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); - let src = _mm512_set_ph( - 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, - 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, - 33.0, 34.0, 35.0, + let c = 
_mm512_set1_ph(3.0); + let r = _mm512_mask_fmsub_ph(a, 0b01010101010101010101010101010101, b, c); + let e = _mm512_set_ph( + 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, + 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, ); - let r = _mm512_mask_div_ph(src, 0b01010101010101010101010101010101, a, b); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask3_fmsub_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask3_fmsub_ph(a, b, c, 0b01010101010101010101010101010101); let e = _mm512_set_ph( - 4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5, 12.0, 0.5, 14.0, 0.5, 16.0, 0.5, 18.0, 0.5, - 20.0, 0.5, 22.0, 0.5, 24.0, 0.5, 26.0, 0.5, 28.0, 0.5, 30.0, 0.5, 32.0, 0.5, 34.0, 0.5, + 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, + 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_div_ph() { + unsafe fn test_mm512_maskz_fmsub_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); - let r = _mm512_maskz_div_ph(0b01010101010101010101010101010101, a, b); + let c = _mm512_set1_ph(3.0); + let r = _mm512_maskz_fmsub_ph(0b01010101010101010101010101010101, a, b, c); let e = _mm512_set_ph( - 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, - 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, + 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, + 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_div_round_ph() { + unsafe fn test_mm512_fmsub_round_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); - let r = _mm512_div_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm512_set1_ph(0.5); + let c = _mm512_set1_ph(3.0); + let r = _mm512_fmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm512_set1_ph(-1.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_div_round_ph() { + unsafe fn test_mm512_mask_fmsub_round_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); - let src = _mm512_set_ph( - 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, - 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, - 33.0, 34.0, 35.0, - ); - let r = _mm512_mask_div_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask_fmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, 0b01010101010101010101010101010101, + b, + c, + ); + let e = _mm512_set_ph( + 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, + 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask3_fmsub_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask3_fmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, b, + c, + 0b01010101010101010101010101010101, ); 
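+        // the mask3 form takes the writemask as the last argument; lanes whose mask
+        // bit is clear keep the corresponding element of `c` (3.0) rather than `a`.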
let e = _mm512_set_ph( - 4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5, 12.0, 0.5, 14.0, 0.5, 16.0, 0.5, 18.0, 0.5, - 20.0, 0.5, 22.0, 0.5, 24.0, 0.5, 26.0, 0.5, 28.0, 0.5, 30.0, 0.5, 32.0, 0.5, 34.0, 0.5, + 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, + 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_div_round_ph() { + unsafe fn test_mm512_maskz_fmsub_round_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); - let r = _mm512_maskz_div_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let c = _mm512_set1_ph(3.0); + let r = _mm512_maskz_fmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( 0b01010101010101010101010101010101, a, b, + c, ); let e = _mm512_set_ph( - 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, - 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, + 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, + 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_div_round_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm_set_sh(0.5); + unsafe fn test_mm_fmsub_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_fmsub_sh(a, b, c); + let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_div_round_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let src = _mm_set_sh(4.0); - let r = _mm_mask_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 0, a, b, - ); - let e = _mm_set_sh(4.0); + unsafe fn test_mm_mask_fmsub_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_fmsub_sh(a, 0, b, c); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_mask_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 1, a, b, - ); - let e = _mm_set_sh(0.5); + let r = _mm_mask_fmsub_sh(a, 1, b, c); + let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_div_round_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = - _mm_maskz_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); - let e = _mm_set_sh(0.0); + unsafe fn test_mm_mask3_fmsub_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask3_fmsub_sh(a, b, c, 0); + let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); assert_eq_m128h(r, e); - let r = - _mm_maskz_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); - let e = _mm_set_sh(0.5); + let r = _mm_mask3_fmsub_sh(a, b, c, 1); + let e = 
_mm_setr_ph(-1.0, 30., 31., 32., 33., 34., 35., 36.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_div_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_div_sh(a, b); - let e = _mm_set_sh(0.5); + unsafe fn test_mm_maskz_fmsub_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_maskz_fmsub_sh(0, a, b, c); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + let r = _mm_maskz_fmsub_sh(1, a, b, c); + let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_div_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let src = _mm_set_sh(4.0); - let r = _mm_mask_div_sh(src, 0, a, b); - let e = _mm_set_sh(4.0); + unsafe fn test_mm_fmsub_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_mask_div_sh(src, 1, a, b); - let e = _mm_set_sh(0.5); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_fmsub_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, 0, b, c, + ); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + let r = _mm_mask_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, 1, b, c, + ); + let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_div_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_maskz_div_sh(0, a, b); - let e = _mm_set_sh(0.0); + unsafe fn test_mm_mask3_fmsub_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask3_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, b, c, 0, + ); + let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); assert_eq_m128h(r, e); - let r = _mm_maskz_div_sh(1, a, b); - let e = _mm_set_sh(0.5); + let r = _mm_mask3_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, b, c, 1, + ); + let e = _mm_setr_ph(-1.0, 30., 31., 32., 33., 34., 35., 36.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_fmsub_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_maskz_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0, a, b, c, + ); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + let r = _mm_maskz_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 
1, a, b, c, + ); + let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mul_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, 1.0); - let r = _mm_mul_pch(a, b); - let e = _mm_set1_pch(-1.0, 0.0); + unsafe fn test_mm_fnmadd_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_fnmadd_ph(a, b, c); + let e = _mm_set1_ph(1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_mul_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, 1.0); - let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0); - let r = _mm_mask_mul_pch(src, 0b0101, a, b); - let e = _mm_setr_ph(-1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0); + unsafe fn test_mm_mask_fnmadd_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_mask_fnmadd_ph(a, 0b01010101, b, c); + let e = _mm_set_ph(1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_mul_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, 1.0); - let r = _mm_maskz_mul_pch(0b0101, a, b); - let e = _mm_setr_ph(-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0); + unsafe fn test_mm_mask3_fnmadd_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_mask3_fnmadd_ph(a, b, c, 0b01010101); + let e = _mm_set_ph(3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mul_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 1.0); - let r = _mm256_mul_pch(a, b); - let e = _mm256_set1_pch(-1.0, 0.0); + unsafe fn test_mm_maskz_fnmadd_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_maskz_fnmadd_ph(0b01010101, a, b, c); + let e = _mm_set_ph(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_fnmadd_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_fnmadd_ph(a, b, c); + let e = _mm256_set1_ph(1.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_mul_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 1.0); - let src = _mm256_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + unsafe fn test_mm256_mask_fnmadd_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_mask_fnmadd_ph(a, 0b0101010101010101, b, c); + let e = _mm256_set_ph( + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, ); - let r = _mm256_mask_mul_pch(src, 0b01010101, a, b); - let e = _mm256_setr_ph( - -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask3_fnmadd_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_mask3_fnmadd_ph(a, b, c, 0b0101010101010101); + let e = _mm256_set_ph( + 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, ); assert_eq_m256h(r, 
e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_mul_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 1.0); - let r = _mm256_maskz_mul_pch(0b01010101, a, b); - let e = _mm256_setr_ph( - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + unsafe fn test_mm256_maskz_fnmadd_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_maskz_fnmadd_ph(0b0101010101010101, a, b, c); + let e = _mm256_set_ph( + 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mul_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let r = _mm512_mul_pch(a, b); - let e = _mm512_set1_pch(-1.0, 0.0); + unsafe fn test_mm512_fnmadd_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_fnmadd_ph(a, b, c); + let e = _mm512_set1_ph(1.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_mul_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let src = _mm512_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, - 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, - 32.0, 33.0, + unsafe fn test_mm512_mask_fnmadd_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask_fnmadd_ph(a, 0b01010101010101010101010101010101, b, c); + let e = _mm512_set_ph( + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, ); - let r = _mm512_mask_mul_pch(src, 0b0101010101010101, a, b); - let e = _mm512_setr_ph( - -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, - -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, - 33.0, + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask3_fnmadd_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask3_fnmadd_ph(a, b, c, 0b01010101010101010101010101010101); + let e = _mm512_set_ph( + 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, + 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_mul_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let r = _mm512_maskz_mul_pch(0b0101010101010101, a, b); - let e = _mm512_setr_ph( - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + unsafe fn test_mm512_maskz_fnmadd_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_maskz_fnmadd_ph(0b01010101010101010101010101010101, a, b, c); + let e = _mm512_set_ph( + 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, + 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn 
test_mm512_mul_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let r = _mm512_mul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm512_set1_pch(-1.0, 0.0); + unsafe fn test_mm512_fnmadd_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = + _mm512_fnmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm512_set1_ph(1.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_mul_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let src = _mm512_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, - 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, - 32.0, 33.0, + unsafe fn test_mm512_mask_fnmadd_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask_fnmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, + 0b01010101010101010101010101010101, + b, + c, ); - let r = _mm512_mask_mul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, - 0b0101010101010101, + let e = _mm512_set_ph( + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask3_fnmadd_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask3_fnmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, b, + c, + 0b01010101010101010101010101010101, ); - let e = _mm512_setr_ph( - -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, - -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, - 33.0, + let e = _mm512_set_ph( + 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, + 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_mul_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let r = _mm512_maskz_mul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b0101010101010101, + unsafe fn test_mm512_maskz_fnmadd_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_maskz_fnmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101010101010101010101010101, a, b, + c, ); - let e = _mm512_setr_ph( - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + let e = _mm512_set_ph( + 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, + 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mul_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let r = _mm_mul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e 
= _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_fnmadd_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_fnmadd_sh(a, b, c); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_mul_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); - let r = _mm_mask_mul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 0, a, b, - ); - let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_mask_fnmadd_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_fnmadd_sh(a, 0, b, c); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + let r = _mm_mask_fnmadd_sh(a, 1, b, c); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_mul_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let r = - _mm_maskz_mul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); - let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_mask3_fnmadd_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask3_fnmadd_sh(a, b, c, 0); + let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + assert_eq_m128h(r, e); + let r = _mm_mask3_fnmadd_sh(a, b, c, 1); + let e = _mm_setr_ph(1.0, 30., 31., 32., 33., 34., 35., 36.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mul_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let r = _mm_mul_sch(a, b); - let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_maskz_fnmadd_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_maskz_fnmadd_sh(0, a, b, c); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + let r = _mm_maskz_fnmadd_sh(1, a, b, c); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_mul_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); - let r = _mm_mask_mul_sch(src, 0, a, b); - let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_fnmadd_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = 
_mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_mul_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let r = _mm_maskz_mul_sch(0, a, b); - let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_mask_fnmadd_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, 0, b, c, + ); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + let r = _mm_mask_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, 1, b, c, + ); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask3_fnmadd_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask3_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, b, c, 0, + ); + let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + assert_eq_m128h(r, e); + let r = _mm_mask3_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, b, c, 1, + ); + let e = _mm_setr_ph(1.0, 30., 31., 32., 33., 34., 35., 36.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_fnmadd_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_maskz_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0, a, b, c, + ); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + let r = _mm_maskz_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 1, a, b, c, + ); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_fmul_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, 1.0); - let r = _mm_fmul_pch(a, b); - let e = _mm_set1_pch(-1.0, 0.0); + unsafe fn test_mm_fnmsub_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_fnmsub_ph(a, b, c); + let e = _mm_set1_ph(-5.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fmul_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, 1.0); - let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0); - let r = _mm_mask_fmul_pch(src, 0b0101, a, b); - let e = _mm_setr_ph(-1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0); + unsafe fn test_mm_mask_fnmsub_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_mask_fnmsub_ph(a, 0b01010101, b, c); + let e = _mm_set_ph(1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0); assert_eq_m128h(r, e); } #[simd_test(enable = 
"avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_fmul_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, 1.0); - let r = _mm_maskz_fmul_pch(0b0101, a, b); - let e = _mm_setr_ph(-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0); + unsafe fn test_mm_mask3_fnmsub_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_mask3_fnmsub_ph(a, b, c, 0b01010101); + let e = _mm_set_ph(3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_fmul_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 1.0); - let r = _mm256_fmul_pch(a, b); - let e = _mm256_set1_pch(-1.0, 0.0); + unsafe fn test_mm_maskz_fnmsub_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_maskz_fnmsub_ph(0b01010101, a, b, c); + let e = _mm_set_ph(0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_fnmsub_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_fnmsub_ph(a, b, c); + let e = _mm256_set1_ph(-5.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_fmul_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 1.0); - let src = _mm256_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + unsafe fn test_mm256_mask_fnmsub_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_mask_fnmsub_ph(a, 0b0101010101010101, b, c); + let e = _mm256_set_ph( + 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, ); - let r = _mm256_mask_fmul_pch(src, 0b01010101, a, b); - let e = _mm256_setr_ph( - -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask3_fnmsub_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_mask3_fnmsub_ph(a, b, c, 0b0101010101010101); + let e = _mm256_set_ph( + 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_fmul_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 1.0); - let r = _mm256_maskz_fmul_pch(0b01010101, a, b); - let e = _mm256_setr_ph( - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + unsafe fn test_mm256_maskz_fnmsub_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_maskz_fnmsub_ph(0b0101010101010101, a, b, c); + let e = _mm256_set_ph( + 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmul_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let r = _mm512_fmul_pch(a, b); - let e = _mm512_set1_pch(-1.0, 0.0); + unsafe fn test_mm512_fnmsub_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_fnmsub_ph(a, b, c); + let e = _mm512_set1_ph(-5.0); 
assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fmul_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let src = _mm512_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, - 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, - 32.0, 33.0, + unsafe fn test_mm512_mask_fnmsub_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask_fnmsub_ph(a, 0b01010101010101010101010101010101, b, c); + let e = _mm512_set_ph( + 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, + 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, ); - let r = _mm512_mask_fmul_pch(src, 0b0101010101010101, a, b); - let e = _mm512_setr_ph( - -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, - -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, - 33.0, + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask3_fnmsub_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask3_fnmsub_ph(a, b, c, 0b01010101010101010101010101010101); + let e = _mm512_set_ph( + 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, + 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmul_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let r = _mm512_maskz_fmul_pch(0b0101010101010101, a, b); - let e = _mm512_setr_ph( - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + unsafe fn test_mm512_maskz_fnmsub_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_maskz_fnmsub_ph(0b01010101010101010101010101010101, a, b, c); + let e = _mm512_set_ph( + 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, + 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmul_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let r = _mm512_fmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm512_set1_pch(-1.0, 0.0); + unsafe fn test_mm512_fnmsub_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = + _mm512_fnmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm512_set1_ph(-5.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fmul_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let src = _mm512_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, - 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, - 32.0, 33.0, + unsafe fn test_mm512_mask_fnmsub_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + 
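+        // -(a * b) - c = -5.0 in the lanes selected by the mask; masked-off lanes
+        // keep the corresponding element of `a` (1.0).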
let r = _mm512_mask_fnmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, + 0b01010101010101010101010101010101, + b, + c, ); - let r = _mm512_mask_fmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, - 0b0101010101010101, + let e = _mm512_set_ph( + 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, + 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask3_fnmsub_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask3_fnmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, b, + c, + 0b01010101010101010101010101010101, ); - let e = _mm512_setr_ph( - -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, - -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, - 33.0, + let e = _mm512_set_ph( + 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, + 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmul_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let r = _mm512_maskz_fmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b0101010101010101, + unsafe fn test_mm512_maskz_fnmsub_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_maskz_fnmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101010101010101010101010101, a, b, + c, ); - let e = _mm512_setr_ph( - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + let e = _mm512_set_ph( + 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, + 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fmul_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let r = _mm_fmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_fnmsub_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_fnmsub_sh(a, b, c); + let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fmul_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); - let r = _mm_mask_fmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 0, a, b, - ); - let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_mask_fnmsub_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = 
_mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_fnmsub_sh(a, 0, b, c); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + let r = _mm_mask_fnmsub_sh(a, 1, b, c); + let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fmul_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let r = - _mm_maskz_fmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); - let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_mask3_fnmsub_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask3_fnmsub_sh(a, b, c, 0); + let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + assert_eq_m128h(r, e); + let r = _mm_mask3_fnmsub_sh(a, b, c, 1); + let e = _mm_setr_ph(-5.0, 30., 31., 32., 33., 34., 35., 36.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fmul_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let r = _mm_fmul_sch(a, b); - let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_maskz_fnmsub_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_maskz_fnmsub_sh(0, a, b, c); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + let r = _mm_maskz_fnmsub_sh(1, a, b, c); + let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fmul_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); - let r = _mm_mask_fmul_sch(src, 0, a, b); - let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_fnmsub_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fmul_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let r = _mm_maskz_fmul_sch(0, a, b); - let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_mask_fnmsub_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, 0, b, c, + ); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + 
assert_eq_m128h(r, e); + let r = _mm_mask_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, 1, b, c, + ); + let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask3_fnmsub_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask3_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, b, c, 0, + ); + let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + assert_eq_m128h(r, e); + let r = _mm_mask3_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, b, c, 1, + ); + let e = _mm_setr_ph(-5.0, 30., 31., 32., 33., 34., 35., 36.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_fnmsub_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_maskz_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0, a, b, c, + ); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + let r = _mm_maskz_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 1, a, b, c, + ); + let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_cmul_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, -1.0); - let r = _mm_cmul_pch(a, b); - let e = _mm_set1_pch(-1.0, 0.0); + unsafe fn test_mm_fmaddsub_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_fmaddsub_ph(a, b, c); + let e = _mm_set_ph(5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_cmul_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, -1.0); - let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0); - let r = _mm_mask_cmul_pch(src, 0b0101, a, b); - let e = _mm_setr_ph(-1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0); + unsafe fn test_mm_mask_fmaddsub_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_mask_fmaddsub_ph(a, 0b00110011, b, c); + let e = _mm_set_ph(1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_cmul_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, -1.0); - let r = _mm_maskz_cmul_pch(0b0101, a, b); - let e = _mm_setr_ph(-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0); + unsafe fn test_mm_mask3_fmaddsub_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_mask3_fmaddsub_ph(a, b, c, 0b00110011); + let e = _mm_set_ph(3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_cmul_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, -1.0); - let r = _mm256_cmul_pch(a, b); - let e = _mm256_set1_pch(-1.0, 0.0); - assert_eq_m256h(r, e); + unsafe fn test_mm_maskz_fmaddsub_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = 
_mm_maskz_fmaddsub_ph(0b00110011, a, b, c); + let e = _mm_set_ph(0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_cmul_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, -1.0); - let src = _mm256_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, - ); - let r = _mm256_mask_cmul_pch(src, 0b01010101, a, b); - let e = _mm256_setr_ph( - -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + unsafe fn test_mm256_fmaddsub_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_fmaddsub_ph(a, b, c); + let e = _mm256_set_ph( + 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_cmul_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, -1.0); - let r = _mm256_maskz_cmul_pch(0b01010101, a, b); - let e = _mm256_setr_ph( - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + unsafe fn test_mm256_mask_fmaddsub_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_mask_fmaddsub_ph(a, 0b0011001100110011, b, c); + let e = _mm256_set_ph( + 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, ); assert_eq_m256h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_cmul_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, -1.0); - let r = _mm512_cmul_pch(a, b); - let e = _mm512_set1_pch(-1.0, 0.0); - assert_eq_m512h(r, e); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask3_fmaddsub_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_mask3_fmaddsub_ph(a, b, c, 0b0011001100110011); + let e = _mm256_set_ph( + 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, + ); + assert_eq_m256h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_cmul_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, -1.0); - let src = _mm512_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, - 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, - 32.0, 33.0, - ); - let r = _mm512_mask_cmul_pch(src, 0b0101010101010101, a, b); - let e = _mm512_setr_ph( - -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, - -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, - 33.0, + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_fmaddsub_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_maskz_fmaddsub_ph(0b0011001100110011, a, b, c); + let e = _mm256_set_ph( + 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, ); - assert_eq_m512h(r, e); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_cmul_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, -1.0); - let r = _mm512_maskz_cmul_pch(0b0101010101010101, a, b); - let e = _mm512_setr_ph( - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 
0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + unsafe fn test_mm512_fmaddsub_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_fmaddsub_ph(a, b, c); + let e = _mm512_set_ph( + 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, + 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_cmul_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, -1.0); - let r = _mm512_cmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm512_set1_pch(-1.0, 0.0); + unsafe fn test_mm512_mask_fmaddsub_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask_fmaddsub_ph(a, 0b00110011001100110011001100110011, b, c); + let e = _mm512_set_ph( + 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, + 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, + ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_cmul_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, -1.0); - let src = _mm512_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, - 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, - 32.0, 33.0, - ); - let r = _mm512_mask_cmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, - 0b0101010101010101, - a, - b, - ); - let e = _mm512_setr_ph( - -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, - -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, - 33.0, + unsafe fn test_mm512_mask3_fmaddsub_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask3_fmaddsub_ph(a, b, c, 0b00110011001100110011001100110011); + let e = _mm512_set_ph( + 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, + 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_cmul_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, -1.0); - let r = _mm512_maskz_cmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b0101010101010101, - a, - b, - ); - let e = _mm512_setr_ph( - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + unsafe fn test_mm512_maskz_fmaddsub_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_maskz_fmaddsub_ph(0b00110011001100110011001100110011, a, b, c); + let e = _mm512_set_ph( + 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, + 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_cmul_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 
12.0, -13.0); - let r = _mm_cmul_sch(a, b); - let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_cmul_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); - let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); - let r = _mm_mask_cmul_sch(src, 0, a, b); - let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); + unsafe fn test_mm512_fmaddsub_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = + _mm512_fmaddsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm512_set_ph( + 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, + 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_cmul_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); - let r = _mm_maskz_cmul_sch(0, a, b); - let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); + unsafe fn test_mm512_mask_fmaddsub_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask_fmaddsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, + 0b00110011001100110011001100110011, + b, + c, + ); + let e = _mm512_set_ph( + 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, + 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_cmul_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); - let r = _mm_cmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); + unsafe fn test_mm512_mask3_fmaddsub_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask3_fmaddsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, + b, + c, + 0b00110011001100110011001100110011, + ); + let e = _mm512_set_ph( + 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, + 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_cmul_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); - let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); - let r = _mm_mask_cmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 0, a, b, + unsafe fn test_mm512_maskz_fmaddsub_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_maskz_fmaddsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b00110011001100110011001100110011, + a, + b, + c, ); - let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); 
- assert_eq_m128h(r, e); + let e = _mm512_set_ph( + 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, + 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, + ); + assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_cmul_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); - let r = - _mm_maskz_cmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); - let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_fmsubadd_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_fmsubadd_ph(a, b, c); + let e = _mm_set_ph(-1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_fcmul_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, -1.0); - let r = _mm_fcmul_pch(a, b); - let e = _mm_set1_pch(-1.0, 0.0); + unsafe fn test_mm_mask_fmsubadd_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_mask_fmsubadd_ph(a, 0b00110011, b, c); + let e = _mm_set_ph(1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fcmul_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, -1.0); - let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0); - let r = _mm_mask_fcmul_pch(src, 0b0101, a, b); - let e = _mm_setr_ph(-1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0); + unsafe fn test_mm_mask3_fmsubadd_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_mask3_fmsubadd_ph(a, b, c, 0b00110011); + let e = _mm_set_ph(3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_fcmul_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, -1.0); - let r = _mm_maskz_fcmul_pch(0b0101, a, b); - let e = _mm_setr_ph(-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0); + unsafe fn test_mm_maskz_fmsubadd_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_maskz_fmsubadd_ph(0b00110011, a, b, c); + let e = _mm_set_ph(0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_fcmul_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, -1.0); - let r = _mm256_fcmul_pch(a, b); - let e = _mm256_set1_pch(-1.0, 0.0); + unsafe fn test_mm256_fmsubadd_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_fmsubadd_ph(a, b, c); + let e = _mm256_set_ph( + -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, + ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_fcmul_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, -1.0); - let src = _mm256_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + unsafe fn test_mm256_mask_fmsubadd_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_mask_fmsubadd_ph(a, 
0b0011001100110011, b, c); + let e = _mm256_set_ph( + 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, ); - let r = _mm256_mask_fcmul_pch(src, 0b01010101, a, b); - let e = _mm256_setr_ph( - -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask3_fmsubadd_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_mask3_fmsubadd_ph(a, b, c, 0b0011001100110011); + let e = _mm256_set_ph( + 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_fcmul_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, -1.0); - let r = _mm256_maskz_fcmul_pch(0b01010101, a, b); - let e = _mm256_setr_ph( - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + unsafe fn test_mm256_maskz_fmsubadd_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_maskz_fmsubadd_ph(0b0011001100110011, a, b, c); + let e = _mm256_set_ph( + 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fcmul_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, -1.0); - let r = _mm512_fcmul_pch(a, b); - let e = _mm512_set1_pch(-1.0, 0.0); + unsafe fn test_mm512_fmsubadd_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_fmsubadd_ph(a, b, c); + let e = _mm512_set_ph( + -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, + -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, + ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fcmul_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, -1.0); - let src = _mm512_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, - 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, - 32.0, 33.0, - ); - let r = _mm512_mask_fcmul_pch(src, 0b0101010101010101, a, b); - let e = _mm512_setr_ph( - -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, - -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, - 33.0, + unsafe fn test_mm512_mask_fmsubadd_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask_fmsubadd_ph(a, 0b00110011001100110011001100110011, b, c); + let e = _mm512_set_ph( + 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, + 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fcmul_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, -1.0); - let r = _mm512_maskz_fcmul_pch(0b0101010101010101, a, b); - let e = _mm512_setr_ph( - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + unsafe fn 
test_mm512_mask3_fmsubadd_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask3_fmsubadd_ph(a, b, c, 0b00110011001100110011001100110011); + let e = _mm512_set_ph( + 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, + 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fcmul_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, -1.0); - let r = _mm512_fcmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm512_set1_pch(-1.0, 0.0); + unsafe fn test_mm512_maskz_fmsubadd_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_maskz_fmsubadd_ph(0b00110011001100110011001100110011, a, b, c); + let e = _mm512_set_ph( + 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, + 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, + ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fcmul_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, -1.0); - let src = _mm512_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, - 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, - 32.0, 33.0, + unsafe fn test_mm512_fmsubadd_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = + _mm512_fmsubadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm512_set_ph( + -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, + -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, ); - let r = _mm512_mask_fcmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, - 0b0101010101010101, + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_fmsubadd_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask_fmsubadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, + 0b00110011001100110011001100110011, b, + c, ); - let e = _mm512_setr_ph( - -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, - -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, - 33.0, + let e = _mm512_set_ph( + 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, + 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fcmul_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, -1.0); - let r = _mm512_maskz_fcmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b0101010101010101, + unsafe fn test_mm512_mask3_fmsubadd_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask3_fmsubadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, b, + c, + 0b00110011001100110011001100110011, ); - let e = _mm512_setr_ph( - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 
0.0, -1.0, 0.0, 0.0, 0.0, - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + let e = _mm512_set_ph( + 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, + 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fcmul_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); - let r = _mm_fcmul_sch(a, b); - let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fcmul_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); - let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); - let r = _mm_mask_fcmul_sch(src, 0, a, b); - let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fcmul_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); - let r = _mm_maskz_fcmul_sch(0, a, b); - let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fcmul_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); - let r = _mm_fcmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fcmul_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); - let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); - let r = _mm_mask_fcmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 0, a, b, + unsafe fn test_mm512_maskz_fmsubadd_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_maskz_fmsubadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b00110011001100110011001100110011, + a, + b, + c, ); - let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fcmul_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); - let r = - _mm_maskz_fcmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); - let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); + let e = _mm512_set_ph( + 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, + 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, + ); + assert_eq_m512h(r, e); } } From e6a59102baa1ac32f5ea70895ca4b94e0a93f968 Mon Sep 17 00:00:00 2001 From: sayantn Date: Sat, 13 Jul 2024 12:47:28 +0530 Subject: [PATCH 05/11] AVX512FP16 Part 4: Math functions Reciprocal, RSqrt, Sqrt, Max, Min --- crates/core_arch/missing-x86.md | 78 - crates/core_arch/src/x86/avx512fp16.rs 
| 8009 +++++++++++++++--------- 2 files changed, 5039 insertions(+), 3048 deletions(-) diff --git a/crates/core_arch/missing-x86.md b/crates/core_arch/missing-x86.md index 08b3ab9a18..c0b8aa1457 100644 --- a/crates/core_arch/missing-x86.md +++ b/crates/core_arch/missing-x86.md @@ -159,20 +159,12 @@ * [ ] [`_mm512_mask_getexp_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_getexp_round_ph) * [ ] [`_mm512_mask_getmant_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_getmant_ph) * [ ] [`_mm512_mask_getmant_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_getmant_round_ph) - * [ ] [`_mm512_mask_max_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_max_ph) - * [ ] [`_mm512_mask_max_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_max_round_ph) - * [ ] [`_mm512_mask_min_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_min_ph) - * [ ] [`_mm512_mask_min_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_min_round_ph) - * [ ] [`_mm512_mask_rcp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_rcp_ph) * [ ] [`_mm512_mask_reduce_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_reduce_ph) * [ ] [`_mm512_mask_reduce_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_reduce_round_ph) * [ ] [`_mm512_mask_roundscale_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_roundscale_ph) * [ ] [`_mm512_mask_roundscale_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_roundscale_round_ph) - * [ ] [`_mm512_mask_rsqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_rsqrt_ph) * [ ] [`_mm512_mask_scalef_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_scalef_ph) * [ ] [`_mm512_mask_scalef_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_scalef_round_ph) - * [ ] [`_mm512_mask_sqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_sqrt_ph) - * [ ] [`_mm512_mask_sqrt_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_sqrt_round_ph) * [ ] [`_mm512_maskz_cvt_roundepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundepi16_ph) * [ ] [`_mm512_maskz_cvt_roundepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundepi32_ph) * [ ] [`_mm512_maskz_cvt_roundepi64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundepi64_ph) @@ -221,27 +213,14 @@ * [ ] [`_mm512_maskz_getexp_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_getexp_round_ph) * [ ] [`_mm512_maskz_getmant_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_getmant_ph) * [ ] [`_mm512_maskz_getmant_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_getmant_round_ph) - * [ ] [`_mm512_maskz_max_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_max_ph) - * [ ] 
[`_mm512_maskz_max_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_max_round_ph) - * [ ] [`_mm512_maskz_min_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_min_ph) - * [ ] [`_mm512_maskz_min_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_min_round_ph) - * [ ] [`_mm512_maskz_rcp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_rcp_ph) * [ ] [`_mm512_maskz_reduce_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_reduce_ph) * [ ] [`_mm512_maskz_reduce_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_reduce_round_ph) * [ ] [`_mm512_maskz_roundscale_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_roundscale_ph) * [ ] [`_mm512_maskz_roundscale_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_roundscale_round_ph) - * [ ] [`_mm512_maskz_rsqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_rsqrt_ph) * [ ] [`_mm512_maskz_scalef_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_scalef_ph) * [ ] [`_mm512_maskz_scalef_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_scalef_round_ph) - * [ ] [`_mm512_maskz_sqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_sqrt_ph) - * [ ] [`_mm512_maskz_sqrt_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_sqrt_round_ph) - * [ ] [`_mm512_max_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_max_ph) - * [ ] [`_mm512_max_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_max_round_ph) - * [ ] [`_mm512_min_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_min_ph) - * [ ] [`_mm512_min_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_min_round_ph) * [ ] [`_mm512_permutex2var_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_permutex2var_ph) * [ ] [`_mm512_permutexvar_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_permutexvar_ph) - * [ ] [`_mm512_rcp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_rcp_ph) * [ ] [`_mm512_reduce_add_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_add_ph) * [ ] [`_mm512_reduce_max_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_max_ph) * [ ] [`_mm512_reduce_min_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_min_ph) @@ -250,12 +229,9 @@ * [ ] [`_mm512_reduce_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_round_ph) * [ ] [`_mm512_roundscale_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_roundscale_ph) * [ ] [`_mm512_roundscale_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_roundscale_round_ph) - * [ ] [`_mm512_rsqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_rsqrt_ph) * [ ] [`_mm512_scalef_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_scalef_ph) * [ ] 
[`_mm512_scalef_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_scalef_round_ph) * [ ] [`_mm512_set1_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_set1_pch) - * [ ] [`_mm512_sqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_sqrt_ph) - * [ ] [`_mm512_sqrt_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_sqrt_round_ph) * [ ] [`_mm_cvt_roundi32_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundi32_sh) * [ ] [`_mm_cvt_roundi64_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundi64_sh) * [ ] [`_mm_cvt_roundsd_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundsd_sh) @@ -309,16 +285,12 @@ * [ ] [`_mm_mask_getexp_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_getexp_sh) * [ ] [`_mm_mask_getmant_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_getmant_round_sh) * [ ] [`_mm_mask_getmant_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_getmant_sh) - * [ ] [`_mm_mask_rcp_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_rcp_sh) * [ ] [`_mm_mask_reduce_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_reduce_round_sh) * [ ] [`_mm_mask_reduce_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_reduce_sh) * [ ] [`_mm_mask_roundscale_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_roundscale_round_sh) * [ ] [`_mm_mask_roundscale_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_roundscale_sh) - * [ ] [`_mm_mask_rsqrt_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_rsqrt_sh) * [ ] [`_mm_mask_scalef_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_scalef_round_sh) * [ ] [`_mm_mask_scalef_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_scalef_sh) - * [ ] [`_mm_mask_sqrt_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_sqrt_round_sh) - * [ ] [`_mm_mask_sqrt_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_sqrt_sh) * [ ] [`_mm_maskz_cvt_roundsd_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvt_roundsd_sh) * [ ] [`_mm_maskz_cvt_roundsh_sd`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvt_roundsh_sd) * [ ] [`_mm_maskz_cvt_roundsh_ss`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvt_roundsh_ss) @@ -331,27 +303,19 @@ * [ ] [`_mm_maskz_getexp_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_getexp_sh) * [ ] [`_mm_maskz_getmant_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_getmant_round_sh) * [ ] [`_mm_maskz_getmant_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_getmant_sh) - * [ ] [`_mm_maskz_rcp_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_rcp_sh) * [ ] [`_mm_maskz_reduce_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_reduce_round_sh) * [ ] [`_mm_maskz_reduce_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_reduce_sh) * 
[ ] [`_mm_maskz_roundscale_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_roundscale_round_sh) * [ ] [`_mm_maskz_roundscale_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_roundscale_sh) - * [ ] [`_mm_maskz_rsqrt_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_rsqrt_sh) * [ ] [`_mm_maskz_scalef_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_scalef_round_sh) * [ ] [`_mm_maskz_scalef_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_scalef_sh) - * [ ] [`_mm_maskz_sqrt_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_sqrt_round_sh) - * [ ] [`_mm_maskz_sqrt_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_sqrt_sh) - * [ ] [`_mm_rcp_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_rcp_sh) * [ ] [`_mm_reduce_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_reduce_round_sh) * [ ] [`_mm_reduce_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_reduce_sh) * [ ] [`_mm_roundscale_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_roundscale_round_sh) * [ ] [`_mm_roundscale_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_roundscale_sh) - * [ ] [`_mm_rsqrt_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_rsqrt_sh) * [ ] [`_mm_scalef_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_scalef_round_sh) * [ ] [`_mm_scalef_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_scalef_sh) * [ ] [`_mm_set1_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set1_pch) - * [ ] [`_mm_sqrt_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sqrt_round_sh) - * [ ] [`_mm_sqrt_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sqrt_sh)
@@ -410,14 +374,9 @@ * [ ] [`_mm256_mask_fpclass_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fpclass_ph_mask) * [ ] [`_mm256_mask_getexp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_getexp_ph) * [ ] [`_mm256_mask_getmant_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_getmant_ph) - * [ ] [`_mm256_mask_max_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_max_ph) - * [ ] [`_mm256_mask_min_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_min_ph) - * [ ] [`_mm256_mask_rcp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_rcp_ph) * [ ] [`_mm256_mask_reduce_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_reduce_ph) * [ ] [`_mm256_mask_roundscale_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_roundscale_ph) - * [ ] [`_mm256_mask_rsqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_rsqrt_ph) * [ ] [`_mm256_mask_scalef_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_scalef_ph) - * [ ] [`_mm256_mask_sqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_sqrt_ph) * [ ] [`_mm256_maskz_cvtepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepi16_ph) * [ ] [`_mm256_maskz_cvtepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepi32_ph) * [ ] [`_mm256_maskz_cvtepi64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepi64_ph) @@ -442,28 +401,18 @@ * [ ] [`_mm256_maskz_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtxps_ph) * [ ] [`_mm256_maskz_getexp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_getexp_ph) * [ ] [`_mm256_maskz_getmant_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_getmant_ph) - * [ ] [`_mm256_maskz_max_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_max_ph) - * [ ] [`_mm256_maskz_min_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_min_ph) - * [ ] [`_mm256_maskz_rcp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_rcp_ph) * [ ] [`_mm256_maskz_reduce_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_reduce_ph) * [ ] [`_mm256_maskz_roundscale_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_roundscale_ph) - * [ ] [`_mm256_maskz_rsqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_rsqrt_ph) * [ ] [`_mm256_maskz_scalef_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_scalef_ph) - * [ ] [`_mm256_maskz_sqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_sqrt_ph) - * [ ] [`_mm256_max_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_max_ph) - * [ ] [`_mm256_min_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_min_ph) * [ ] [`_mm256_permutex2var_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permutex2var_ph) * [ ] 
[`_mm256_permutexvar_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permutexvar_ph) - * [ ] [`_mm256_rcp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_rcp_ph) * [ ] [`_mm256_reduce_add_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_reduce_add_ph) * [ ] [`_mm256_reduce_max_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_reduce_max_ph) * [ ] [`_mm256_reduce_min_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_reduce_min_ph) * [ ] [`_mm256_reduce_mul_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_reduce_mul_ph) * [ ] [`_mm256_reduce_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_reduce_ph) * [ ] [`_mm256_roundscale_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_roundscale_ph) - * [ ] [`_mm256_rsqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_rsqrt_ph) * [ ] [`_mm256_scalef_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_scalef_ph) - * [ ] [`_mm256_sqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_sqrt_ph) * [ ] [`_mm_cmp_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmp_ph_mask) * [ ] [`_mm_cvtepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepi16_ph) * [ ] [`_mm_cvtepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepi32_ph) @@ -517,18 +466,9 @@ * [ ] [`_mm_mask_fpclass_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fpclass_ph_mask) * [ ] [`_mm_mask_getexp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_getexp_ph) * [ ] [`_mm_mask_getmant_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_getmant_ph) - * [ ] [`_mm_mask_max_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_max_ph) - * [ ] [`_mm_mask_max_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_max_round_sh) - * [ ] [`_mm_mask_max_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_max_sh) - * [ ] [`_mm_mask_min_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_min_ph) - * [ ] [`_mm_mask_min_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_min_round_sh) - * [ ] [`_mm_mask_min_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_min_sh) - * [ ] [`_mm_mask_rcp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_rcp_ph) * [ ] [`_mm_mask_reduce_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_reduce_ph) * [ ] [`_mm_mask_roundscale_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_roundscale_ph) - * [ ] [`_mm_mask_rsqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_rsqrt_ph) * [ ] [`_mm_mask_scalef_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_scalef_ph) - * [ ] [`_mm_mask_sqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_sqrt_ph) * [ ] [`_mm_maskz_cvtepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepi16_ph) * [ ] 
[`_mm_maskz_cvtepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepi32_ph) * [ ] [`_mm_maskz_cvtepi64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepi64_ph) @@ -553,36 +493,18 @@ * [ ] [`_mm_maskz_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtxps_ph) * [ ] [`_mm_maskz_getexp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_getexp_ph) * [ ] [`_mm_maskz_getmant_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_getmant_ph) - * [ ] [`_mm_maskz_max_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_max_ph) - * [ ] [`_mm_maskz_max_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_max_round_sh) - * [ ] [`_mm_maskz_max_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_max_sh) - * [ ] [`_mm_maskz_min_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_min_ph) - * [ ] [`_mm_maskz_min_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_min_round_sh) - * [ ] [`_mm_maskz_min_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_min_sh) - * [ ] [`_mm_maskz_rcp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_rcp_ph) * [ ] [`_mm_maskz_reduce_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_reduce_ph) * [ ] [`_mm_maskz_roundscale_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_roundscale_ph) - * [ ] [`_mm_maskz_rsqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_rsqrt_ph) * [ ] [`_mm_maskz_scalef_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_scalef_ph) - * [ ] [`_mm_maskz_sqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_sqrt_ph) - * [ ] [`_mm_max_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_ph) - * [ ] [`_mm_max_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_round_sh) - * [ ] [`_mm_max_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_sh) - * [ ] [`_mm_min_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_ph) - * [ ] [`_mm_min_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_round_sh) - * [ ] [`_mm_min_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_sh) * [ ] [`_mm_permutex2var_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_permutex2var_ph) * [ ] [`_mm_permutexvar_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_permutexvar_ph) - * [ ] [`_mm_rcp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_rcp_ph) * [ ] [`_mm_reduce_add_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_reduce_add_ph) * [ ] [`_mm_reduce_max_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_reduce_max_ph) * [ ] [`_mm_reduce_min_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_reduce_min_ph) * [ ] [`_mm_reduce_mul_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_reduce_mul_ph) * [ ] 
[`_mm_reduce_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_reduce_ph) * [ ] [`_mm_roundscale_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_roundscale_ph) - * [ ] [`_mm_rsqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_rsqrt_ph) * [ ] [`_mm_scalef_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_scalef_ph) - * [ ] [`_mm_sqrt_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sqrt_ph)

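Before the implementation diff below, a hedged usage sketch of the masked approximate-reciprocal intrinsics this patch adds. The intrinsic names, types, and the `stdarch_x86_avx512_f16` feature gate are taken from the diff itself; the nightly feature setup and the helper name `masked_reciprocal` are illustrative assumptions, not part of the patch:

```rust
// Sketch only: assumes a nightly toolchain and a CPU with AVX512-FP16 + AVX512-VL.
#![feature(f16, stdarch_x86_avx512_f16)]

#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::*;

// Merge-masking: lanes whose bit in `k` is 0 keep the value from `src`.
// The `maskz` variants zero those lanes instead, and the plain variants
// forward an all-ones mask (`0xff` for 8 lanes) with an undefined `src`.
#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512fp16,avx512vl")]
unsafe fn masked_reciprocal(src: __m128h, k: __mmask8, a: __m128h) -> __m128h {
    _mm_mask_rcp_ph(src, k, a)
}

// The `_round`/SAE forms below take their immediate as a const generic, e.g.
// `_mm512_sqrt_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(v)`.
```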
diff --git a/crates/core_arch/src/x86/avx512fp16.rs b/crates/core_arch/src/x86/avx512fp16.rs index 11e5f7d8e9..b30bc63ed4 100644 --- a/crates/core_arch/src/x86/avx512fp16.rs +++ b/crates/core_arch/src/x86/avx512fp16.rs @@ -7269,6 +7269,1177 @@ pub unsafe fn _mm512_maskz_fmsubadd_round_ph( ) } +/// Compute the approximate reciprocal of packed half-precision (16-bit) floating-point elements in `a`, and store the results in `dst`. +/// The maximum relative error for this approximation is less than `1.5*2^-12`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_rcp_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vrcpph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_rcp_ph(a: __m128h) -> __m128h { + _mm_mask_rcp_ph(_mm_undefined_ph(), 0xff, a) +} + +/// Compute the approximate reciprocal of packed half-precision (16-bit) floating-point elements in `a`, and store the results in `dst` +/// using writemask `k` (elements are copied from `src` when the corresponding mask bit is not set). +/// The maximum relative error for this approximation is less than `1.5*2^-12`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_rcp_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vrcpph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mask_rcp_ph(src: __m128h, k: __mmask8, a: __m128h) -> __m128h { + vrcpph_128(a, src, k) +} + +/// Compute the approximate reciprocal of packed half-precision (16-bit) floating-point elements in `a`, and store the results in `dst` +/// using zeromask `k` (elements are zeroed out when the corresponding mask bit is not set). +/// The maximum relative error for this approximation is less than `1.5*2^-12`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_rcp_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vrcpph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_maskz_rcp_ph(k: __mmask8, a: __m128h) -> __m128h { + _mm_mask_rcp_ph(_mm_setzero_ph(), k, a) +} + +/// Compute the approximate reciprocal of packed half-precision (16-bit) floating-point elements in `a`, and store the results in `dst`. +/// The maximum relative error for this approximation is less than `1.5*2^-12`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_rcp_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vrcpph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_rcp_ph(a: __m256h) -> __m256h { + _mm256_mask_rcp_ph(_mm256_undefined_ph(), 0xffff, a) +} + +/// Compute the approximate reciprocal of packed half-precision (16-bit) floating-point elements in `a`, and store the results in `dst` +/// using writemask `k` (elements are copied from `src` when the corresponding mask bit is not set). +/// The maximum relative error for this approximation is less than `1.5*2^-12`.
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_rcp_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vrcpph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_mask_rcp_ph(src: __m256h, k: __mmask16, a: __m256h) -> __m256h { + vrcpph_256(a, src, k) +} + +/// Compute the approximate reciprocal of packed half-precision (16-bit) floating-point elements in `a`, and store the results in `dst` +/// using zeromask `k` (elements are zeroed out when the corresponding mask bit is not set). +/// The maximum relative error for this approximation is less than `1.5*2^-12`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_rcp_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vrcpph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_maskz_rcp_ph(k: __mmask16, a: __m256h) -> __m256h { + _mm256_mask_rcp_ph(_mm256_setzero_ph(), k, a) +} + +/// Compute the approximate reciprocal of packed half-precision (16-bit) floating-point elements in `a`, and store the results in `dst`. +/// The maximum relative error for this approximation is less than `1.5*2^-12`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_rcp_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vrcpph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_rcp_ph(a: __m512h) -> __m512h { + _mm512_mask_rcp_ph(_mm512_undefined_ph(), 0xffffffff, a) +} + +/// Compute the approximate reciprocal of packed half-precision (16-bit) floating-point elements in `a`, and store the results in `dst` +/// using writemask `k` (elements are copied from `src` when the corresponding mask bit is not set). +/// The maximum relative error for this approximation is less than `1.5*2^-12`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_rcp_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vrcpph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_mask_rcp_ph(src: __m512h, k: __mmask32, a: __m512h) -> __m512h { + vrcpph_512(a, src, k) +} + +/// Compute the approximate reciprocal of packed half-precision (16-bit) floating-point elements in `a`, and store the results in `dst` +/// using zeromask `k` (elements are zeroed out when the corresponding mask bit is not set). +/// The maximum relative error for this approximation is less than `1.5*2^-12`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_rcp_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vrcpph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_maskz_rcp_ph(k: __mmask32, a: __m512h) -> __m512h { + _mm512_mask_rcp_ph(_mm512_setzero_ph(), k, a) +} + +/// Compute the approximate reciprocal of the lower half-precision (16-bit) floating-point element in b, +/// store the result in the lower element of dst, and copy the upper 7 packed elements from a to the +/// upper elements of dst. +/// The maximum relative error for this approximation is less than `1.5*2^-12`.
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_rcp_sh) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vrcpsh))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_rcp_sh(a: __m128h, b: __m128h) -> __m128h { + _mm_mask_rcp_sh(_mm_undefined_ph(), 0xff, a, b) +} + +/// Compute the approximate reciprocal of the lower half-precision (16-bit) floating-point element in b, +/// store the result in the lower element of dst using writemask k (the element is copied from src when +/// mask bit 0 is not set), and copy the upper 7 packed elements from a to the upper elements of dst. +/// The maximum relative error for this approximation is less than `1.5*2^-12`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_rcp_sh) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vrcpsh))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mask_rcp_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h { + vrcpsh(a, b, src, k) +} + +/// Compute the approximate reciprocal of the lower half-precision (16-bit) floating-point element in b, +/// store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 +/// is not set), and copy the upper 7 packed elements from a to the upper elements of dst. +/// The maximum relative error for this approximation is less than `1.5*2^-12`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_rcp_sh) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vrcpsh))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_maskz_rcp_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { + _mm_mask_rcp_sh(_mm_setzero_ph(), k, a, b) +} + +/// Compute the approximate reciprocal square root of packed half-precision (16-bit) floating-point +/// elements in a, and store the results in dst. +/// The maximum relative error for this approximation is less than `1.5*2^-12`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_rsqrt_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vrsqrtph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_rsqrt_ph(a: __m128h) -> __m128h { + _mm_mask_rsqrt_ph(_mm_undefined_ph(), 0xff, a) +} + +/// Compute the approximate reciprocal square root of packed half-precision (16-bit) floating-point +/// elements in a, and store the results in dst using writemask k (elements are copied from src when +/// the corresponding mask bit is not set). +/// The maximum relative error for this approximation is less than `1.5*2^-12`. 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_rsqrt_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vrsqrtph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mask_rsqrt_ph(src: __m128h, k: __mmask8, a: __m128h) -> __m128h { + vrsqrtph_128(a, src, k) +} + +/// Compute the approximate reciprocal square root of packed half-precision (16-bit) floating-point +/// elements in a, and store the results in dst using zeromask k (elements are zeroed out when the +/// corresponding mask bit is not set). +/// The maximum relative error for this approximation is less than `1.5*2^-12`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_rsqrt_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vrsqrtph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_maskz_rsqrt_ph(k: __mmask8, a: __m128h) -> __m128h { + _mm_mask_rsqrt_ph(_mm_setzero_ph(), k, a) +} + +/// Compute the approximate reciprocal square root of packed half-precision (16-bit) floating-point +/// elements in a, and store the results in dst. +/// The maximum relative error for this approximation is less than `1.5*2^-12`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_rsqrt_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vrsqrtph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_rsqrt_ph(a: __m256h) -> __m256h { + _mm256_mask_rsqrt_ph(_mm256_undefined_ph(), 0xffff, a) +} + +/// Compute the approximate reciprocal square root of packed half-precision (16-bit) floating-point +/// elements in a, and store the results in dst using writemask k (elements are copied from src when +/// the corresponding mask bit is not set). +/// The maximum relative error for this approximation is less than `1.5*2^-12`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_rsqrt_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vrsqrtph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_mask_rsqrt_ph(src: __m256h, k: __mmask16, a: __m256h) -> __m256h { + vrsqrtph_256(a, src, k) +} + +/// Compute the approximate reciprocal square root of packed half-precision (16-bit) floating-point +/// elements in a, and store the results in dst using zeromask k (elements are zeroed out when the +/// corresponding mask bit is not set). +/// The maximum relative error for this approximation is less than `1.5*2^-12`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_rsqrt_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vrsqrtph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_maskz_rsqrt_ph(k: __mmask16, a: __m256h) -> __m256h { + _mm256_mask_rsqrt_ph(_mm256_setzero_ph(), k, a) +} + +/// Compute the approximate reciprocal square root of packed half-precision (16-bit) floating-point +/// elements in a, and store the results in dst. +/// The maximum relative error for this approximation is less than `1.5*2^-12`. 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_rsqrt_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vrsqrtph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_rsqrt_ph(a: __m512h) -> __m512h { + _mm512_mask_rsqrt_ph(_mm512_undefined_ph(), 0xffffffff, a) +} + +/// Compute the approximate reciprocal square root of packed half-precision (16-bit) floating-point +/// elements in a, and store the results in dst using writemask k (elements are copied from src when +/// the corresponding mask bit is not set). +/// The maximum relative error for this approximation is less than `1.5*2^-12`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_rsqrt_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vrsqrtph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_mask_rsqrt_ph(src: __m512h, k: __mmask32, a: __m512h) -> __m512h { + vrsqrtph_512(a, src, k) +} + +/// Compute the approximate reciprocal square root of packed half-precision (16-bit) floating-point +/// elements in a, and store the results in dst using zeromask k (elements are zeroed out when the +/// corresponding mask bit is not set). +/// The maximum relative error for this approximation is less than `1.5*2^-12`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_rsqrt_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vrsqrtph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_maskz_rsqrt_ph(k: __mmask32, a: __m512h) -> __m512h { + _mm512_mask_rsqrt_ph(_mm512_setzero_ph(), k, a) +} + +/// Compute the approximate reciprocal square root of the lower half-precision (16-bit) floating-point +/// element in b, store the result in the lower element of dst, and copy the upper 7 packed elements from a +/// to the upper elements of dst. +/// The maximum relative error for this approximation is less than `1.5*2^-12`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_rsqrt_sh) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vrsqrtsh))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_rsqrt_sh(a: __m128h, b: __m128h) -> __m128h { + _mm_mask_rsqrt_sh(_mm_undefined_ph(), 0xff, a, b) +} + +/// Compute the approximate reciprocal square root of the lower half-precision (16-bit) floating-point +/// element in b, store the result in the lower element of dst using writemask k (the element is copied from src +/// when mask bit 0 is not set), and copy the upper 7 packed elements from a to the upper elements of dst. +/// The maximum relative error for this approximation is less than `1.5*2^-12`. 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_rsqrt_sh) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vrsqrtsh))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mask_rsqrt_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h { + vrsqrtsh(a, b, src, k) +} + +/// Compute the approximate reciprocal square root of the lower half-precision (16-bit) floating-point +/// element in b, store the result in the lower element of dst using zeromask k (the element is zeroed out when +/// mask bit 0 is not set), and copy the upper 7 packed elements from a to the upper elements of dst. +/// The maximum relative error for this approximation is less than `1.5*2^-12`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_rsqrt_sh) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vrsqrtsh))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_maskz_rsqrt_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { + _mm_mask_rsqrt_sh(_mm_setzero_ph(), k, a, b) +} + +/// Compute the square root of packed half-precision (16-bit) floating-point elements in a, and store the +/// results in dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sqrt_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vsqrtph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_sqrt_ph(a: __m128h) -> __m128h { + simd_fsqrt(a) +} + +/// Compute the square root of packed half-precision (16-bit) floating-point elements in a, and store the +/// results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_sqrt_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vsqrtph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mask_sqrt_ph(src: __m128h, k: __mmask8, a: __m128h) -> __m128h { + simd_select_bitmask(k, _mm_sqrt_ph(a), src) +} + +/// Compute the square root of packed half-precision (16-bit) floating-point elements in a, and store the +/// results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_sqrt_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vsqrtph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_maskz_sqrt_ph(k: __mmask8, a: __m128h) -> __m128h { + simd_select_bitmask(k, _mm_sqrt_ph(a), _mm_setzero_ph()) +} + +/// Compute the square root of packed half-precision (16-bit) floating-point elements in a, and store the +/// results in dst. 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_sqrt_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vsqrtph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_sqrt_ph(a: __m256h) -> __m256h { + simd_fsqrt(a) +} + +/// Compute the square root of packed half-precision (16-bit) floating-point elements in a, and store the +/// results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_sqrt_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vsqrtph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_mask_sqrt_ph(src: __m256h, k: __mmask16, a: __m256h) -> __m256h { + simd_select_bitmask(k, _mm256_sqrt_ph(a), src) +} + +/// Compute the square root of packed half-precision (16-bit) floating-point elements in a, and store the +/// results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_sqrt_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vsqrtph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_maskz_sqrt_ph(k: __mmask16, a: __m256h) -> __m256h { + simd_select_bitmask(k, _mm256_sqrt_ph(a), _mm256_setzero_ph()) +} + +/// Compute the square root of packed half-precision (16-bit) floating-point elements in a, and store the +/// results in dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_sqrt_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vsqrtph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_sqrt_ph(a: __m512h) -> __m512h { + simd_fsqrt(a) +} + +/// Compute the square root of packed half-precision (16-bit) floating-point elements in a, and store the +/// results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_sqrt_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vsqrtph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_mask_sqrt_ph(src: __m512h, k: __mmask32, a: __m512h) -> __m512h { + simd_select_bitmask(k, _mm512_sqrt_ph(a), src) +} + +/// Compute the square root of packed half-precision (16-bit) floating-point elements in a, and store the +/// results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_sqrt_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vsqrtph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_maskz_sqrt_ph(k: __mmask32, a: __m512h) -> __m512h { + simd_select_bitmask(k, _mm512_sqrt_ph(a), _mm512_setzero_ph()) +} + +/// Compute the square root of packed half-precision (16-bit) floating-point elements in a, and store the +/// results in dst. +/// Rounding is done according to the rounding parameter, which can be one of: +/// +/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions +/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions +/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions +/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions +/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_sqrt_round_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vsqrtph, ROUNDING = 8))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_sqrt_round_ph<const ROUNDING: i32>(a: __m512h) -> __m512h { + static_assert_rounding!(ROUNDING); + vsqrtph_512(a, ROUNDING) +} + +/// Compute the square root of packed half-precision (16-bit) floating-point elements in a, and store the +/// results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). +/// Rounding is done according to the rounding parameter, which can be one of: +/// +/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions +/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions +/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions +/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions +/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_sqrt_round_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vsqrtph, ROUNDING = 8))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_mask_sqrt_round_ph<const ROUNDING: i32>( + src: __m512h, + k: __mmask32, + a: __m512h, +) -> __m512h { + static_assert_rounding!(ROUNDING); + simd_select_bitmask(k, _mm512_sqrt_round_ph::<ROUNDING>(a), src) +} + +/// Compute the square root of packed half-precision (16-bit) floating-point elements in a, and store the +/// results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+/// Rounding is done according to the rounding parameter, which can be one of: +/// +/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions +/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions +/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions +/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions +/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_sqrt_round_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vsqrtph, ROUNDING = 8))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_maskz_sqrt_round_ph<const ROUNDING: i32>(k: __mmask32, a: __m512h) -> __m512h { + static_assert_rounding!(ROUNDING); + simd_select_bitmask(k, _mm512_sqrt_round_ph::<ROUNDING>(a), _mm512_setzero_ph()) +} + +/// Compute the square root of the lower half-precision (16-bit) floating-point element in b, store +/// the result in the lower element of dst, and copy the upper 7 packed elements from a to the upper +/// elements of dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sqrt_sh) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vsqrtsh))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_sqrt_sh(a: __m128h, b: __m128h) -> __m128h { + _mm_mask_sqrt_sh(_mm_undefined_ph(), 0xff, a, b) +} + +/// Compute the square root of the lower half-precision (16-bit) floating-point element in b, store +/// the result in the lower element of dst using writemask k (the element is copied from src when mask +/// bit 0 is not set), and copy the upper 7 packed elements from a to the upper elements of dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_sqrt_sh) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vsqrtsh))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mask_sqrt_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h { + _mm_mask_sqrt_round_sh::<_MM_FROUND_CUR_DIRECTION>(src, k, a, b) +} + +/// Compute the square root of the lower half-precision (16-bit) floating-point element in b, store +/// the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 +/// is not set), and copy the upper 7 packed elements from a to the upper elements of dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_sqrt_sh) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vsqrtsh))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_maskz_sqrt_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { + _mm_mask_sqrt_sh(_mm_setzero_ph(), k, a, b) +} + +/// Compute the square root of the lower half-precision (16-bit) floating-point element in b, store +/// the result in the lower element of dst, and copy the upper 7 packed elements from a to the upper +/// elements of dst.
+/// Rounding is done according to the rounding parameter, which can be one of: +/// +/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions +/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions +/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions +/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions +/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sqrt_round_sh) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vsqrtsh, ROUNDING = 8))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_sqrt_round_sh<const ROUNDING: i32>(a: __m128h, b: __m128h) -> __m128h { + static_assert_rounding!(ROUNDING); + _mm_mask_sqrt_round_sh::<ROUNDING>(_mm_undefined_ph(), 0xff, a, b) +} + +/// Compute the square root of the lower half-precision (16-bit) floating-point element in b, store +/// the result in the lower element of dst using writemask k (the element is copied from src when mask +/// bit 0 is not set), and copy the upper 7 packed elements from a to the upper elements of dst. +/// Rounding is done according to the rounding parameter, which can be one of: +/// +/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions +/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions +/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions +/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions +/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_sqrt_round_sh) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vsqrtsh, ROUNDING = 8))] +#[rustc_legacy_const_generics(4)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mask_sqrt_round_sh<const ROUNDING: i32>( + src: __m128h, + k: __mmask8, + a: __m128h, + b: __m128h, +) -> __m128h { + static_assert_rounding!(ROUNDING); + vsqrtsh(a, b, src, k, ROUNDING) +} + +/// Compute the square root of the lower half-precision (16-bit) floating-point element in b, store +/// the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 +/// is not set), and copy the upper 7 packed elements from a to the upper elements of dst.
+/// Rounding is done according to the rounding parameter, which can be one of: +/// +/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions +/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions +/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions +/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions +/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_sqrt_round_sh) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vsqrtsh, ROUNDING = 8))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_maskz_sqrt_round_sh<const ROUNDING: i32>( + k: __mmask8, + a: __m128h, + b: __m128h, +) -> __m128h { + static_assert_rounding!(ROUNDING); + _mm_mask_sqrt_round_sh::<ROUNDING>(_mm_setzero_ph(), k, a, b) +} + +/// Compare packed half-precision (16-bit) floating-point elements in a and b, and store packed maximum +/// values in dst. Does not follow the IEEE Standard for Floating-Point Arithmetic (IEEE 754) maximum +/// value when inputs are NaN or signed-zero values. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vmaxph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_max_ph(a: __m128h, b: __m128h) -> __m128h { + vmaxph_128(a, b) +} + +/// Compare packed half-precision (16-bit) floating-point elements in a and b, and store packed maximum +/// values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). +/// Does not follow the IEEE Standard for Floating-Point Arithmetic (IEEE 754) maximum value when inputs are +/// NaN or signed-zero values. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_max_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vmaxph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mask_max_ph(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h { + simd_select_bitmask(k, _mm_max_ph(a, b), src) +} + +/// Compare packed half-precision (16-bit) floating-point elements in a and b, and store packed maximum +/// values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). +/// Does not follow the IEEE Standard for Floating-Point Arithmetic (IEEE 754) maximum value when inputs are +/// NaN or signed-zero values. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_max_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vmaxph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_maskz_max_ph(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { + simd_select_bitmask(k, _mm_max_ph(a, b), _mm_setzero_ph()) +} + +/// Compare packed half-precision (16-bit) floating-point elements in a and b, and store packed maximum +/// values in dst. Does not follow the IEEE Standard for Floating-Point Arithmetic (IEEE 754) maximum +/// value when inputs are NaN or signed-zero values.
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_max_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vmaxph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_max_ph(a: __m256h, b: __m256h) -> __m256h { + vmaxph_256(a, b) +} + +/// Compare packed half-precision (16-bit) floating-point elements in a and b, and store packed maximum +/// values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). +/// Does not follow the IEEE Standard for Floating-Point Arithmetic (IEEE 754) maximum value when inputs are +/// NaN or signed-zero values. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_max_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vmaxph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_mask_max_ph(src: __m256h, k: __mmask16, a: __m256h, b: __m256h) -> __m256h { + simd_select_bitmask(k, _mm256_max_ph(a, b), src) +} + +/// Compare packed half-precision (16-bit) floating-point elements in a and b, and store packed maximum +/// values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). +/// Does not follow the IEEE Standard for Floating-Point Arithmetic (IEEE 754) maximum value when inputs are +/// NaN or signed-zero values. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_max_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vmaxph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_maskz_max_ph(k: __mmask16, a: __m256h, b: __m256h) -> __m256h { + simd_select_bitmask(k, _mm256_max_ph(a, b), _mm256_setzero_ph()) +} + +/// Compare packed half-precision (16-bit) floating-point elements in a and b, and store packed maximum +/// values in dst. Does not follow the IEEE Standard for Floating-Point Arithmetic (IEEE 754) maximum +/// value when inputs are NaN or signed-zero values. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_max_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vmaxph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_max_ph(a: __m512h, b: __m512h) -> __m512h { + _mm512_max_round_ph::<_MM_FROUND_CUR_DIRECTION>(a, b) +} + +/// Compare packed half-precision (16-bit) floating-point elements in a and b, and store packed maximum +/// values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). +/// Does not follow the IEEE Standard for Floating-Point Arithmetic (IEEE 754) maximum value when inputs are +/// NaN or signed-zero values. 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_max_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vmaxph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_mask_max_ph(src: __m512h, k: __mmask32, a: __m512h, b: __m512h) -> __m512h { + simd_select_bitmask(k, _mm512_max_ph(a, b), src) +} + +/// Compare packed half-precision (16-bit) floating-point elements in a and b, and store packed maximum +/// values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). +/// Does not follow the IEEE Standard for Floating-Point Arithmetic (IEEE 754) maximum value when inputs are +/// NaN or signed-zero values. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_max_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vmaxph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_maskz_max_ph(k: __mmask32, a: __m512h, b: __m512h) -> __m512h { + simd_select_bitmask(k, _mm512_max_ph(a, b), _mm512_setzero_ph()) +} + +/// Compare packed half-precision (16-bit) floating-point elements in a and b, and store packed maximum +/// values in dst. Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter. +/// Does not follow the IEEE Standard for Floating-Point Arithmetic (IEEE 754) maximum value when inputs are +/// NaN or signed-zero values. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_max_round_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vmaxph, SAE = 8))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_max_round_ph<const SAE: i32>(a: __m512h, b: __m512h) -> __m512h { + static_assert_sae!(SAE); + vmaxph_512(a, b, SAE) +} + +/// Compare packed half-precision (16-bit) floating-point elements in a and b, and store packed maximum +/// values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). +/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter. Does not follow the +/// IEEE Standard for Floating-Point Arithmetic (IEEE 754) maximum value when inputs are NaN or signed-zero values. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_max_round_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vmaxph, SAE = 8))] +#[rustc_legacy_const_generics(4)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_mask_max_round_ph<const SAE: i32>( + src: __m512h, + k: __mmask32, + a: __m512h, + b: __m512h, +) -> __m512h { + static_assert_sae!(SAE); + simd_select_bitmask(k, _mm512_max_round_ph::<SAE>(a, b), src) +} + +/// Compare packed half-precision (16-bit) floating-point elements in a and b, and store packed maximum +/// values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). +/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter. Does not follow the +/// IEEE Standard for Floating-Point Arithmetic (IEEE 754) maximum value when inputs are NaN or signed-zero values.
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_max_round_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vmaxph, SAE = 8))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_maskz_max_round_ph<const SAE: i32>( + k: __mmask32, + a: __m512h, + b: __m512h, +) -> __m512h { + static_assert_sae!(SAE); + simd_select_bitmask(k, _mm512_max_round_ph::<SAE>(a, b), _mm512_setzero_ph()) +} + +/// Compare the lower half-precision (16-bit) floating-point elements in a and b, store the maximum +/// value in the lower element of dst, and copy the upper 7 packed elements from a to the upper elements +/// of dst. Does not follow the IEEE Standard for Floating-Point Arithmetic (IEEE 754) maximum value +/// when inputs are NaN or signed-zero values. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_sh) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vmaxsh))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_max_sh(a: __m128h, b: __m128h) -> __m128h { + _mm_mask_max_sh(_mm_undefined_ph(), 0xff, a, b) +} + +/// Compare the lower half-precision (16-bit) floating-point elements in a and b, store the maximum +/// value in the lower element of dst using writemask k (the element is copied from src when mask bit 0 +/// is not set), and copy the upper 7 packed elements from a to the upper elements of dst. Does not follow +/// the IEEE Standard for Floating-Point Arithmetic (IEEE 754) maximum value when inputs are NaN or signed-zero values. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_max_sh) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vmaxsh))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mask_max_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h { + _mm_mask_max_round_sh::<_MM_FROUND_CUR_DIRECTION>(src, k, a, b) +} + +/// Compare the lower half-precision (16-bit) floating-point elements in a and b, store the maximum value +/// in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and +/// copy the upper 7 packed elements from a to the upper elements of dst. Does not follow the IEEE Standard +/// for Floating-Point Arithmetic (IEEE 754) maximum value when inputs are NaN or signed-zero values. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_max_sh) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vmaxsh))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_maskz_max_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { + _mm_mask_max_sh(_mm_setzero_ph(), k, a, b) +} + +/// Compare the lower half-precision (16-bit) floating-point elements in a and b, store the maximum value +/// in the lower element of dst, and copy the upper 7 packed elements from a to the upper elements of dst. +/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter. Does not follow the +/// IEEE Standard for Floating-Point Arithmetic (IEEE 754) maximum value when inputs are NaN or signed-zero values.
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_round_sh) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vmaxsh, SAE = 8))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_max_round_sh<const SAE: i32>(a: __m128h, b: __m128h) -> __m128h { + static_assert_sae!(SAE); + _mm_mask_max_round_sh::<SAE>(_mm_undefined_ph(), 0xff, a, b) +} + +/// Compare the lower half-precision (16-bit) floating-point elements in a and b, store the maximum value +/// in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), +/// and copy the upper 7 packed elements from a to the upper elements of dst. Exceptions can be suppressed by +/// passing _MM_FROUND_NO_EXC in the sae parameter. Does not follow the IEEE Standard for Floating-Point Arithmetic +/// (IEEE 754) maximum value when inputs are NaN or signed-zero values. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_max_round_sh) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vmaxsh, SAE = 8))] +#[rustc_legacy_const_generics(4)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mask_max_round_sh<const SAE: i32>( + src: __m128h, + k: __mmask8, + a: __m128h, + b: __m128h, +) -> __m128h { + static_assert_sae!(SAE); + vmaxsh(a, b, src, k, SAE) +} + +/// Compare the lower half-precision (16-bit) floating-point elements in a and b, store the maximum value +/// in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and +/// copy the upper 7 packed elements from a to the upper elements of dst. Exceptions can be suppressed by +/// passing _MM_FROUND_NO_EXC in the sae parameter. Does not follow the IEEE Standard for Floating-Point Arithmetic +/// (IEEE 754) maximum value when inputs are NaN or signed-zero values. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_max_round_sh) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vmaxsh, SAE = 8))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_maskz_max_round_sh<const SAE: i32>( + k: __mmask8, + a: __m128h, + b: __m128h, +) -> __m128h { + static_assert_sae!(SAE); + _mm_mask_max_round_sh::<SAE>(_mm_setzero_ph(), k, a, b) +} + +/// Compare packed half-precision (16-bit) floating-point elements in a and b, and store packed minimum +/// values in dst. Does not follow the IEEE Standard for Floating-Point Arithmetic (IEEE 754) minimum value +/// when inputs are NaN or signed-zero values. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vminph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_min_ph(a: __m128h, b: __m128h) -> __m128h { + vminph_128(a, b) +} + +/// Compare packed half-precision (16-bit) floating-point elements in a and b, and store packed minimum +/// values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). +/// Does not follow the IEEE Standard for Floating-Point Arithmetic (IEEE 754) minimum value when inputs are +/// NaN or signed-zero values.
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_min_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vminph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mask_min_ph(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h { + simd_select_bitmask(k, _mm_min_ph(a, b), src) +} + +/// Compare packed half-precision (16-bit) floating-point elements in a and b, and store packed minimum +/// values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). +/// Does not follow the IEEE Standard for Floating-Point Arithmetic (IEEE 754) minimum value when inputs are +/// NaN or signed-zero values. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_min_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vminph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_maskz_min_ph(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { + simd_select_bitmask(k, _mm_min_ph(a, b), _mm_setzero_ph()) +} + +/// Compare packed half-precision (16-bit) floating-point elements in a and b, and store packed minimum +/// values in dst. Does not follow the IEEE Standard for Floating-Point Arithmetic (IEEE 754) minimum value +/// when inputs are NaN or signed-zero values. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_min_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vminph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_min_ph(a: __m256h, b: __m256h) -> __m256h { + vminph_256(a, b) +} + +/// Compare packed half-precision (16-bit) floating-point elements in a and b, and store packed minimum +/// values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). +/// Does not follow the IEEE Standard for Floating-Point Arithmetic (IEEE 754) minimum value when inputs are +/// NaN or signed-zero values. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_min_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vminph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_mask_min_ph(src: __m256h, k: __mmask16, a: __m256h, b: __m256h) -> __m256h { + simd_select_bitmask(k, _mm256_min_ph(a, b), src) +} + +/// Compare packed half-precision (16-bit) floating-point elements in a and b, and store packed minimum +/// values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). +/// Does not follow the IEEE Standard for Floating-Point Arithmetic (IEEE 754) minimum value when inputs are +/// NaN or signed-zero values. 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_min_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vminph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_maskz_min_ph(k: __mmask16, a: __m256h, b: __m256h) -> __m256h { + simd_select_bitmask(k, _mm256_min_ph(a, b), _mm256_setzero_ph()) +} + +/// Compare packed half-precision (16-bit) floating-point elements in a and b, and store packed minimum +/// values in dst. Does not follow the IEEE Standard for Floating-Point Arithmetic (IEEE 754) minimum value +/// when inputs are NaN or signed-zero values. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_min_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vminph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_min_ph(a: __m512h, b: __m512h) -> __m512h { + _mm512_min_round_ph::<_MM_FROUND_CUR_DIRECTION>(a, b) +} + +/// Compare packed half-precision (16-bit) floating-point elements in a and b, and store packed minimum +/// values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). +/// Does not follow the IEEE Standard for Floating-Point Arithmetic (IEEE 754) minimum value when inputs are +/// NaN or signed-zero values. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_min_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vminph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_mask_min_ph(src: __m512h, k: __mmask32, a: __m512h, b: __m512h) -> __m512h { + simd_select_bitmask(k, _mm512_min_ph(a, b), src) +} + +/// Compare packed half-precision (16-bit) floating-point elements in a and b, and store packed minimum +/// values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). +/// Does not follow the IEEE Standard for Floating-Point Arithmetic (IEEE 754) minimum value when inputs are +/// NaN or signed-zero values. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_min_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vminph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_maskz_min_ph(k: __mmask32, a: __m512h, b: __m512h) -> __m512h { + simd_select_bitmask(k, _mm512_min_ph(a, b), _mm512_setzero_ph()) +} + +/// Compare packed half-precision (16-bit) floating-point elements in a and b, and store packed minimum +/// values in dst. Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter. Does not +/// follow the IEEE Standard for Floating-Point Arithmetic (IEEE 754) minimum value when inputs are NaN or signed-zero values. 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_min_round_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vminph, SAE = 8))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_min_round_ph(a: __m512h, b: __m512h) -> __m512h { + static_assert_sae!(SAE); + vminph_512(a, b, SAE) +} + +/// Compare packed half-precision (16-bit) floating-point elements in a and b, and store packed minimum +/// values in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). +/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter. Does not follow the +/// IEEE Standard for Floating-Point Arithmetic (IEEE 754) minimum value when inputs are NaN or signed-zero values. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_min_round_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vminph, SAE = 8))] +#[rustc_legacy_const_generics(4)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_mask_min_round_ph( + src: __m512h, + k: __mmask32, + a: __m512h, + b: __m512h, +) -> __m512h { + static_assert_sae!(SAE); + simd_select_bitmask(k, _mm512_min_round_ph::(a, b), src) +} + +/// Compare packed half-precision (16-bit) floating-point elements in a and b, and store packed minimum +/// values in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). +/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter. Does not follow the +/// IEEE Standard for Floating-Point Arithmetic (IEEE 754) minimum value when inputs are NaN or signed-zero values. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_min_round_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vminph, SAE = 8))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_maskz_min_round_ph( + k: __mmask32, + a: __m512h, + b: __m512h, +) -> __m512h { + static_assert_sae!(SAE); + simd_select_bitmask(k, _mm512_min_round_ph::(a, b), _mm512_setzero_ph()) +} + +/// Compare the lower half-precision (16-bit) floating-point elements in a and b, store the minimum +/// value in the lower element of dst, and copy the upper 7 packed elements from a to the upper elements +/// of dst. Does not follow the IEEE Standard for Floating-Point Arithmetic (IEEE 754) minimum value when +/// inputs are NaN or signed-zero values. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_sh) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vminsh))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_min_sh(a: __m128h, b: __m128h) -> __m128h { + _mm_mask_min_sh(_mm_undefined_ph(), 0xff, a, b) +} + +/// Compare the lower half-precision (16-bit) floating-point elements in a and b, store the minimum +/// value in the lower element of dst using writemask k (the element is copied from src when mask bit 0 +/// is not set), and copy the upper 7 packed elements from a to the upper elements of dst. 
+/// Compare the lower half-precision (16-bit) floating-point elements in a and b, store the minimum
+/// value in the lower element of dst using writemask k (the element is copied from src when mask bit 0
+/// is not set), and copy the upper 7 packed elements from a to the upper elements of dst. Does not follow
+/// the IEEE Standard for Floating-Point Arithmetic (IEEE 754) minimum value when inputs are NaN or signed-zero values.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_min_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vminsh))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_min_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
+    _mm_mask_min_round_sh::<_MM_FROUND_CUR_DIRECTION>(src, k, a, b)
+}
+
+/// Compare the lower half-precision (16-bit) floating-point elements in a and b, store the minimum value
+/// in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and
+/// copy the upper 7 packed elements from a to the upper elements of dst. Does not follow the IEEE Standard
+/// for Floating-Point Arithmetic (IEEE 754) minimum value when inputs are NaN or signed-zero values.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_min_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vminsh))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_min_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
+    _mm_mask_min_sh(_mm_setzero_ph(), k, a, b)
+}
+
+/// Compare the lower half-precision (16-bit) floating-point elements in a and b, store the minimum value
+/// in the lower element of dst, and copy the upper 7 packed elements from a to the upper elements of dst.
+/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter. Does not follow the
+/// IEEE Standard for Floating-Point Arithmetic (IEEE 754) minimum value when inputs are NaN or signed-zero values.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_round_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vminsh, SAE = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_min_round_sh<const SAE: i32>(a: __m128h, b: __m128h) -> __m128h {
+    static_assert_sae!(SAE);
+    _mm_mask_min_round_sh::<SAE>(_mm_undefined_ph(), 0xff, a, b)
+}
+
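+// A small sketch (illustrative only) of the scalar `sh` convention used here:
+// only lane 0 is computed, and lanes 1..=7 of the result are copied from `a`.
+#[allow(dead_code)]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+unsafe fn min_sh_upper_lanes_sketch() -> __m128h {
+    let a = _mm_setr_ph(5.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0);
+    let b = _mm_set_sh(3.0);
+    // Lane 0 becomes min(5.0, 3.0) = 3.0; lanes 1..=7 keep 10.0..=70.0 from `a`.
+    _mm_min_sh(a, b)
+}
+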
+/// Compare the lower half-precision (16-bit) floating-point elements in a and b, store the minimum value
+/// in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set),
+/// and copy the upper 7 packed elements from a to the upper elements of dst. Exceptions can be suppressed by
+/// passing _MM_FROUND_NO_EXC in the sae parameter. Does not follow the IEEE Standard for Floating-Point Arithmetic
+/// (IEEE 754) minimum value when inputs are NaN or signed-zero values.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_min_round_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vminsh, SAE = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_min_round_sh<const SAE: i32>(
+    src: __m128h,
+    k: __mmask8,
+    a: __m128h,
+    b: __m128h,
+) -> __m128h {
+    static_assert_sae!(SAE);
+    vminsh(a, b, src, k, SAE)
+}
+
+/// Compare the lower half-precision (16-bit) floating-point elements in a and b, store the minimum value
+/// in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and
+/// copy the upper 7 packed elements from a to the upper elements of dst. Exceptions can be suppressed by
+/// passing _MM_FROUND_NO_EXC in the sae parameter. Does not follow the IEEE Standard for Floating-Point Arithmetic
+/// (IEEE 754) minimum value when inputs are NaN or signed-zero values.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_min_round_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vminsh, SAE = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_min_round_sh<const SAE: i32>(
+    k: __mmask8,
+    a: __m128h,
+    b: __m128h,
+) -> __m128h {
+    static_assert_sae!(SAE);
+    _mm_mask_min_round_sh::<SAE>(_mm_setzero_ph(), k, a, b)
+}
+
 #[allow(improper_ctypes)]
 extern "C" {
     #[link_name = "llvm.x86.avx512fp16.mask.cmp.sh"]
@@ -7276,1941 +8447,2606 @@ extern "C" {
     #[link_name = "llvm.x86.avx512fp16.vcomi.sh"]
     fn vcomish(a: __m128h, b: __m128h, imm8: i32, sae: i32) -> i32;
 
-    #[link_name = "llvm.x86.avx512fp16.add.ph.512"]
-    fn vaddph(a: __m512h, b: __m512h, rounding: i32) -> __m512h;
-    #[link_name = "llvm.x86.avx512fp16.sub.ph.512"]
-    fn vsubph(a: __m512h, b: __m512h, rounding: i32) -> __m512h;
-    #[link_name = "llvm.x86.avx512fp16.mul.ph.512"]
-    fn vmulph(a: __m512h, b: __m512h, rounding: i32) -> __m512h;
-    #[link_name = "llvm.x86.avx512fp16.div.ph.512"]
-    fn vdivph(a: __m512h, b: __m512h, rounding: i32) -> __m512h;
+    #[link_name = "llvm.x86.avx512fp16.add.ph.512"]
+    fn vaddph(a: __m512h, b: __m512h, rounding: i32) -> __m512h;
+    #[link_name = "llvm.x86.avx512fp16.sub.ph.512"]
+    fn vsubph(a: __m512h, b: __m512h, rounding: i32) -> __m512h;
+    #[link_name = "llvm.x86.avx512fp16.mul.ph.512"]
+    fn vmulph(a: __m512h, b: __m512h, rounding: i32) -> __m512h;
+    #[link_name = "llvm.x86.avx512fp16.div.ph.512"]
+    fn vdivph(a: __m512h, b: __m512h, rounding: i32) -> __m512h;
+
+    #[link_name = "llvm.x86.avx512fp16.mask.add.sh.round"]
+    fn vaddsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h;
+    #[link_name = "llvm.x86.avx512fp16.mask.sub.sh.round"]
+    fn vsubsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h;
+    #[link_name = "llvm.x86.avx512fp16.mask.mul.sh.round"]
+    fn vmulsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h;
+    #[link_name = "llvm.x86.avx512fp16.mask.div.sh.round"]
+    fn vdivsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h;
+
+    #[link_name = "llvm.x86.avx512fp16.mask.vfmul.cph.128"]
+    fn vfmulcph_128(a: __m128, b: __m128, src: __m128, k: __mmask8) -> __m128;
+    #[link_name = "llvm.x86.avx512fp16.mask.vfmul.cph.256"]
+    fn vfmulcph_256(a: __m256, b: __m256, src: __m256, k: __mmask8) -> __m256;
+
#[link_name = "llvm.x86.avx512fp16.mask.vfmul.cph.512"] + fn vfmulcph_512(a: __m512, b: __m512, src: __m512, k: __mmask16, rounding: i32) -> __m512; + #[link_name = "llvm.x86.avx512fp16.mask.vfmul.csh"] + fn vfmulcsh(a: __m128, b: __m128, src: __m128, k: __mmask8, rounding: i32) -> __m128; + + #[link_name = "llvm.x86.avx512fp16.mask.vfcmul.cph.128"] + fn vfcmulcph_128(a: __m128, b: __m128, src: __m128, k: __mmask8) -> __m128; + #[link_name = "llvm.x86.avx512fp16.mask.vfcmul.cph.256"] + fn vfcmulcph_256(a: __m256, b: __m256, src: __m256, k: __mmask8) -> __m256; + #[link_name = "llvm.x86.avx512fp16.mask.vfcmul.cph.512"] + fn vfcmulcph_512(a: __m512, b: __m512, src: __m512, k: __mmask16, rounding: i32) -> __m512; + #[link_name = "llvm.x86.avx512fp16.mask.vfcmul.csh"] + fn vfcmulcsh(a: __m128, b: __m128, src: __m128, k: __mmask8, rounding: i32) -> __m128; + + #[link_name = "llvm.x86.avx512fp16.mask.vfmadd.cph.128"] + fn vfmaddcph_mask3_128(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128; + #[link_name = "llvm.x86.avx512fp16.maskz.vfmadd.cph.128"] + fn vfmaddcph_maskz_128(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128; + #[link_name = "llvm.x86.avx512fp16.mask.vfmadd.cph.256"] + fn vfmaddcph_mask3_256(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256; + #[link_name = "llvm.x86.avx512fp16.maskz.vfmadd.cph.256"] + fn vfmaddcph_maskz_256(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256; + #[link_name = "llvm.x86.avx512fp16.mask.vfmadd.cph.512"] + fn vfmaddcph_mask3_512(a: __m512, b: __m512, c: __m512, k: __mmask16, rounding: i32) -> __m512; + #[link_name = "llvm.x86.avx512fp16.maskz.vfmadd.cph.512"] + fn vfmaddcph_maskz_512(a: __m512, b: __m512, c: __m512, k: __mmask16, rounding: i32) -> __m512; + #[link_name = "llvm.x86.avx512fp16.mask.vfmadd.csh"] + fn vfmaddcsh_mask(a: __m128, b: __m128, c: __m128, k: __mmask8, rounding: i32) -> __m128; + #[link_name = "llvm.x86.avx512fp16.maskz.vfmadd.csh"] + fn vfmaddcsh_maskz(a: __m128, b: __m128, c: __m128, k: __mmask8, rounding: i32) -> __m128; + + #[link_name = "llvm.x86.avx512fp16.mask.vfcmadd.cph.128"] + fn vfcmaddcph_mask3_128(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128; + #[link_name = "llvm.x86.avx512fp16.maskz.vfcmadd.cph.128"] + fn vfcmaddcph_maskz_128(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128; + #[link_name = "llvm.x86.avx512fp16.mask.vfcmadd.cph.256"] + fn vfcmaddcph_mask3_256(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256; + #[link_name = "llvm.x86.avx512fp16.maskz.vfcmadd.cph.256"] + fn vfcmaddcph_maskz_256(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256; + #[link_name = "llvm.x86.avx512fp16.mask.vfcmadd.cph.512"] + fn vfcmaddcph_mask3_512(a: __m512, b: __m512, c: __m512, k: __mmask16, rounding: i32) + -> __m512; + #[link_name = "llvm.x86.avx512fp16.maskz.vfcmadd.cph.512"] + fn vfcmaddcph_maskz_512(a: __m512, b: __m512, c: __m512, k: __mmask16, rounding: i32) + -> __m512; + #[link_name = "llvm.x86.avx512fp16.mask.vfcmadd.csh"] + fn vfcmaddcsh_mask(a: __m128, b: __m128, c: __m128, k: __mmask8, rounding: i32) -> __m128; + #[link_name = "llvm.x86.avx512fp16.maskz.vfcmadd.csh"] + fn vfcmaddcsh_maskz(a: __m128, b: __m128, c: __m128, k: __mmask8, rounding: i32) -> __m128; + + #[link_name = "llvm.x86.avx512fp16.vfmadd.ph.512"] + fn vfmaddph_512(a: __m512h, b: __m512h, c: __m512h, rounding: i32) -> __m512h; + #[link_name = "llvm.fma.f16"] + fn fmaf16(a: f16, b: f16, c: f16) -> f16; // TODO: use `crate::intrinsics::fmaf16` when it's available + #[link_name = 
"llvm.x86.avx512fp16.vfmadd.f16"] + fn vfmaddsh(a: f16, b: f16, c: f16, rounding: i32) -> f16; + + #[link_name = "llvm.x86.avx512fp16.vfmaddsub.ph.128"] + fn vfmaddsubph_128(a: __m128h, b: __m128h, c: __m128h) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.vfmaddsub.ph.256"] + fn vfmaddsubph_256(a: __m256h, b: __m256h, c: __m256h) -> __m256h; + #[link_name = "llvm.x86.avx512fp16.vfmaddsub.ph.512"] + fn vfmaddsubph_512(a: __m512h, b: __m512h, c: __m512h, rounding: i32) -> __m512h; + + #[link_name = "llvm.x86.avx512fp16.mask.rcp.ph.128"] + fn vrcpph_128(a: __m128h, src: __m128h, k: __mmask8) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.rcp.ph.256"] + fn vrcpph_256(a: __m256h, src: __m256h, k: __mmask16) -> __m256h; + #[link_name = "llvm.x86.avx512fp16.mask.rcp.ph.512"] + fn vrcpph_512(a: __m512h, src: __m512h, k: __mmask32) -> __m512h; + #[link_name = "llvm.x86.avx512fp16.mask.rcp.sh"] + fn vrcpsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8) -> __m128h; + + #[link_name = "llvm.x86.avx512fp16.mask.rsqrt.ph.128"] + fn vrsqrtph_128(a: __m128h, src: __m128h, k: __mmask8) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.rsqrt.ph.256"] + fn vrsqrtph_256(a: __m256h, src: __m256h, k: __mmask16) -> __m256h; + #[link_name = "llvm.x86.avx512fp16.mask.rsqrt.ph.512"] + fn vrsqrtph_512(a: __m512h, src: __m512h, k: __mmask32) -> __m512h; + #[link_name = "llvm.x86.avx512fp16.mask.rsqrt.sh"] + fn vrsqrtsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8) -> __m128h; + + #[link_name = "llvm.x86.avx512fp16.sqrt.ph.512"] + fn vsqrtph_512(a: __m512h, rounding: i32) -> __m512h; + #[link_name = "llvm.x86.avx512fp16.mask.sqrt.sh"] + fn vsqrtsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; + + #[link_name = "llvm.x86.avx512fp16.max.ph.128"] + fn vmaxph_128(a: __m128h, b: __m128h) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.max.ph.256"] + fn vmaxph_256(a: __m256h, b: __m256h) -> __m256h; + #[link_name = "llvm.x86.avx512fp16.max.ph.512"] + fn vmaxph_512(a: __m512h, b: __m512h, sae: i32) -> __m512h; + #[link_name = "llvm.x86.avx512fp16.mask.max.sh.round"] + fn vmaxsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, sae: i32) -> __m128h; + + #[link_name = "llvm.x86.avx512fp16.min.ph.128"] + fn vminph_128(a: __m128h, b: __m128h) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.min.ph.256"] + fn vminph_256(a: __m256h, b: __m256h) -> __m256h; + #[link_name = "llvm.x86.avx512fp16.min.ph.512"] + fn vminph_512(a: __m512h, b: __m512h, sae: i32) -> __m512h; + #[link_name = "llvm.x86.avx512fp16.mask.min.sh.round"] + fn vminsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, sae: i32) -> __m128h; + +} + +#[cfg(test)] +mod tests { + use crate::core_arch::x86::*; + use crate::mem::transmute; + use crate::ptr::{addr_of, addr_of_mut}; + use stdarch_test::simd_test; + + #[target_feature(enable = "avx512fp16")] + unsafe fn _mm_set1_pch(re: f16, im: f16) -> __m128h { + _mm_setr_ph(re, im, re, im, re, im, re, im) + } + + #[target_feature(enable = "avx512fp16")] + unsafe fn _mm256_set1_pch(re: f16, im: f16) -> __m256h { + _mm256_setr_ph( + re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, + ) + } + + #[target_feature(enable = "avx512fp16")] + unsafe fn _mm512_set1_pch(re: f16, im: f16) -> __m512h { + _mm512_setr_ph( + re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, + re, im, re, im, re, im, re, im, re, im, + ) + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_set_ph() { + let r = _mm_set_ph(1.0, 2.0, 3.0, 
4.0, 5.0, 6.0, 7.0, 8.0); + let e = _mm_setr_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_set_ph() { + let r = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let e = _mm256_setr_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_set_ph() { + let r = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let e = _mm512_setr_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_set_sh() { + let r = _mm_set_sh(1.0); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_set1_ph() { + let r = _mm_set1_ph(1.0); + let e = _mm_set_ph(1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_set1_ph() { + let r = _mm256_set1_ph(1.0); + let e = _mm256_set_ph( + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_set1_ph() { + let r = _mm512_set1_ph(1.0); + let e = _mm512_set_ph( + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_setr_ph() { + let r = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let e = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_setr_ph() { + let r = _mm256_setr_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let e = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_setr_ph() { + let r = _mm512_setr_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let e = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_setzero_ph() { + let r = _mm_setzero_ph(); + let e = _mm_set1_ph(0.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_setzero_ph() { + let r = _mm256_setzero_ph(); + let e = _mm256_set1_ph(0.0); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_setzero_ph() { + let r = _mm512_setzero_ph(); + let e = 
_mm512_set1_ph(0.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_castsi128_ph() { + let a = _mm_set1_epi16(0x3c00); + let r = _mm_castsi128_ph(a); + let e = _mm_set1_ph(1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castsi256_ph() { + let a = _mm256_set1_epi16(0x3c00); + let r = _mm256_castsi256_ph(a); + let e = _mm256_set1_ph(1.0); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castsi512_ph() { + let a = _mm512_set1_epi16(0x3c00); + let r = _mm512_castsi512_ph(a); + let e = _mm512_set1_ph(1.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_castph_si128() { + let a = _mm_set1_ph(1.0); + let r = _mm_castph_si128(a); + let e = _mm_set1_epi16(0x3c00); + assert_eq_m128i(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castph_si256() { + let a = _mm256_set1_ph(1.0); + let r = _mm256_castph_si256(a); + let e = _mm256_set1_epi16(0x3c00); + assert_eq_m256i(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph_si512() { + let a = _mm512_set1_ph(1.0); + let r = _mm512_castph_si512(a); + let e = _mm512_set1_epi16(0x3c00); + assert_eq_m512i(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_castps_ph() { + let a = _mm_castsi128_ps(_mm_set1_epi16(0x3c00)); + let r = _mm_castps_ph(a); + let e = _mm_set1_ph(1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castps_ph() { + let a = _mm256_castsi256_ps(_mm256_set1_epi16(0x3c00)); + let r = _mm256_castps_ph(a); + let e = _mm256_set1_ph(1.0); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castps_ph() { + let a = _mm512_castsi512_ps(_mm512_set1_epi16(0x3c00)); + let r = _mm512_castps_ph(a); + let e = _mm512_set1_ph(1.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_castph_ps() { + let a = _mm_castsi128_ph(_mm_set1_epi32(0x3f800000)); + let r = _mm_castph_ps(a); + let e = _mm_set1_ps(1.0); + assert_eq_m128(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castph_ps() { + let a = _mm256_castsi256_ph(_mm256_set1_epi32(0x3f800000)); + let r = _mm256_castph_ps(a); + let e = _mm256_set1_ps(1.0); + assert_eq_m256(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph_ps() { + let a = _mm512_castsi512_ph(_mm512_set1_epi32(0x3f800000)); + let r = _mm512_castph_ps(a); + let e = _mm512_set1_ps(1.0); + assert_eq_m512(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_castpd_ph() { + let a = _mm_castsi128_pd(_mm_set1_epi16(0x3c00)); + let r = _mm_castpd_ph(a); + let e = _mm_set1_ph(1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castpd_ph() { + let a = _mm256_castsi256_pd(_mm256_set1_epi16(0x3c00)); + let r = _mm256_castpd_ph(a); + let e = _mm256_set1_ph(1.0); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castpd_ph() { + let a = _mm512_castsi512_pd(_mm512_set1_epi16(0x3c00)); + let r = _mm512_castpd_ph(a); + let e = _mm512_set1_ph(1.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_castph_pd() { + let a = _mm_castsi128_ph(_mm_set1_epi64x(0x3ff0000000000000)); + let r = _mm_castph_pd(a); + let e = _mm_set1_pd(1.0); + assert_eq_m128d(r, e); + } + + #[simd_test(enable = "avx512fp16")] 
+ unsafe fn test_mm256_castph_pd() { + let a = _mm256_castsi256_ph(_mm256_set1_epi64x(0x3ff0000000000000)); + let r = _mm256_castph_pd(a); + let e = _mm256_set1_pd(1.0); + assert_eq_m256d(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph_pd() { + let a = _mm512_castsi512_ph(_mm512_set1_epi64(0x3ff0000000000000)); + let r = _mm512_castph_pd(a); + let e = _mm512_set1_pd(1.0); + assert_eq_m512d(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castph256_ph128() { + let a = _mm256_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., + ); + let r = _mm256_castph256_ph128(a); + let e = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph512_ph128() { + let a = _mm512_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., + ); + let r = _mm512_castph512_ph128(a); + let e = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph512_ph256() { + let a = _mm512_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., + ); + let r = _mm512_castph512_ph256(a); + let e = _mm256_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castph128_ph256() { + let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + let r = _mm256_castph128_ph256(a); + assert_eq_m128h(_mm256_castph256_ph128(r), a); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph128_ph512() { + let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + let r = _mm512_castph128_ph512(a); + assert_eq_m128h(_mm512_castph512_ph128(r), a); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph256_ph512() { + let a = _mm256_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., + ); + let r = _mm512_castph256_ph512(a); + assert_eq_m256h(_mm512_castph512_ph256(r), a); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_zextph128_ph256() { + let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + let r = _mm256_zextph128_ph256(a); + let e = _mm256_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 0., 0., 0., 0., 0., 0., 0., 0., + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_zextph128_ph512() { + let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + let r = _mm512_zextph128_ph512(a); + let e = _mm512_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_zextph256_ph512() { + let a = _mm256_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., + ); + let r = _mm512_zextph256_ph512(a); + let e = _mm512_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_cmp_round_sh_mask() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = 
_mm_cmp_round_sh_mask::<_CMP_EQ_OQ, _MM_FROUND_NO_EXC>(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_cmp_round_sh_mask() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_mask_cmp_round_sh_mask::<_CMP_EQ_OQ, _MM_FROUND_NO_EXC>(0, a, b); + assert_eq!(r, 0); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_cmp_sh_mask() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_cmp_sh_mask::<_CMP_EQ_OQ>(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_cmp_sh_mask() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_mask_cmp_sh_mask::<_CMP_EQ_OQ>(0, a, b); + assert_eq!(r, 0); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comi_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_comi_round_sh::<_CMP_EQ_OQ, _MM_FROUND_NO_EXC>(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comi_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_comi_sh::<_CMP_EQ_OQ>(a, b); + assert_eq!(r, 1); + } - #[link_name = "llvm.x86.avx512fp16.mask.add.sh.round"] - fn vaddsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; - #[link_name = "llvm.x86.avx512fp16.mask.sub.sh.round"] - fn vsubsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; - #[link_name = "llvm.x86.avx512fp16.mask.mul.sh.round"] - fn vmulsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; - #[link_name = "llvm.x86.avx512fp16.mask.div.sh.round"] - fn vdivsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comieq_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_comieq_sh(a, b); + assert_eq!(r, 1); + } - #[link_name = "llvm.x86.avx512fp16.mask.vfmul.cph.128"] - fn vfmulcph_128(a: __m128, b: __m128, src: __m128, k: __mmask8) -> __m128; - #[link_name = "llvm.x86.avx512fp16.mask.vfmul.cph.256"] - fn vfmulcph_256(a: __m256, b: __m256, src: __m256, k: __mmask8) -> __m256; - #[link_name = "llvm.x86.avx512fp16.mask.vfmul.cph.512"] - fn vfmulcph_512(a: __m512, b: __m512, src: __m512, k: __mmask16, rounding: i32) -> __m512; - #[link_name = "llvm.x86.avx512fp16.mask.vfmul.csh"] - fn vfmulcsh(a: __m128, b: __m128, src: __m128, k: __mmask8, rounding: i32) -> __m128; + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comige_sh() { + let a = _mm_set_sh(2.0); + let b = _mm_set_sh(1.0); + let r = _mm_comige_sh(a, b); + assert_eq!(r, 1); + } - #[link_name = "llvm.x86.avx512fp16.mask.vfcmul.cph.128"] - fn vfcmulcph_128(a: __m128, b: __m128, src: __m128, k: __mmask8) -> __m128; - #[link_name = "llvm.x86.avx512fp16.mask.vfcmul.cph.256"] - fn vfcmulcph_256(a: __m256, b: __m256, src: __m256, k: __mmask8) -> __m256; - #[link_name = "llvm.x86.avx512fp16.mask.vfcmul.cph.512"] - fn vfcmulcph_512(a: __m512, b: __m512, src: __m512, k: __mmask16, rounding: i32) -> __m512; - #[link_name = "llvm.x86.avx512fp16.mask.vfcmul.csh"] - fn vfcmulcsh(a: __m128, b: __m128, src: __m128, k: __mmask8, rounding: i32) -> __m128; + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comigt_sh() { + let a = _mm_set_sh(2.0); + let b = _mm_set_sh(1.0); + let r = _mm_comigt_sh(a, b); + assert_eq!(r, 1); + } - #[link_name = "llvm.x86.avx512fp16.mask.vfmadd.cph.128"] - fn vfmaddcph_mask3_128(a: __m128, b: __m128, c: __m128, k: 
__mmask8) -> __m128; - #[link_name = "llvm.x86.avx512fp16.maskz.vfmadd.cph.128"] - fn vfmaddcph_maskz_128(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128; - #[link_name = "llvm.x86.avx512fp16.mask.vfmadd.cph.256"] - fn vfmaddcph_mask3_256(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256; - #[link_name = "llvm.x86.avx512fp16.maskz.vfmadd.cph.256"] - fn vfmaddcph_maskz_256(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256; - #[link_name = "llvm.x86.avx512fp16.mask.vfmadd.cph.512"] - fn vfmaddcph_mask3_512(a: __m512, b: __m512, c: __m512, k: __mmask16, rounding: i32) -> __m512; - #[link_name = "llvm.x86.avx512fp16.maskz.vfmadd.cph.512"] - fn vfmaddcph_maskz_512(a: __m512, b: __m512, c: __m512, k: __mmask16, rounding: i32) -> __m512; - #[link_name = "llvm.x86.avx512fp16.mask.vfmadd.csh"] - fn vfmaddcsh_mask(a: __m128, b: __m128, c: __m128, k: __mmask8, rounding: i32) -> __m128; - #[link_name = "llvm.x86.avx512fp16.maskz.vfmadd.csh"] - fn vfmaddcsh_maskz(a: __m128, b: __m128, c: __m128, k: __mmask8, rounding: i32) -> __m128; + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comile_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_comile_sh(a, b); + assert_eq!(r, 1); + } - #[link_name = "llvm.x86.avx512fp16.mask.vfcmadd.cph.128"] - fn vfcmaddcph_mask3_128(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128; - #[link_name = "llvm.x86.avx512fp16.maskz.vfcmadd.cph.128"] - fn vfcmaddcph_maskz_128(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128; - #[link_name = "llvm.x86.avx512fp16.mask.vfcmadd.cph.256"] - fn vfcmaddcph_mask3_256(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256; - #[link_name = "llvm.x86.avx512fp16.maskz.vfcmadd.cph.256"] - fn vfcmaddcph_maskz_256(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256; - #[link_name = "llvm.x86.avx512fp16.mask.vfcmadd.cph.512"] - fn vfcmaddcph_mask3_512(a: __m512, b: __m512, c: __m512, k: __mmask16, rounding: i32) - -> __m512; - #[link_name = "llvm.x86.avx512fp16.maskz.vfcmadd.cph.512"] - fn vfcmaddcph_maskz_512(a: __m512, b: __m512, c: __m512, k: __mmask16, rounding: i32) - -> __m512; - #[link_name = "llvm.x86.avx512fp16.mask.vfcmadd.csh"] - fn vfcmaddcsh_mask(a: __m128, b: __m128, c: __m128, k: __mmask8, rounding: i32) -> __m128; - #[link_name = "llvm.x86.avx512fp16.maskz.vfcmadd.csh"] - fn vfcmaddcsh_maskz(a: __m128, b: __m128, c: __m128, k: __mmask8, rounding: i32) -> __m128; + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comilt_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_comilt_sh(a, b); + assert_eq!(r, 1); + } - #[link_name = "llvm.x86.avx512fp16.vfmadd.ph.512"] - fn vfmaddph_512(a: __m512h, b: __m512h, c: __m512h, rounding: i32) -> __m512h; - #[link_name = "llvm.fma.f16"] - fn fmaf16(a: f16, b: f16, c: f16) -> f16; // TODO: use `crate::intrinsics::fmaf16` when it's available - #[link_name = "llvm.x86.avx512fp16.vfmadd.f16"] - fn vfmaddsh(a: f16, b: f16, c: f16, rounding: i32) -> f16; + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comineq_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_comineq_sh(a, b); + assert_eq!(r, 1); + } - #[link_name = "llvm.x86.avx512fp16.vfmaddsub.ph.128"] - fn vfmaddsubph_128(a: __m128h, b: __m128h, c: __m128h) -> __m128h; - #[link_name = "llvm.x86.avx512fp16.vfmaddsub.ph.256"] - fn vfmaddsubph_256(a: __m256h, b: __m256h, c: __m256h) -> __m256h; - #[link_name = "llvm.x86.avx512fp16.vfmaddsub.ph.512"] - fn vfmaddsubph_512(a: __m512h, b: __m512h, c: __m512h, rounding: 
i32) -> __m512h; + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_ucomieq_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_ucomieq_sh(a, b); + assert_eq!(r, 1); + } -} + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_ucomige_sh() { + let a = _mm_set_sh(2.0); + let b = _mm_set_sh(1.0); + let r = _mm_ucomige_sh(a, b); + assert_eq!(r, 1); + } -#[cfg(test)] -mod tests { - use crate::core_arch::x86::*; - use crate::mem::transmute; - use crate::ptr::{addr_of, addr_of_mut}; - use stdarch_test::simd_test; + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_ucomigt_sh() { + let a = _mm_set_sh(2.0); + let b = _mm_set_sh(1.0); + let r = _mm_ucomigt_sh(a, b); + assert_eq!(r, 1); + } - #[target_feature(enable = "avx512fp16")] - unsafe fn _mm_set1_pch(re: f16, im: f16) -> __m128h { - _mm_setr_ph(re, im, re, im, re, im, re, im) + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_ucomile_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_ucomile_sh(a, b); + assert_eq!(r, 1); } - #[target_feature(enable = "avx512fp16")] - unsafe fn _mm256_set1_pch(re: f16, im: f16) -> __m256h { - _mm256_setr_ph( - re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, - ) + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_ucomilt_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_ucomilt_sh(a, b); + assert_eq!(r, 1); } - #[target_feature(enable = "avx512fp16")] - unsafe fn _mm512_set1_pch(re: f16, im: f16) -> __m512h { - _mm512_setr_ph( - re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, - re, im, re, im, re, im, re, im, re, im, - ) + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_ucomineq_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_ucomineq_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_load_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_load_ph(addr_of!(a).cast()); + assert_eq_m128h(a, b); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_load_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_load_ph(addr_of!(a).cast()); + assert_eq_m256h(a, b); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_set_ph() { - let r = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let e = _mm_setr_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - assert_eq_m128h(r, e); + unsafe fn test_mm512_load_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_load_ph(addr_of!(a).cast()); + assert_eq_m512h(a, b); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_set_ph() { - let r = _mm256_set_ph( + unsafe fn test_mm_load_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_load_sh(addr_of!(a).cast()); + assert_eq_m128h(a, b); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_load_sh() { + let a = _mm_set_sh(1.0); + let src = _mm_set_sh(2.); + let b = _mm_mask_load_sh(src, 1, addr_of!(a).cast()); + assert_eq_m128h(a, b); + let b = _mm_mask_load_sh(src, 0, addr_of!(a).cast()); + assert_eq_m128h(src, b); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_load_sh() { + let a = _mm_set_sh(1.0); + let b = 
_mm_maskz_load_sh(1, addr_of!(a).cast()); + assert_eq_m128h(a, b); + let b = _mm_maskz_load_sh(0, addr_of!(a).cast()); + assert_eq_m128h(_mm_setzero_ph(), b); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_loadu_ph() { + let array = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]; + let r = _mm_loadu_ph(array.as_ptr()); + let e = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_loadu_ph() { + let array = [ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); + ]; + let r = _mm256_loadu_ph(array.as_ptr()); let e = _mm256_setr_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_set_ph() { - let r = _mm512_set_ph( + unsafe fn test_mm512_loadu_ph() { + let array = [ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, - ); + ]; + let r = _mm512_loadu_ph(array.as_ptr()); let e = _mm512_setr_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_set_sh() { - let r = _mm_set_sh(1.0); - let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0); + unsafe fn test_mm_move_sh() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_sh(9.0); + let r = _mm_move_sh(a, b); + let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 9.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_set1_ph() { - let r = _mm_set1_ph(1.0); - let e = _mm_set_ph(1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0); + unsafe fn test_mm_mask_move_sh() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_sh(9.0); + let src = _mm_set_sh(10.0); + let r = _mm_mask_move_sh(src, 0, a, b); + let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 10.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_set1_ph() { - let r = _mm256_set1_ph(1.0); - let e = _mm256_set_ph( - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + unsafe fn test_mm_maskz_move_sh() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_sh(9.0); + let r = _mm_maskz_move_sh(0, a, b); + let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 0.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_store_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let mut b = _mm_setzero_ph(); + _mm_store_ph(addr_of_mut!(b).cast(), a); + assert_eq_m128h(a, b); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_store_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - assert_eq_m256h(r, e); + let mut b = _mm256_setzero_ph(); + 
_mm256_store_ph(addr_of_mut!(b).cast(), a); + assert_eq_m256h(a, b); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_set1_ph() { - let r = _mm512_set1_ph(1.0); - let e = _mm512_set_ph( - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + unsafe fn test_mm512_store_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, ); - assert_eq_m512h(r, e); + let mut b = _mm512_setzero_ph(); + _mm512_store_ph(addr_of_mut!(b).cast(), a); + assert_eq_m512h(a, b); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_setr_ph() { - let r = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let e = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - assert_eq_m128h(r, e); + unsafe fn test_mm_store_sh() { + let a = _mm_set_sh(1.0); + let mut b = _mm_setzero_ph(); + _mm_store_sh(addr_of_mut!(b).cast(), a); + assert_eq_m128h(a, b); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_setr_ph() { - let r = _mm256_setr_ph( + unsafe fn test_mm_mask_store_sh() { + let a = _mm_set_sh(1.0); + let mut b = _mm_setzero_ph(); + _mm_mask_store_sh(addr_of_mut!(b).cast(), 0, a); + assert_eq_m128h(_mm_setzero_ph(), b); + _mm_mask_store_sh(addr_of_mut!(b).cast(), 1, a); + assert_eq_m128h(a, b); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_storeu_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let mut array = [0.0; 8]; + _mm_storeu_ph(array.as_mut_ptr(), a); + assert_eq_m128h(a, _mm_loadu_ph(array.as_ptr())); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_storeu_ph() { + let a = _mm256_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - let e = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, - ); - assert_eq_m256h(r, e); + let mut array = [0.0; 16]; + _mm256_storeu_ph(array.as_mut_ptr(), a); + assert_eq_m256h(a, _mm256_loadu_ph(array.as_ptr())); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_setr_ph() { - let r = _mm512_setr_ph( + unsafe fn test_mm512_storeu_ph() { + let a = _mm512_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, ); - let e = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - assert_eq_m512h(r, e); + let mut array = [0.0; 32]; + _mm512_storeu_ph(array.as_mut_ptr(), a); + assert_eq_m512h(a, _mm512_loadu_ph(array.as_ptr())); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_setzero_ph() { - let r = _mm_setzero_ph(); - let e = _mm_set1_ph(0.0); + unsafe fn test_mm_add_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let r = _mm_add_ph(a, b); + let e = _mm_set1_ph(9.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_setzero_ph() { - let r = _mm256_setzero_ph(); - let e = _mm256_set1_ph(0.0); - assert_eq_m256h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn 
test_mm512_setzero_ph() { - let r = _mm512_setzero_ph(); - let e = _mm512_set1_ph(0.0); - assert_eq_m512h(r, e); + unsafe fn test_mm_mask_add_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm_mask_add_ph(src, 0b01010101, a, b); + let e = _mm_set_ph(10., 9., 12., 9., 14., 9., 16., 9.); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_castsi128_ph() { - let a = _mm_set1_epi16(0x3c00); - let r = _mm_castsi128_ph(a); - let e = _mm_set1_ph(1.0); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_add_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let r = _mm_maskz_add_ph(0b01010101, a, b); + let e = _mm_set_ph(0., 9., 0., 9., 0., 9., 0., 9.); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castsi256_ph() { - let a = _mm256_set1_epi16(0x3c00); - let r = _mm256_castsi256_ph(a); - let e = _mm256_set1_ph(1.0); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_add_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let r = _mm256_add_ph(a, b); + let e = _mm256_set1_ph(17.0); assert_eq_m256h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castsi512_ph() { - let a = _mm512_set1_epi16(0x3c00); - let r = _mm512_castsi512_ph(a); - let e = _mm512_set1_ph(1.0); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_castph_si128() { - let a = _mm_set1_ph(1.0); - let r = _mm_castph_si128(a); - let e = _mm_set1_epi16(0x3c00); - assert_eq_m128i(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castph_si256() { - let a = _mm256_set1_ph(1.0); - let r = _mm256_castph_si256(a); - let e = _mm256_set1_epi16(0x3c00); - assert_eq_m256i(r, e); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_add_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let src = _mm256_set_ph( + 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., + ); + let r = _mm256_mask_add_ph(src, 0b0101010101010101, a, b); + let e = _mm256_set_ph( + 18., 17., 20., 17., 22., 17., 24., 17., 26., 17., 28., 17., 30., 17., 32., 17., + ); + assert_eq_m256h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castph_si512() { - let a = _mm512_set1_ph(1.0); - let r = _mm512_castph_si512(a); - let e = _mm512_set1_epi16(0x3c00); - assert_eq_m512i(r, e); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_add_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let r = _mm256_maskz_add_ph(0b0101010101010101, a, b); + let e = _mm256_set_ph( + 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., + ); + assert_eq_m256h(r, e); } #[simd_test(enable = 
"avx512fp16")] - unsafe fn test_mm_castps_ph() { - let a = _mm_castsi128_ps(_mm_set1_epi16(0x3c00)); - let r = _mm_castps_ph(a); - let e = _mm_set1_ph(1.0); - assert_eq_m128h(r, e); + unsafe fn test_mm512_add_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_add_ph(a, b); + let e = _mm512_set1_ph(33.0); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castps_ph() { - let a = _mm256_castsi256_ps(_mm256_set1_epi16(0x3c00)); - let r = _mm256_castps_ph(a); - let e = _mm256_set1_ph(1.0); - assert_eq_m256h(r, e); + unsafe fn test_mm512_mask_add_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let src = _mm512_set_ph( + 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., + 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + ); + let r = _mm512_mask_add_ph(src, 0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 34., 33., 36., 33., 38., 33., 40., 33., 42., 33., 44., 33., 46., 33., 48., 33., 50., + 33., 52., 33., 54., 33., 56., 33., 58., 33., 60., 33., 62., 33., 64., 33., + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castps_ph() { - let a = _mm512_castsi512_ps(_mm512_set1_epi16(0x3c00)); - let r = _mm512_castps_ph(a); - let e = _mm512_set1_ph(1.0); + unsafe fn test_mm512_maskz_add_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_maskz_add_ph(0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., + 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., + ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_castph_ps() { - let a = _mm_castsi128_ph(_mm_set1_epi32(0x3f800000)); - let r = _mm_castph_ps(a); - let e = _mm_set1_ps(1.0); - assert_eq_m128(r, e); + unsafe fn test_mm512_add_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 
1.0, + ); + let r = _mm512_add_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set1_ph(33.0); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castph_ps() { - let a = _mm256_castsi256_ph(_mm256_set1_epi32(0x3f800000)); - let r = _mm256_castph_ps(a); - let e = _mm256_set1_ps(1.0); - assert_eq_m256(r, e); + unsafe fn test_mm512_mask_add_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let src = _mm512_set_ph( + 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., + 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + ); + let r = _mm512_mask_add_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 34., 33., 36., 33., 38., 33., 40., 33., 42., 33., 44., 33., 46., 33., 48., 33., 50., + 33., 52., 33., 54., 33., 56., 33., 58., 33., 60., 33., 62., 33., 64., 33., + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castph_ps() { - let a = _mm512_castsi512_ph(_mm512_set1_epi32(0x3f800000)); - let r = _mm512_castph_ps(a); - let e = _mm512_set1_ps(1.0); - assert_eq_m512(r, e); + unsafe fn test_mm512_maskz_add_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_maskz_add_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., + 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_castpd_ph() { - let a = _mm_castsi128_pd(_mm_set1_epi16(0x3c00)); - let r = _mm_castpd_ph(a); - let e = _mm_set1_ph(1.0); + unsafe fn test_mm_add_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_set_sh(3.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castpd_ph() { - let a = _mm256_castsi256_pd(_mm256_set1_epi16(0x3c00)); - let r = _mm256_castpd_ph(a); - let e = _mm256_set1_ph(1.0); - assert_eq_m256h(r, e); + unsafe fn test_mm_mask_add_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let src = _mm_set_sh(4.0); + let r = _mm_mask_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, + ); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 1, a, b, + ); + let e = _mm_set_sh(3.0); + assert_eq_m128h(r, e); } 
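+
+    // An illustrative sketch (not one of the original tests): the ROUNDING
+    // const generic composes a directed rounding mode with _MM_FROUND_NO_EXC,
+    // e.g. round-toward-zero with exceptions suppressed.
+    #[allow(dead_code)]
+    #[target_feature(enable = "avx512fp16")]
+    unsafe fn add_round_sh_rounding_sketch() -> __m128h {
+        let a = _mm_set_sh(1.5);
+        let b = _mm_set_sh(2.25);
+        // 1.5 + 2.25 = 3.75 exactly; the rounding mode only matters for inexact sums.
+        _mm_add_round_sh::<{ _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC }>(a, b)
+    }
+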
#[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castpd_ph() { - let a = _mm512_castsi512_pd(_mm512_set1_epi16(0x3c00)); - let r = _mm512_castpd_ph(a); - let e = _mm512_set1_ph(1.0); - assert_eq_m512h(r, e); + unsafe fn test_mm_maskz_add_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = + _mm_maskz_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = + _mm_maskz_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_set_sh(3.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_castph_pd() { - let a = _mm_castsi128_ph(_mm_set1_epi64x(0x3ff0000000000000)); - let r = _mm_castph_pd(a); - let e = _mm_set1_pd(1.0); - assert_eq_m128d(r, e); + unsafe fn test_mm_add_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_add_sh(a, b); + let e = _mm_set_sh(3.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castph_pd() { - let a = _mm256_castsi256_ph(_mm256_set1_epi64x(0x3ff0000000000000)); - let r = _mm256_castph_pd(a); - let e = _mm256_set1_pd(1.0); - assert_eq_m256d(r, e); + unsafe fn test_mm_mask_add_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let src = _mm_set_sh(4.0); + let r = _mm_mask_add_sh(src, 0, a, b); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_add_sh(src, 1, a, b); + let e = _mm_set_sh(3.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castph_pd() { - let a = _mm512_castsi512_ph(_mm512_set1_epi64(0x3ff0000000000000)); - let r = _mm512_castph_pd(a); - let e = _mm512_set1_pd(1.0); - assert_eq_m512d(r, e); + unsafe fn test_mm_maskz_add_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_maskz_add_sh(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = _mm_maskz_add_sh(1, a, b); + let e = _mm_set_sh(3.0); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castph256_ph128() { - let a = _mm256_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., - ); - let r = _mm256_castph256_ph128(a); - let e = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_sub_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let r = _mm_sub_ph(a, b); + let e = _mm_set_ph(-7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castph512_ph128() { - let a = _mm512_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., - 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., - ); - let r = _mm512_castph512_ph128(a); - let e = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_sub_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm_mask_sub_ph(src, 0b01010101, a, b); + let e = _mm_set_ph(10., -5., 12., -1., 14., 3., 16., 7.); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castph512_ph256() { - let a = _mm512_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 
11., 12., 13., 14., 15., 16., 17., 18., 19., - 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_sub_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let r = _mm_maskz_sub_ph(0b01010101, a, b); + let e = _mm_set_ph(0., -5., 0., -1., 0., 3., 0., 7.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_sub_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, ); - let r = _mm512_castph512_ph256(a); - let e = _mm256_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., + let r = _mm256_sub_ph(a, b); + let e = _mm256_set_ph( + -15.0, -13.0, -11.0, -9.0, -7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, + 15.0, ); assert_eq_m256h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castph128_ph256() { - let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); - let r = _mm256_castph128_ph256(a); - assert_eq_m128h(_mm256_castph256_ph128(r), a); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castph128_ph512() { - let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); - let r = _mm512_castph128_ph512(a); - assert_eq_m128h(_mm512_castph512_ph128(r), a); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castph256_ph512() { - let a = _mm256_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_sub_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - let r = _mm512_castph256_ph512(a); - assert_eq_m256h(_mm512_castph512_ph256(r), a); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let src = _mm256_set_ph( + 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., + ); + let r = _mm256_mask_sub_ph(src, 0b0101010101010101, a, b); + let e = _mm256_set_ph( + 18., -13., 20., -9., 22., -5., 24., -1., 26., 3., 28., 7., 30., 11., 32., 15., + ); + assert_eq_m256h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_zextph128_ph256() { - let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); - let r = _mm256_zextph128_ph256(a); - let e = _mm256_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 0., 0., 0., 0., 0., 0., 0., 0., + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_sub_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let r = _mm256_maskz_sub_ph(0b0101010101010101, a, b); + let e = _mm256_set_ph( + 0., -13., 0., -9., 0., -5., 0., -1., 0., 3., 0., 7., 0., 11., 0., 15., ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_zextph128_ph512() { - let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); - let r = _mm512_zextph128_ph512(a); - let e = _mm512_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., - 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 
unsafe fn test_mm512_sub_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_sub_ph(a, b); + let e = _mm512_set_ph( + -31.0, -29.0, -27.0, -25.0, -23.0, -21.0, -19.0, -17.0, -15.0, -13.0, -11.0, -9.0, + -7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, + 23.0, 25.0, 27.0, 29.0, 31.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_zextph256_ph512() { - let a = _mm256_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., + unsafe fn test_mm512_mask_sub_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, ); - let r = _mm512_zextph256_ph512(a); - let e = _mm512_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 0., 0., 0., 0., - 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let src = _mm512_set_ph( + 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., + 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + ); + let r = _mm512_mask_sub_ph(src, 0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 34., -29., 36., -25., 38., -21., 40., -17., 42., -13., 44., -9., 46., -5., 48., -1., + 50., 3., 52., 7., 54., 11., 56., 15., 58., 19., 60., 23., 62., 27., 64., 31., ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_cmp_round_sh_mask() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = _mm_cmp_round_sh_mask::<_CMP_EQ_OQ, _MM_FROUND_NO_EXC>(a, b); - assert_eq!(r, 1); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_cmp_round_sh_mask() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = _mm_mask_cmp_round_sh_mask::<_CMP_EQ_OQ, _MM_FROUND_NO_EXC>(0, a, b); - assert_eq!(r, 0); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_cmp_sh_mask() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = _mm_cmp_sh_mask::<_CMP_EQ_OQ>(a, b); - assert_eq!(r, 1); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_cmp_sh_mask() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = _mm_mask_cmp_sh_mask::<_CMP_EQ_OQ>(0, a, b); - assert_eq!(r, 0); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comi_round_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = _mm_comi_round_sh::<_CMP_EQ_OQ, _MM_FROUND_NO_EXC>(a, b); - assert_eq!(r, 1); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comi_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = _mm_comi_sh::<_CMP_EQ_OQ>(a, b); - assert_eq!(r, 1); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comieq_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = 
_mm_comieq_sh(a, b); - assert_eq!(r, 1); + unsafe fn test_mm512_maskz_sub_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_maskz_sub_ph(0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 0., -29., 0., -25., 0., -21., 0., -17., 0., -13., 0., -9., 0., -5., 0., -1., 0., 3., + 0., 7., 0., 11., 0., 15., 0., 19., 0., 23., 0., 27., 0., 31., + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comige_sh() { - let a = _mm_set_sh(2.0); - let b = _mm_set_sh(1.0); - let r = _mm_comige_sh(a, b); - assert_eq!(r, 1); + unsafe fn test_mm512_sub_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_sub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set_ph( + -31.0, -29.0, -27.0, -25.0, -23.0, -21.0, -19.0, -17.0, -15.0, -13.0, -11.0, -9.0, + -7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, + 23.0, 25.0, 27.0, 29.0, 31.0, + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comigt_sh() { - let a = _mm_set_sh(2.0); - let b = _mm_set_sh(1.0); - let r = _mm_comigt_sh(a, b); - assert_eq!(r, 1); + unsafe fn test_mm512_mask_sub_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let src = _mm512_set_ph( + 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., + 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + ); + let r = _mm512_mask_sub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 34., -29., 36., -25., 38., -21., 40., -17., 42., -13., 44., -9., 46., -5., 48., -1., + 50., 3., 52., 7., 54., 11., 56., 15., 58., 19., 60., 23., 62., 27., 64., 31., + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comile_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_comile_sh(a, b); - assert_eq!(r, 1); + unsafe fn test_mm512_maskz_sub_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 
23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_maskz_sub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 0., -29., 0., -25., 0., -21., 0., -17., 0., -13., 0., -9., 0., -5., 0., -1., 0., 3., + 0., 7., 0., 11., 0., 15., 0., 19., 0., 23., 0., 27., 0., 31., + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comilt_sh() { + unsafe fn test_mm_sub_round_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); - let r = _mm_comilt_sh(a, b); - assert_eq!(r, 1); + let r = _mm_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_set_sh(-1.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comineq_sh() { + unsafe fn test_mm_mask_sub_round_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); - let r = _mm_comineq_sh(a, b); - assert_eq!(r, 1); + let src = _mm_set_sh(4.0); + let r = _mm_mask_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, + ); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 1, a, b, + ); + let e = _mm_set_sh(-1.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_ucomieq_sh() { + unsafe fn test_mm_maskz_sub_round_sh() { let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = _mm_ucomieq_sh(a, b); - assert_eq!(r, 1); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_ucomige_sh() { - let a = _mm_set_sh(2.0); - let b = _mm_set_sh(1.0); - let r = _mm_ucomige_sh(a, b); - assert_eq!(r, 1); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_ucomigt_sh() { - let a = _mm_set_sh(2.0); - let b = _mm_set_sh(1.0); - let r = _mm_ucomigt_sh(a, b); - assert_eq!(r, 1); + let b = _mm_set_sh(2.0); + let r = + _mm_maskz_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = + _mm_maskz_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_set_sh(-1.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_ucomile_sh() { + unsafe fn test_mm_sub_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); - let r = _mm_ucomile_sh(a, b); - assert_eq!(r, 1); + let r = _mm_sub_sh(a, b); + let e = _mm_set_sh(-1.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_ucomilt_sh() { + unsafe fn test_mm_mask_sub_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); - let r = _mm_ucomilt_sh(a, b); - assert_eq!(r, 1); + let src = _mm_set_sh(4.0); + let r = _mm_mask_sub_sh(src, 0, a, b); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_sub_sh(src, 1, a, b); + let e = _mm_set_sh(-1.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_ucomineq_sh() { + unsafe fn test_mm_maskz_sub_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); - let r = _mm_ucomineq_sh(a, b); - assert_eq!(r, 1); + let r = _mm_maskz_sub_sh(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = _mm_maskz_sub_sh(1, a, b); + let e = _mm_set_sh(-1.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_load_ph() { + unsafe fn test_mm_mul_ph() { let a = 
_mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_load_ph(addr_of!(a).cast()); - assert_eq_m128h(a, b); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let r = _mm_mul_ph(a, b); + let e = _mm_set_ph(8.0, 14.0, 18.0, 20.0, 20.0, 18.0, 14.0, 8.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_load_ph() { + unsafe fn test_mm_mask_mul_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm_mask_mul_ph(src, 0b01010101, a, b); + let e = _mm_set_ph(10., 14., 12., 20., 14., 18., 16., 8.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_mul_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let r = _mm_maskz_mul_ph(0b01010101, a, b); + let e = _mm_set_ph(0., 14., 0., 20., 0., 18., 0., 8.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mul_ph() { let a = _mm256_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - let b = _mm256_load_ph(addr_of!(a).cast()); - assert_eq_m256h(a, b); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let r = _mm256_mul_ph(a, b); + let e = _mm256_set_ph( + 16.0, 30.0, 42.0, 52.0, 60.0, 66.0, 70.0, 72.0, 72.0, 70.0, 66.0, 60.0, 52.0, 42.0, + 30.0, 16.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_mul_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let src = _mm256_set_ph( + 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., + ); + let r = _mm256_mask_mul_ph(src, 0b0101010101010101, a, b); + let e = _mm256_set_ph( + 18., 30., 20., 52., 22., 66., 24., 72., 26., 70., 28., 60., 30., 42., 32., 16., + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_mul_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let r = _mm256_maskz_mul_ph(0b0101010101010101, a, b); + let e = _mm256_set_ph( + 0., 30., 0., 52., 0., 66., 0., 72., 0., 70., 0., 60., 0., 42., 0., 16., + ); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_load_ph() { + unsafe fn test_mm512_mul_ph() { let a = _mm512_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, ); - let b = _mm512_load_ph(addr_of!(a).cast()); - assert_eq_m512h(a, b); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_mul_ph(a, b); + let e = _mm512_set_ph( + 32.0, 62.0, 90.0, 116.0, 140.0, 162.0, 182.0, 200.0, 216.0, 230.0, 
242.0, 252.0, 260.0, + 266.0, 270.0, 272.0, 272.0, 270.0, 266.0, 260.0, 252.0, 242.0, 230.0, 216.0, 200.0, + 182.0, 162.0, 140.0, 116.0, 90.0, 62.0, 32.0, + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_load_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_load_sh(addr_of!(a).cast()); - assert_eq_m128h(a, b); + unsafe fn test_mm512_mask_mul_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let src = _mm512_set_ph( + 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., + 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + ); + let r = _mm512_mask_mul_ph(src, 0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 34., 62., 36., 116., 38., 162., 40., 200., 42., 230., 44., 252., 46., 266., 48., 272., + 50., 270., 52., 260., 54., 242., 56., 216., 58., 182., 60., 140., 62., 90., 64., 32., + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_load_sh() { - let a = _mm_set_sh(1.0); - let src = _mm_set_sh(2.); - let b = _mm_mask_load_sh(src, 1, addr_of!(a).cast()); - assert_eq_m128h(a, b); - let b = _mm_mask_load_sh(src, 0, addr_of!(a).cast()); - assert_eq_m128h(src, b); + unsafe fn test_mm512_maskz_mul_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_maskz_mul_ph(0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 0., 62., 0., 116., 0., 162., 0., 200., 0., 230., 0., 252., 0., 266., 0., 272., 0., + 270., 0., 260., 0., 242., 0., 216., 0., 182., 0., 140., 0., 90., 0., 32., + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_load_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_maskz_load_sh(1, addr_of!(a).cast()); - assert_eq_m128h(a, b); - let b = _mm_maskz_load_sh(0, addr_of!(a).cast()); - assert_eq_m128h(_mm_setzero_ph(), b); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_loadu_ph() { - let array = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]; - let r = _mm_loadu_ph(array.as_ptr()); - let e = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_loadu_ph() { - let array = [ - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ]; - let r = _mm256_loadu_ph(array.as_ptr()); - let e = _mm256_setr_ph( + unsafe fn test_mm512_mul_round_ph() { + let a = _mm512_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 
21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_mul_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set_ph( + 32.0, 62.0, 90.0, 116.0, 140.0, 162.0, 182.0, 200.0, 216.0, 230.0, 242.0, 252.0, 260.0, + 266.0, 270.0, 272.0, 272.0, 270.0, 266.0, 260.0, 252.0, 242.0, 230.0, 216.0, 200.0, + 182.0, 162.0, 140.0, 116.0, 90.0, 62.0, 32.0, ); - assert_eq_m256h(r, e); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_loadu_ph() { - let array = [ - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ]; - let r = _mm512_loadu_ph(array.as_ptr()); - let e = _mm512_setr_ph( + unsafe fn test_mm512_mask_mul_round_ph() { + let a = _mm512_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let src = _mm512_set_ph( + 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., + 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + ); + let r = _mm512_mask_mul_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 34., 62., 36., 116., 38., 162., 40., 200., 42., 230., 44., 252., 46., 266., 48., 272., + 50., 270., 52., 260., 54., 242., 56., 216., 58., 182., 60., 140., 62., 90., 64., 32., + ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_move_sh() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_sh(9.0); - let r = _mm_move_sh(a, b); - let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 9.0); - assert_eq_m128h(r, e); + unsafe fn test_mm512_maskz_mul_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_maskz_mul_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 0., 62., 0., 116., 0., 162., 0., 200., 0., 230., 0., 252., 0., 266., 0., 272., 0., + 270., 0., 260., 0., 242., 0., 216., 0., 182., 0., 140., 0., 90., 0., 32., + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_move_sh() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_sh(9.0); - let src = _mm_set_sh(10.0); - let r = _mm_mask_move_sh(src, 0, a, b); - let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 10.0); + unsafe fn test_mm_mul_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_set_sh(2.0); 
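+        // Scalar `_sh` intrinsics compute only lane 0; lanes 1..7 of the result
+        // are copied from `a`, and `_mm_set_sh` zeroes those lanes in both the
+        // result and the expected value, so the full-vector compare is exact.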
assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_move_sh() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_sh(9.0); - let r = _mm_maskz_move_sh(0, a, b); - let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 0.0); + unsafe fn test_mm_mask_mul_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let src = _mm_set_sh(4.0); + let r = _mm_mask_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, + ); + let e = _mm_set_sh(4.0); assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_store_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let mut b = _mm_setzero_ph(); - _mm_store_ph(addr_of_mut!(b).cast(), a); - assert_eq_m128h(a, b); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_store_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + let r = _mm_mask_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 1, a, b, ); - let mut b = _mm256_setzero_ph(); - _mm256_store_ph(addr_of_mut!(b).cast(), a); - assert_eq_m256h(a, b); + let e = _mm_set_sh(2.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_store_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let mut b = _mm512_setzero_ph(); - _mm512_store_ph(addr_of_mut!(b).cast(), a); - assert_eq_m512h(a, b); + unsafe fn test_mm_maskz_mul_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = + _mm_maskz_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = + _mm_maskz_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_set_sh(2.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_store_sh() { + unsafe fn test_mm_mul_sh() { let a = _mm_set_sh(1.0); - let mut b = _mm_setzero_ph(); - _mm_store_sh(addr_of_mut!(b).cast(), a); - assert_eq_m128h(a, b); + let b = _mm_set_sh(2.0); + let r = _mm_mul_sh(a, b); + let e = _mm_set_sh(2.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_store_sh() { + unsafe fn test_mm_mask_mul_sh() { let a = _mm_set_sh(1.0); - let mut b = _mm_setzero_ph(); - _mm_mask_store_sh(addr_of_mut!(b).cast(), 0, a); - assert_eq_m128h(_mm_setzero_ph(), b); - _mm_mask_store_sh(addr_of_mut!(b).cast(), 1, a); - assert_eq_m128h(a, b); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_storeu_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let mut array = [0.0; 8]; - _mm_storeu_ph(array.as_mut_ptr(), a); - assert_eq_m128h(a, _mm_loadu_ph(array.as_ptr())); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_storeu_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let mut array = [0.0; 16]; - _mm256_storeu_ph(array.as_mut_ptr(), a); - assert_eq_m256h(a, _mm256_loadu_ph(array.as_ptr())); + let b = _mm_set_sh(2.0); + let src = _mm_set_sh(4.0); + let r = _mm_mask_mul_sh(src, 0, a, b); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_mul_sh(src, 1, a, b); + let e = 
_mm_set_sh(2.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_storeu_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let mut array = [0.0; 32]; - _mm512_storeu_ph(array.as_mut_ptr(), a); - assert_eq_m512h(a, _mm512_loadu_ph(array.as_ptr())); + unsafe fn test_mm_maskz_mul_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_maskz_mul_sh(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = _mm_maskz_mul_sh(1, a, b); + let e = _mm_set_sh(2.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_add_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - let r = _mm_add_ph(a, b); - let e = _mm_set1_ph(9.0); + unsafe fn test_mm_div_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let r = _mm_div_ph(a, b); + let e = _mm_set1_ph(0.5); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_add_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let r = _mm_mask_add_ph(src, 0b01010101, a, b); - let e = _mm_set_ph(10., 9., 12., 9., 14., 9., 16., 9.); + unsafe fn test_mm_mask_div_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let src = _mm_set_ph(4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0); + let r = _mm_mask_div_ph(src, 0b01010101, a, b); + let e = _mm_set_ph(4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_add_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - let r = _mm_maskz_add_ph(0b01010101, a, b); - let e = _mm_set_ph(0., 9., 0., 9., 0., 9., 0., 9.); + unsafe fn test_mm_maskz_div_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let r = _mm_maskz_div_ph(0b01010101, a, b); + let e = _mm_set_ph(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_add_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let b = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, - ); - let r = _mm256_add_ph(a, b); - let e = _mm256_set1_ph(17.0); + unsafe fn test_mm256_div_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let r = _mm256_div_ph(a, b); + let e = _mm256_set1_ph(0.5); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_add_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let b = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, - ); + unsafe fn test_mm256_mask_div_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); let src = _mm256_set_ph( - 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., + 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, + 19.0, ); - 
let r = _mm256_mask_add_ph(src, 0b0101010101010101, a, b); + let r = _mm256_mask_div_ph(src, 0b0101010101010101, a, b); let e = _mm256_set_ph( - 18., 17., 20., 17., 22., 17., 24., 17., 26., 17., 28., 17., 30., 17., 32., 17., + 4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5, 12.0, 0.5, 14.0, 0.5, 16.0, 0.5, 18.0, 0.5, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_add_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let b = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, - ); - let r = _mm256_maskz_add_ph(0b0101010101010101, a, b); + unsafe fn test_mm256_maskz_div_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let r = _mm256_maskz_div_ph(0b0101010101010101, a, b); let e = _mm256_set_ph( - 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., + 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_add_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let r = _mm512_add_ph(a, b); - let e = _mm512_set1_ph(33.0); + unsafe fn test_mm512_div_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let r = _mm512_div_ph(a, b); + let e = _mm512_set1_ph(0.5); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_add_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); + unsafe fn test_mm512_mask_div_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); let src = _mm512_set_ph( - 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., - 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, + 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, + 33.0, 34.0, 35.0, ); - let r = _mm512_mask_add_ph(src, 0b01010101010101010101010101010101, a, b); + let r = _mm512_mask_div_ph(src, 0b01010101010101010101010101010101, a, b); let e = _mm512_set_ph( - 34., 33., 36., 33., 38., 33., 40., 33., 42., 33., 44., 33., 46., 33., 48., 33., 50., - 33., 52., 33., 54., 33., 56., 33., 58., 33., 60., 33., 62., 33., 64., 33., + 4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5, 12.0, 0.5, 14.0, 0.5, 16.0, 0.5, 18.0, 0.5, + 20.0, 0.5, 22.0, 0.5, 24.0, 0.5, 26.0, 0.5, 28.0, 0.5, 30.0, 0.5, 32.0, 0.5, 34.0, 0.5, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_add_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 
13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let r = _mm512_maskz_add_ph(0b01010101010101010101010101010101, a, b); + unsafe fn test_mm512_maskz_div_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let r = _mm512_maskz_div_ph(0b01010101010101010101010101010101, a, b); let e = _mm512_set_ph( - 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., - 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., + 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, + 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_add_round_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let r = _mm512_add_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm512_set1_ph(33.0); + unsafe fn test_mm512_div_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let r = _mm512_div_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set1_ph(0.5); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_add_round_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); + unsafe fn test_mm512_mask_div_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); let src = _mm512_set_ph( - 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., - 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, + 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, + 33.0, 34.0, 35.0, ); - let r = _mm512_mask_add_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm512_mask_div_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( src, 0b01010101010101010101010101010101, a, b, ); let e = _mm512_set_ph( - 34., 33., 36., 33., 38., 33., 40., 33., 42., 33., 44., 33., 46., 33., 48., 33., 50., - 33., 52., 33., 54., 33., 56., 33., 58., 33., 60., 33., 62., 33., 64., 33., + 4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5, 12.0, 0.5, 14.0, 0.5, 16.0, 0.5, 18.0, 0.5, + 20.0, 0.5, 22.0, 0.5, 24.0, 0.5, 26.0, 0.5, 28.0, 0.5, 30.0, 0.5, 32.0, 0.5, 34.0, 0.5, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_add_round_ph() { 
- let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let r = _mm512_maskz_add_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_maskz_div_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let r = _mm512_maskz_div_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( 0b01010101010101010101010101010101, a, b, ); let e = _mm512_set_ph( - 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., - 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., + 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, + 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_add_round_sh() { + unsafe fn test_mm_div_round_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); - let r = _mm_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm_set_sh(3.0); + let r = _mm_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_set_sh(0.5); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_add_round_sh() { + unsafe fn test_mm_mask_div_round_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); let src = _mm_set_sh(4.0); - let r = _mm_mask_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm_mask_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( src, 0, a, b, ); let e = _mm_set_sh(4.0); assert_eq_m128h(r, e); - let r = _mm_mask_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm_mask_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( src, 1, a, b, ); - let e = _mm_set_sh(3.0); + let e = _mm_set_sh(0.5); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_add_round_sh() { + unsafe fn test_mm_maskz_div_round_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); let r = - _mm_maskz_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + _mm_maskz_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); let e = _mm_set_sh(0.0); assert_eq_m128h(r, e); let r = - _mm_maskz_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); - let e = _mm_set_sh(3.0); + _mm_maskz_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_set_sh(0.5); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_add_sh() { + unsafe fn test_mm_div_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); - let r = _mm_add_sh(a, b); - let e = _mm_set_sh(3.0); + let r = _mm_div_sh(a, b); + let e = _mm_set_sh(0.5); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_add_sh() { + unsafe fn test_mm_mask_div_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); let src = _mm_set_sh(4.0); - let r = _mm_mask_add_sh(src, 0, a, b); + let r = _mm_mask_div_sh(src, 0, a, b); let e = _mm_set_sh(4.0); assert_eq_m128h(r, e); - let r = _mm_mask_add_sh(src, 1, a, b); 
- let e = _mm_set_sh(3.0); + let r = _mm_mask_div_sh(src, 1, a, b); + let e = _mm_set_sh(0.5); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_add_sh() { + unsafe fn test_mm_maskz_div_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); - let r = _mm_maskz_add_sh(0, a, b); + let r = _mm_maskz_div_sh(0, a, b); let e = _mm_set_sh(0.0); assert_eq_m128h(r, e); - let r = _mm_maskz_add_sh(1, a, b); - let e = _mm_set_sh(3.0); + let r = _mm_maskz_div_sh(1, a, b); + let e = _mm_set_sh(0.5); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_sub_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - let r = _mm_sub_ph(a, b); - let e = _mm_set_ph(-7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0); + unsafe fn test_mm_mul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 1.0); + let r = _mm_mul_pch(a, b); + let e = _mm_set1_pch(-1.0, 0.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_sub_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let r = _mm_mask_sub_ph(src, 0b01010101, a, b); - let e = _mm_set_ph(10., -5., 12., -1., 14., 3., 16., 7.); + unsafe fn test_mm_mask_mul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 1.0); + let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0); + let r = _mm_mask_mul_pch(src, 0b0101, a, b); + let e = _mm_setr_ph(-1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_sub_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - let r = _mm_maskz_sub_ph(0b01010101, a, b); - let e = _mm_set_ph(0., -5., 0., -1., 0., 3., 0., 7.); + unsafe fn test_mm_maskz_mul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 1.0); + let r = _mm_maskz_mul_pch(0b0101, a, b); + let e = _mm_setr_ph(-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_sub_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let b = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, - ); - let r = _mm256_sub_ph(a, b); - let e = _mm256_set_ph( - -15.0, -13.0, -11.0, -9.0, -7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, - 15.0, - ); + unsafe fn test_mm256_mul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 1.0); + let r = _mm256_mul_pch(a, b); + let e = _mm256_set1_pch(-1.0, 0.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_sub_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let b = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, - ); - let src = _mm256_set_ph( - 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., + unsafe fn test_mm256_mask_mul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 1.0); + let src = _mm256_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 
7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, ); - let r = _mm256_mask_sub_ph(src, 0b0101010101010101, a, b); - let e = _mm256_set_ph( - 18., -13., 20., -9., 22., -5., 24., -1., 26., 3., 28., 7., 30., 11., 32., 15., + let r = _mm256_mask_mul_pch(src, 0b01010101, a, b); + let e = _mm256_setr_ph( + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_sub_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let b = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, - ); - let r = _mm256_maskz_sub_ph(0b0101010101010101, a, b); - let e = _mm256_set_ph( - 0., -13., 0., -9., 0., -5., 0., -1., 0., 3., 0., 7., 0., 11., 0., 15., + unsafe fn test_mm256_maskz_mul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 1.0); + let r = _mm256_maskz_mul_pch(0b01010101, a, b); + let e = _mm256_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_sub_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let r = _mm512_sub_ph(a, b); - let e = _mm512_set_ph( - -31.0, -29.0, -27.0, -25.0, -23.0, -21.0, -19.0, -17.0, -15.0, -13.0, -11.0, -9.0, - -7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, - 23.0, 25.0, 27.0, 29.0, 31.0, - ); + unsafe fn test_mm512_mul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let r = _mm512_mul_pch(a, b); + let e = _mm512_set1_pch(-1.0, 0.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_sub_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let src = _mm512_set_ph( - 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., - 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + unsafe fn test_mm512_mask_mul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let src = _mm512_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, + 32.0, 33.0, ); - let r = _mm512_mask_sub_ph(src, 0b01010101010101010101010101010101, a, b); - let e = _mm512_set_ph( - 34., -29., 36., -25., 38., -21., 40., -17., 42., -13., 44., -9., 46., -5., 48., -1., - 50., 3., 52., 7., 54., 11., 56., 15., 58., 19., 60., 23., 62., 27., 64., 31., + let r = _mm512_mask_mul_pch(src, 0b0101010101010101, 
a, b); + let e = _mm512_setr_ph( + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, + 33.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_sub_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let r = _mm512_maskz_sub_ph(0b01010101010101010101010101010101, a, b); - let e = _mm512_set_ph( - 0., -29., 0., -25., 0., -21., 0., -17., 0., -13., 0., -9., 0., -5., 0., -1., 0., 3., - 0., 7., 0., 11., 0., 15., 0., 19., 0., 23., 0., 27., 0., 31., + unsafe fn test_mm512_maskz_mul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let r = _mm512_maskz_mul_pch(0b0101010101010101, a, b); + let e = _mm512_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_sub_round_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let r = _mm512_sub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm512_set_ph( - -31.0, -29.0, -27.0, -25.0, -23.0, -21.0, -19.0, -17.0, -15.0, -13.0, -11.0, -9.0, - -7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, - 23.0, 25.0, 27.0, 29.0, 31.0, - ); + unsafe fn test_mm512_mul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let r = _mm512_mul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set1_pch(-1.0, 0.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_sub_round_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let src = _mm512_set_ph( - 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., - 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + unsafe fn test_mm512_mask_mul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let src = _mm512_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, + 
32.0, 33.0, ); - let r = _mm512_mask_sub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm512_mask_mul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( src, - 0b01010101010101010101010101010101, + 0b0101010101010101, a, b, ); - let e = _mm512_set_ph( - 34., -29., 36., -25., 38., -21., 40., -17., 42., -13., 44., -9., 46., -5., 48., -1., - 50., 3., 52., 7., 54., 11., 56., 15., 58., 19., 60., 23., 62., 27., 64., 31., + let e = _mm512_setr_ph( + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, + 33.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_sub_round_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let r = _mm512_maskz_sub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b01010101010101010101010101010101, + unsafe fn test_mm512_maskz_mul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let r = _mm512_maskz_mul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b0101010101010101, a, b, ); - let e = _mm512_set_ph( - 0., -29., 0., -25., 0., -21., 0., -17., 0., -13., 0., -9., 0., -5., 0., -1., 0., 3., - 0., 7., 0., 11., 0., 15., 0., 19., 0., 23., 0., 27., 0., 31., + let e = _mm512_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_sub_round_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm_set_sh(-1.0); + unsafe fn test_mm_mul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let r = _mm_mul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_sub_round_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let src = _mm_set_sh(4.0); - let r = _mm_mask_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm_mask_mul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); + let r = _mm_mask_mul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( src, 0, a, b, ); - let e = _mm_set_sh(4.0); - assert_eq_m128h(r, e); - let r = _mm_mask_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 1, a, b, - ); - let e = _mm_set_sh(-1.0); + let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_sub_round_sh() { - let a = 
_mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = - _mm_maskz_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); - let e = _mm_set_sh(0.0); - assert_eq_m128h(r, e); + unsafe fn test_mm_maskz_mul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); let r = - _mm_maskz_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); - let e = _mm_set_sh(-1.0); + _mm_maskz_mul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_sub_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_sub_sh(a, b); - let e = _mm_set_sh(-1.0); + unsafe fn test_mm_mul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let r = _mm_mul_sch(a, b); + let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_sub_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let src = _mm_set_sh(4.0); - let r = _mm_mask_sub_sh(src, 0, a, b); - let e = _mm_set_sh(4.0); - assert_eq_m128h(r, e); - let r = _mm_mask_sub_sh(src, 1, a, b); - let e = _mm_set_sh(-1.0); + unsafe fn test_mm_mask_mul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); + let r = _mm_mask_mul_sch(src, 0, a, b); + let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_sub_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_maskz_sub_sh(0, a, b); - let e = _mm_set_sh(0.0); - assert_eq_m128h(r, e); - let r = _mm_maskz_sub_sh(1, a, b); - let e = _mm_set_sh(-1.0); + unsafe fn test_mm_maskz_mul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let r = _mm_maskz_mul_sch(0, a, b); + let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mul_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - let r = _mm_mul_ph(a, b); - let e = _mm_set_ph(8.0, 14.0, 18.0, 20.0, 20.0, 18.0, 14.0, 8.0); + unsafe fn test_mm_fmul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 1.0); + let r = _mm_fmul_pch(a, b); + let e = _mm_set1_pch(-1.0, 0.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_mul_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let r = _mm_mask_mul_ph(src, 0b01010101, a, b); - let e = _mm_set_ph(10., 14., 12., 20., 14., 18., 16., 8.); + unsafe fn test_mm_mask_fmul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 1.0); + let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0); + let r = _mm_mask_fmul_pch(src, 0b0101, a, b); + let e = _mm_setr_ph(-1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0); 
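+        // Each `pch` lane is a (re, im) pair of `f16`s and each mask bit selects
+        // one whole pair: bits 0 and 2 take the product i * i = -1 + 0i, while
+        // pairs 1 and 3 pass through from `src`.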
assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_mul_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - let r = _mm_maskz_mul_ph(0b01010101, a, b); - let e = _mm_set_ph(0., 14., 0., 20., 0., 18., 0., 8.); + unsafe fn test_mm_maskz_fmul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 1.0); + let r = _mm_maskz_fmul_pch(0b0101, a, b); + let e = _mm_setr_ph(-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mul_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let b = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, - ); - let r = _mm256_mul_ph(a, b); - let e = _mm256_set_ph( - 16.0, 30.0, 42.0, 52.0, 60.0, 66.0, 70.0, 72.0, 72.0, 70.0, 66.0, 60.0, 52.0, 42.0, - 30.0, 16.0, - ); + unsafe fn test_mm256_fmul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 1.0); + let r = _mm256_fmul_pch(a, b); + let e = _mm256_set1_pch(-1.0, 0.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_mul_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let b = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, - ); - let src = _mm256_set_ph( - 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., + unsafe fn test_mm256_mask_fmul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 1.0); + let src = _mm256_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, ); - let r = _mm256_mask_mul_ph(src, 0b0101010101010101, a, b); - let e = _mm256_set_ph( - 18., 30., 20., 52., 22., 66., 24., 72., 26., 70., 28., 60., 30., 42., 32., 16., + let r = _mm256_mask_fmul_pch(src, 0b01010101, a, b); + let e = _mm256_setr_ph( + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_mul_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let b = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, - ); - let r = _mm256_maskz_mul_ph(0b0101010101010101, a, b); - let e = _mm256_set_ph( - 0., 30., 0., 52., 0., 66., 0., 72., 0., 70., 0., 60., 0., 42., 0., 16., + unsafe fn test_mm256_maskz_fmul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 1.0); + let r = _mm256_maskz_fmul_pch(0b01010101, a, b); + let e = _mm256_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mul_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 
5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let r = _mm512_mul_ph(a, b); - let e = _mm512_set_ph( - 32.0, 62.0, 90.0, 116.0, 140.0, 162.0, 182.0, 200.0, 216.0, 230.0, 242.0, 252.0, 260.0, - 266.0, 270.0, 272.0, 272.0, 270.0, 266.0, 260.0, 252.0, 242.0, 230.0, 216.0, 200.0, - 182.0, 162.0, 140.0, 116.0, 90.0, 62.0, 32.0, - ); + unsafe fn test_mm512_fmul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let r = _mm512_fmul_pch(a, b); + let e = _mm512_set1_pch(-1.0, 0.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_mul_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let src = _mm512_set_ph( - 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., - 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + unsafe fn test_mm512_mask_fmul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let src = _mm512_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, + 32.0, 33.0, ); - let r = _mm512_mask_mul_ph(src, 0b01010101010101010101010101010101, a, b); - let e = _mm512_set_ph( - 34., 62., 36., 116., 38., 162., 40., 200., 42., 230., 44., 252., 46., 266., 48., 272., - 50., 270., 52., 260., 54., 242., 56., 216., 58., 182., 60., 140., 62., 90., 64., 32., + let r = _mm512_mask_fmul_pch(src, 0b0101010101010101, a, b); + let e = _mm512_setr_ph( + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, + 33.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_mul_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let r = _mm512_maskz_mul_ph(0b01010101010101010101010101010101, a, b); - let e = _mm512_set_ph( - 0., 62., 0., 116., 0., 162., 0., 200., 0., 230., 0., 252., 0., 266., 0., 272., 0., - 270., 0., 260., 0., 242., 0., 216., 0., 182., 0., 140., 0., 90., 0., 32., + unsafe fn test_mm512_maskz_fmul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let r = _mm512_maskz_fmul_pch(0b0101010101010101, a, b); + let e = _mm512_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mul_round_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 
24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let r = _mm512_mul_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm512_set_ph( - 32.0, 62.0, 90.0, 116.0, 140.0, 162.0, 182.0, 200.0, 216.0, 230.0, 242.0, 252.0, 260.0, - 266.0, 270.0, 272.0, 272.0, 270.0, 266.0, 260.0, 252.0, 242.0, 230.0, 216.0, 200.0, - 182.0, 162.0, 140.0, 116.0, 90.0, 62.0, 32.0, - ); + unsafe fn test_mm512_fmul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let r = _mm512_fmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set1_pch(-1.0, 0.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_mul_round_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let src = _mm512_set_ph( - 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., - 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + unsafe fn test_mm512_mask_fmul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let src = _mm512_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, + 32.0, 33.0, ); - let r = _mm512_mask_mul_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm512_mask_fmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( src, - 0b01010101010101010101010101010101, + 0b0101010101010101, a, b, ); - let e = _mm512_set_ph( - 34., 62., 36., 116., 38., 162., 40., 200., 42., 230., 44., 252., 46., 266., 48., 272., - 50., 270., 52., 260., 54., 242., 56., 216., 58., 182., 60., 140., 62., 90., 64., 32., + let e = _mm512_setr_ph( + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, + 33.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_mul_round_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let r = _mm512_maskz_mul_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b01010101010101010101010101010101, + unsafe fn test_mm512_maskz_fmul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let r = _mm512_maskz_fmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b0101010101010101, a, b, ); - let e = 
_mm512_set_ph( - 0., 62., 0., 116., 0., 162., 0., 200., 0., 230., 0., 252., 0., 266., 0., 272., 0., - 270., 0., 260., 0., 242., 0., 216., 0., 182., 0., 140., 0., 90., 0., 32., + let e = _mm512_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mul_round_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm_set_sh(2.0); + unsafe fn test_mm_fmul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let r = _mm_fmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_mul_round_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let src = _mm_set_sh(4.0); - let r = _mm_mask_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm_mask_fmul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); + let r = _mm_mask_fmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( src, 0, a, b, ); - let e = _mm_set_sh(4.0); - assert_eq_m128h(r, e); - let r = _mm_mask_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 1, a, b, - ); - let e = _mm_set_sh(2.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_mul_round_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = - _mm_maskz_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); - let e = _mm_set_sh(0.0); - assert_eq_m128h(r, e); - let r = - _mm_maskz_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); - let e = _mm_set_sh(2.0); + let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mul_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_mul_sh(a, b); - let e = _mm_set_sh(2.0); + unsafe fn test_mm_maskz_fmul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let r = + _mm_maskz_fmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_mul_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let src = _mm_set_sh(4.0); - let r = _mm_mask_mul_sh(src, 0, a, b); - let e = _mm_set_sh(4.0); - assert_eq_m128h(r, e); - let r = _mm_mask_mul_sh(src, 1, a, b); - let e = _mm_set_sh(2.0); + unsafe fn test_mm_fmul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let r = _mm_fmul_sch(a, b); + let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_mul_sh() { - let a = _mm_set_sh(1.0); - let b = 
_mm_set_sh(2.0); - let r = _mm_maskz_mul_sh(0, a, b); - let e = _mm_set_sh(0.0); + unsafe fn test_mm_mask_fmul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); + let r = _mm_mask_fmul_sch(src, 0, a, b); + let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); - let r = _mm_maskz_mul_sh(1, a, b); - let e = _mm_set_sh(2.0); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_fmul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let r = _mm_maskz_fmul_sch(0, a, b); + let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_div_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let r = _mm_div_ph(a, b); - let e = _mm_set1_ph(0.5); + unsafe fn test_mm_cmul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, -1.0); + let r = _mm_cmul_pch(a, b); + let e = _mm_set1_pch(-1.0, 0.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_div_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let src = _mm_set_ph(4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0); - let r = _mm_mask_div_ph(src, 0b01010101, a, b); - let e = _mm_set_ph(4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5); + unsafe fn test_mm_mask_cmul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, -1.0); + let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0); + let r = _mm_mask_cmul_pch(src, 0b0101, a, b); + let e = _mm_setr_ph(-1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_div_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let r = _mm_maskz_div_ph(0b01010101, a, b); - let e = _mm_set_ph(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5); + unsafe fn test_mm_maskz_cmul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, -1.0); + let r = _mm_maskz_cmul_pch(0b0101, a, b); + let e = _mm_setr_ph(-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_div_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let r = _mm256_div_ph(a, b); - let e = _mm256_set1_ph(0.5); + unsafe fn test_mm256_cmul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, -1.0); + let r = _mm256_cmul_pch(a, b); + let e = _mm256_set1_pch(-1.0, 0.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_div_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let src = _mm256_set_ph( - 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, - 19.0, + unsafe fn test_mm256_mask_cmul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, -1.0); + let src = _mm256_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, ); - let r = _mm256_mask_div_ph(src, 0b0101010101010101, a, b); - let e = _mm256_set_ph( - 4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5, 12.0, 0.5, 14.0, 0.5, 16.0, 0.5, 18.0, 0.5, + let r = _mm256_mask_cmul_pch(src, 0b01010101, a, b); + let e = _mm256_setr_ph( + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 
8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_div_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let r = _mm256_maskz_div_ph(0b0101010101010101, a, b); - let e = _mm256_set_ph( - 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, + unsafe fn test_mm256_maskz_cmul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, -1.0); + let r = _mm256_maskz_cmul_pch(0b01010101, a, b); + let e = _mm256_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_div_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let r = _mm512_div_ph(a, b); - let e = _mm512_set1_ph(0.5); + unsafe fn test_mm512_cmul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, -1.0); + let r = _mm512_cmul_pch(a, b); + let e = _mm512_set1_pch(-1.0, 0.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_div_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let src = _mm512_set_ph( - 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, - 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, - 33.0, 34.0, 35.0, + unsafe fn test_mm512_mask_cmul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, -1.0); + let src = _mm512_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, + 32.0, 33.0, ); - let r = _mm512_mask_div_ph(src, 0b01010101010101010101010101010101, a, b); - let e = _mm512_set_ph( - 4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5, 12.0, 0.5, 14.0, 0.5, 16.0, 0.5, 18.0, 0.5, - 20.0, 0.5, 22.0, 0.5, 24.0, 0.5, 26.0, 0.5, 28.0, 0.5, 30.0, 0.5, 32.0, 0.5, 34.0, 0.5, + let r = _mm512_mask_cmul_pch(src, 0b0101010101010101, a, b); + let e = _mm512_setr_ph( + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, + 33.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_div_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let r = _mm512_maskz_div_ph(0b01010101010101010101010101010101, a, b); - let e = _mm512_set_ph( - 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, - 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, + unsafe fn test_mm512_maskz_cmul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, -1.0); + let r = _mm512_maskz_cmul_pch(0b0101010101010101, a, b); + let e = _mm512_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_div_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let r = _mm512_div_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm512_set1_ph(0.5); + unsafe fn test_mm512_cmul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 
-1.0); + let r = _mm512_cmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set1_pch(-1.0, 0.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_div_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let src = _mm512_set_ph( - 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, - 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, - 33.0, 34.0, 35.0, + unsafe fn test_mm512_mask_cmul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, -1.0); + let src = _mm512_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, + 32.0, 33.0, ); - let r = _mm512_mask_div_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm512_mask_cmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( src, - 0b01010101010101010101010101010101, + 0b0101010101010101, a, b, ); - let e = _mm512_set_ph( - 4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5, 12.0, 0.5, 14.0, 0.5, 16.0, 0.5, 18.0, 0.5, - 20.0, 0.5, 22.0, 0.5, 24.0, 0.5, 26.0, 0.5, 28.0, 0.5, 30.0, 0.5, 32.0, 0.5, 34.0, 0.5, + let e = _mm512_setr_ph( + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, + 33.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_div_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let r = _mm512_maskz_div_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b01010101010101010101010101010101, + unsafe fn test_mm512_maskz_cmul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, -1.0); + let r = _mm512_maskz_cmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b0101010101010101, a, b, ); - let e = _mm512_set_ph( - 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, - 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, + let e = _mm512_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_div_round_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm_set_sh(0.5); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_div_round_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let src = _mm_set_sh(4.0); - let r = _mm_mask_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 0, a, b, - ); - let e = _mm_set_sh(4.0); - assert_eq_m128h(r, e); - let r = _mm_mask_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 1, a, b, - ); - let e = _mm_set_sh(0.5); + unsafe fn test_mm_cmul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let r = _mm_cmul_sch(a, b); + let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe 
fn test_mm_maskz_div_round_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = - _mm_maskz_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); - let e = _mm_set_sh(0.0); - assert_eq_m128h(r, e); - let r = - _mm_maskz_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); - let e = _mm_set_sh(0.5); + unsafe fn test_mm_mask_cmul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); + let r = _mm_mask_cmul_sch(src, 0, a, b); + let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_div_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_div_sh(a, b); - let e = _mm_set_sh(0.5); + unsafe fn test_mm_maskz_cmul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let r = _mm_maskz_cmul_sch(0, a, b); + let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_div_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let src = _mm_set_sh(4.0); - let r = _mm_mask_div_sh(src, 0, a, b); - let e = _mm_set_sh(4.0); - assert_eq_m128h(r, e); - let r = _mm_mask_div_sh(src, 1, a, b); - let e = _mm_set_sh(0.5); + unsafe fn test_mm_cmul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let r = _mm_cmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_div_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_maskz_div_sh(0, a, b); - let e = _mm_set_sh(0.0); + unsafe fn test_mm_mask_cmul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); + let r = _mm_mask_cmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, + ); + let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); - let r = _mm_maskz_div_sh(1, a, b); - let e = _mm_set_sh(0.5); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_cmul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let r = + _mm_maskz_cmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mul_pch() { + unsafe fn test_mm_fcmul_pch() { let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, 1.0); - let r = _mm_mul_pch(a, b); + let b = _mm_set1_pch(0.0, -1.0); + let r = _mm_fcmul_pch(a, b); let e = _mm_set1_pch(-1.0, 0.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_mul_pch() { + unsafe fn test_mm_mask_fcmul_pch() { let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, -1.0); let 
src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0); - let r = _mm_mask_mul_pch(src, 0b0101, a, b); + let r = _mm_mask_fcmul_pch(src, 0b0101, a, b); let e = _mm_setr_ph(-1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_mul_pch() { + unsafe fn test_mm_maskz_fcmul_pch() { let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, 1.0); - let r = _mm_maskz_mul_pch(0b0101, a, b); + let b = _mm_set1_pch(0.0, -1.0); + let r = _mm_maskz_fcmul_pch(0b0101, a, b); let e = _mm_setr_ph(-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mul_pch() { + unsafe fn test_mm256_fcmul_pch() { let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 1.0); - let r = _mm256_mul_pch(a, b); + let b = _mm256_set1_pch(0.0, -1.0); + let r = _mm256_fcmul_pch(a, b); let e = _mm256_set1_pch(-1.0, 0.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_mul_pch() { + unsafe fn test_mm256_mask_fcmul_pch() { let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, -1.0); let src = _mm256_setr_ph( 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, ); - let r = _mm256_mask_mul_pch(src, 0b01010101, a, b); + let r = _mm256_mask_fcmul_pch(src, 0b01010101, a, b); let e = _mm256_setr_ph( -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, ); @@ -9218,10 +11054,10 @@ mod tests { } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_mul_pch() { + unsafe fn test_mm256_maskz_fcmul_pch() { let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 1.0); - let r = _mm256_maskz_mul_pch(0b01010101, a, b); + let b = _mm256_set1_pch(0.0, -1.0); + let r = _mm256_maskz_fcmul_pch(0b01010101, a, b); let e = _mm256_setr_ph( -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, ); @@ -9229,24 +11065,24 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mul_pch() { + unsafe fn test_mm512_fcmul_pch() { let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let r = _mm512_mul_pch(a, b); + let b = _mm512_set1_pch(0.0, -1.0); + let r = _mm512_fcmul_pch(a, b); let e = _mm512_set1_pch(-1.0, 0.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_mul_pch() { + unsafe fn test_mm512_mask_fcmul_pch() { let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, -1.0); let src = _mm512_setr_ph( 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, 33.0, ); - let r = _mm512_mask_mul_pch(src, 0b0101010101010101, a, b); + let r = _mm512_mask_fcmul_pch(src, 0b0101010101010101, a, b); let e = _mm512_setr_ph( -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, @@ -9256,10 +11092,10 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_mul_pch() { + unsafe fn test_mm512_maskz_fcmul_pch() { let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let r = _mm512_maskz_mul_pch(0b0101010101010101, a, b); + let b = _mm512_set1_pch(0.0, -1.0); + let r = 
_mm512_maskz_fcmul_pch(0b0101010101010101, a, b); let e = _mm512_setr_ph( -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, @@ -9268,24 +11104,24 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mul_round_pch() { + unsafe fn test_mm512_fcmul_round_pch() { let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let r = _mm512_mul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let b = _mm512_set1_pch(0.0, -1.0); + let r = _mm512_fcmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); let e = _mm512_set1_pch(-1.0, 0.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_mul_round_pch() { + unsafe fn test_mm512_mask_fcmul_round_pch() { let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, -1.0); let src = _mm512_setr_ph( 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, 33.0, ); - let r = _mm512_mask_mul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm512_mask_fcmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( src, 0b0101010101010101, a, @@ -9300,10 +11136,10 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_mul_round_pch() { + unsafe fn test_mm512_maskz_fcmul_round_pch() { let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let r = _mm512_maskz_mul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let b = _mm512_set1_pch(0.0, -1.0); + let r = _mm512_maskz_fcmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( 0b0101010101010101, a, b, @@ -9316,3088 +11152,3321 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mul_round_sch() { + unsafe fn test_mm_fcmul_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let r = _mm_mul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let r = _mm_fcmul_sch(a, b); let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_mul_round_sch() { + unsafe fn test_mm_mask_fcmul_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); - let r = _mm_mask_mul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm_mask_fcmul_sch(src, 0, a, b); + let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_fcmul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let r = _mm_maskz_fcmul_sch(0, a, b); + let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_fcmul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 
5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let r = _mm_fcmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_fcmul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); + let r = _mm_mask_fcmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( src, 0, a, b, ); - let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); + let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_fcmul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let r = + _mm_maskz_fcmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_abs_ph() { + let a = _mm_set_ph(-1.0, 0.0, 1.0, -2.0, 3.0, -4.0, 5.0, -6.0); + let r = _mm_abs_ph(a); + let e = _mm_set_ph(1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_abs_ph() { + let a = _mm256_set_ph( + -1.0, 0.0, 1.0, -2.0, 3.0, -4.0, 5.0, -6.0, 7.0, -8.0, 9.0, -10.0, 11.0, -12.0, 13.0, + -14.0, + ); + let r = _mm256_abs_ph(a); + let e = _mm256_set_ph( + 1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_abs_ph() { + let a = _mm512_set_ph( + -1.0, 0.0, 1.0, -2.0, 3.0, -4.0, 5.0, -6.0, 7.0, -8.0, 9.0, -10.0, 11.0, -12.0, 13.0, + -14.0, 15.0, -16.0, 17.0, -18.0, 19.0, -20.0, 21.0, -22.0, 23.0, -24.0, 25.0, -26.0, + 27.0, -28.0, 29.0, -30.0, + ); + let r = _mm512_abs_ph(a); + let e = _mm512_set_ph( + 1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, + 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, + 29.0, 30.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_conj_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let r = _mm_conj_pch(a); + let e = _mm_set1_pch(0.0, -1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_conj_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0); + let r = _mm_mask_conj_pch(src, 0b0101, a); + let e = _mm_setr_ph(0.0, -1.0, 4.0, 5.0, 0.0, -1.0, 8.0, 9.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_conj_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let r = _mm_maskz_conj_pch(0b0101, a); + let e = _mm_setr_ph(0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_conj_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let r = _mm256_conj_pch(a); + let e = _mm256_set1_pch(0.0, -1.0); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + 
unsafe fn test_mm256_mask_conj_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let src = _mm256_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + ); + let r = _mm256_mask_conj_pch(src, 0b01010101, a); + let e = _mm256_setr_ph( + 0.0, -1.0, 4.0, 5.0, 0.0, -1.0, 8.0, 9.0, 0.0, -1.0, 12.0, 13.0, 0.0, -1.0, 16.0, 17.0, + ); + assert_eq_m256h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_mul_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let r = - _mm_maskz_mul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); - let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_conj_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let r = _mm256_maskz_conj_pch(0b01010101, a); + let e = _mm256_setr_ph( + 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, + ); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mul_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let r = _mm_mul_sch(a, b); - let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); + unsafe fn test_mm512_conj_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let r = _mm512_conj_pch(a); + let e = _mm512_set1_pch(0.0, -1.0); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_mul_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); - let r = _mm_mask_mul_sch(src, 0, a, b); - let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); + unsafe fn test_mm512_mask_conj_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let src = _mm512_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, + 32.0, 33.0, + ); + let r = _mm512_mask_conj_pch(src, 0b0101010101010101, a); + let e = _mm512_setr_ph( + 0.0, -1.0, 4.0, 5.0, 0.0, -1.0, 8.0, 9.0, 0.0, -1.0, 12.0, 13.0, 0.0, -1.0, 16.0, 17.0, + 0.0, -1.0, 20.0, 21.0, 0.0, -1.0, 24.0, 25.0, 0.0, -1.0, 28.0, 29.0, 0.0, -1.0, 32.0, + 33.0, + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_mul_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let r = _mm_maskz_mul_sch(0, a, b); - let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm512_maskz_conj_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let r = _mm512_maskz_conj_pch(0b0101010101010101, a); + let e = _mm512_setr_ph( + 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, + 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_fmadd_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 2.0); + let c = _mm_set1_pch(0.0, 3.0); + let r = _mm_fmadd_pch(a, b, c); + let e = _mm_set1_pch(-2.0, 3.0); 
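+        // Sanity check of the packed complex math on interleaved (re, im)
+        // f16 pairs: (0 + 1i) * (0 + 2i) + (0 + 3i) = -2 + 3i, so every
+        // lane pair of the result is expected to be (-2.0, 3.0).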
assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_fmul_pch() { + unsafe fn test_mm_mask_fmadd_pch() { let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, 1.0); - let r = _mm_fmul_pch(a, b); - let e = _mm_set1_pch(-1.0, 0.0); + let b = _mm_set1_pch(0.0, 2.0); + let c = _mm_set1_pch(0.0, 3.0); + let r = _mm_mask_fmadd_pch(a, 0b0101, b, c); + let e = _mm_setr_ph(-2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fmul_pch() { + unsafe fn test_mm_mask3_fmadd_pch() { let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, 1.0); - let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0); - let r = _mm_mask_fmul_pch(src, 0b0101, a, b); - let e = _mm_setr_ph(-1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0); + let b = _mm_set1_pch(0.0, 2.0); + let c = _mm_set1_pch(0.0, 3.0); + let r = _mm_mask3_fmadd_pch(a, b, c, 0b0101); + let e = _mm_setr_ph(-2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_fmul_pch() { + unsafe fn test_mm_maskz_fmadd_pch() { let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, 1.0); - let r = _mm_maskz_fmul_pch(0b0101, a, b); - let e = _mm_setr_ph(-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0); + let b = _mm_set1_pch(0.0, 2.0); + let c = _mm_set1_pch(0.0, 3.0); + let r = _mm_maskz_fmadd_pch(0b0101, a, b, c); + let e = _mm_setr_ph(-2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_fmul_pch() { + unsafe fn test_mm256_fmadd_pch() { let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 1.0); - let r = _mm256_fmul_pch(a, b); - let e = _mm256_set1_pch(-1.0, 0.0); + let b = _mm256_set1_pch(0.0, 2.0); + let c = _mm256_set1_pch(0.0, 3.0); + let r = _mm256_fmadd_pch(a, b, c); + let e = _mm256_set1_pch(-2.0, 3.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_fmul_pch() { + unsafe fn test_mm256_mask_fmadd_pch() { let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 1.0); - let src = _mm256_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + let b = _mm256_set1_pch(0.0, 2.0); + let c = _mm256_set1_pch(0.0, 3.0); + let r = _mm256_mask_fmadd_pch(a, 0b01010101, b, c); + let e = _mm256_setr_ph( + -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, ); - let r = _mm256_mask_fmul_pch(src, 0b01010101, a, b); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask3_fmadd_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 2.0); + let c = _mm256_set1_pch(0.0, 3.0); + let r = _mm256_mask3_fmadd_pch(a, b, c, 0b01010101); let e = _mm256_setr_ph( - -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_fmul_pch() { + unsafe fn test_mm256_maskz_fmadd_pch() { let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 1.0); - let r = _mm256_maskz_fmul_pch(0b01010101, a, b); + let b = _mm256_set1_pch(0.0, 2.0); + let c = _mm256_set1_pch(0.0, 3.0); + let r = _mm256_maskz_fmadd_pch(0b01010101, a, b, c); let e = _mm256_setr_ph( - -1.0, 0.0, 0.0, 0.0, 
-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmul_pch() { + unsafe fn test_mm512_fmadd_pch() { let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let r = _mm512_fmul_pch(a, b); - let e = _mm512_set1_pch(-1.0, 0.0); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_fmadd_pch(a, b, c); + let e = _mm512_set1_pch(-2.0, 3.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fmul_pch() { + unsafe fn test_mm512_mask_fmadd_pch() { let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let src = _mm512_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, - 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, - 32.0, 33.0, + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_mask_fmadd_pch(a, 0b0101010101010101, b, c); + let e = _mm512_setr_ph( + -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, + -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, ); - let r = _mm512_mask_fmul_pch(src, 0b0101010101010101, a, b); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask3_fmadd_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_mask3_fmadd_pch(a, b, c, 0b0101010101010101); let e = _mm512_setr_ph( - -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, - -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, - 33.0, + -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, + -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmul_pch() { + unsafe fn test_mm512_maskz_fmadd_pch() { let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let r = _mm512_maskz_fmul_pch(0b0101010101010101, a, b); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_maskz_fmadd_pch(0b0101010101010101, a, b, c); let e = _mm512_setr_ph( - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, + -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmul_round_pch() { + unsafe fn test_mm512_fmadd_round_pch() { let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let r = _mm512_fmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm512_set1_pch(-1.0, 0.0); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = + _mm512_fmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm512_set1_pch(-2.0, 3.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fmul_round_pch() { + unsafe fn 
test_mm512_mask_fmadd_round_pch() { let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let src = _mm512_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, - 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, - 32.0, 33.0, - ); - let r = _mm512_mask_fmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_mask_fmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, 0b0101010101010101, + b, + c, + ); + let e = _mm512_setr_ph( + -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, + -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask3_fmadd_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_mask3_fmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, b, + c, + 0b0101010101010101, ); let e = _mm512_setr_ph( - -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, - -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, - 33.0, + -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, + -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmul_round_pch() { + unsafe fn test_mm512_maskz_fmadd_round_pch() { let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let r = _mm512_maskz_fmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_maskz_fmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( 0b0101010101010101, a, b, + c, ); let e = _mm512_setr_ph( - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, + -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fmul_round_sch() { + unsafe fn test_mm_fmadd_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_fmadd_sch(a, b, c); + let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_fmadd_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let r = _mm_fmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_mask_fmadd_sch(a, 0, b, c); + let e = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + 
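+        // With the mask bit clear, the low complex pair is passed through
+        // from `a` and the upper f16 lanes also copy `a`, so the expected
+        // result here is `a` itself.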
assert_eq_m128h(r, e); + let r = _mm_mask_fmadd_sch(a, 1, b, c); + let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fmul_round_sch() { + unsafe fn test_mm_mask3_fmadd_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); - let r = _mm_mask_fmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 0, a, b, - ); - let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_mask3_fmadd_sch(a, b, c, 0); + let e = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + assert_eq_m128h(r, e); + let r = _mm_mask3_fmadd_sch(a, b, c, 1); + let e = _mm_setr_ph(-2.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fmul_round_sch() { + unsafe fn test_mm_maskz_fmadd_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let r = - _mm_maskz_fmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_maskz_fmadd_sch(0, a, b, c); let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); + let r = _mm_maskz_fmadd_sch(1, a, b, c); + let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fmul_sch() { + unsafe fn test_mm_fmadd_round_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let r = _mm_fmul_sch(a, b); - let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fmul_sch() { + unsafe fn test_mm_mask_fmadd_round_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); - let r = _mm_mask_fmul_sch(src, 0, a, b); - let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_mask_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, 0, b, c, + ); + let e = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); + let r = _mm_mask_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, 1, b, c, + ); + let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fmul_sch() { + unsafe fn test_mm_mask3_fmadd_round_sch() { let a = 
_mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let r = _mm_maskz_fmul_sch(0, a, b); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_mask3_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, b, c, 0, + ); + let e = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + assert_eq_m128h(r, e); + let r = _mm_mask3_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, b, c, 1, + ); + let e = _mm_setr_ph(-2.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_fmadd_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_maskz_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0, a, b, c, + ); let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); + let r = _mm_maskz_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 1, a, b, c, + ); + let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_cmul_pch() { + unsafe fn test_mm_fcmadd_pch() { let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, -1.0); - let r = _mm_cmul_pch(a, b); - let e = _mm_set1_pch(-1.0, 0.0); + let b = _mm_set1_pch(0.0, 2.0); + let c = _mm_set1_pch(0.0, 3.0); + let r = _mm_fcmadd_pch(a, b, c); + let e = _mm_set1_pch(2.0, 3.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_cmul_pch() { + unsafe fn test_mm_mask_fcmadd_pch() { let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, -1.0); - let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0); - let r = _mm_mask_cmul_pch(src, 0b0101, a, b); - let e = _mm_setr_ph(-1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0); + let b = _mm_set1_pch(0.0, 2.0); + let c = _mm_set1_pch(0.0, 3.0); + let r = _mm_mask_fcmadd_pch(a, 0b0101, b, c); + let e = _mm_setr_ph(2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_cmul_pch() { + unsafe fn test_mm_mask3_fcmadd_pch() { let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, -1.0); - let r = _mm_maskz_cmul_pch(0b0101, a, b); - let e = _mm_setr_ph(-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0); + let b = _mm_set1_pch(0.0, 2.0); + let c = _mm_set1_pch(0.0, 3.0); + let r = _mm_mask3_fcmadd_pch(a, b, c, 0b0101); + let e = _mm_setr_ph(2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_cmul_pch() { + unsafe fn test_mm_maskz_fcmadd_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 2.0); + let c = _mm_set1_pch(0.0, 3.0); + let r = _mm_maskz_fcmadd_pch(0b0101, a, b, c); + let e = _mm_setr_ph(2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_fcmadd_pch() { let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, -1.0); - let r = _mm256_cmul_pch(a, b); - let e = _mm256_set1_pch(-1.0, 0.0); + let b = _mm256_set1_pch(0.0, 2.0); + let c = _mm256_set1_pch(0.0, 3.0); + let r = 
_mm256_fcmadd_pch(a, b, c); + let e = _mm256_set1_pch(2.0, 3.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_cmul_pch() { + unsafe fn test_mm256_mask_fcmadd_pch() { let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, -1.0); - let src = _mm256_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + let b = _mm256_set1_pch(0.0, 2.0); + let c = _mm256_set1_pch(0.0, 3.0); + let r = _mm256_mask_fcmadd_pch(a, 0b01010101, b, c); + let e = _mm256_setr_ph( + 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, ); - let r = _mm256_mask_cmul_pch(src, 0b01010101, a, b); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask3_fcmadd_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 2.0); + let c = _mm256_set1_pch(0.0, 3.0); + let r = _mm256_mask3_fcmadd_pch(a, b, c, 0b01010101); let e = _mm256_setr_ph( - -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_cmul_pch() { + unsafe fn test_mm256_maskz_fcmadd_pch() { let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, -1.0); - let r = _mm256_maskz_cmul_pch(0b01010101, a, b); + let b = _mm256_set1_pch(0.0, 2.0); + let c = _mm256_set1_pch(0.0, 3.0); + let r = _mm256_maskz_fcmadd_pch(0b01010101, a, b, c); let e = _mm256_setr_ph( - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_cmul_pch() { + unsafe fn test_mm512_fcmadd_pch() { let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, -1.0); - let r = _mm512_cmul_pch(a, b); - let e = _mm512_set1_pch(-1.0, 0.0); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_fcmadd_pch(a, b, c); + let e = _mm512_set1_pch(2.0, 3.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_cmul_pch() { + unsafe fn test_mm512_mask_fcmadd_pch() { let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, -1.0); - let src = _mm512_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, - 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, - 32.0, 33.0, + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_mask_fcmadd_pch(a, 0b0101010101010101, b, c); + let e = _mm512_setr_ph( + 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, + 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, ); - let r = _mm512_mask_cmul_pch(src, 0b0101010101010101, a, b); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask3_fcmadd_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_mask3_fcmadd_pch(a, b, c, 0b0101010101010101); let e = _mm512_setr_ph( - -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, - -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, - 33.0, + 
2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, + 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_cmul_pch() { + unsafe fn test_mm512_maskz_fcmadd_pch() { let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, -1.0); - let r = _mm512_maskz_cmul_pch(0b0101010101010101, a, b); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_maskz_fcmadd_pch(0b0101010101010101, a, b, c); let e = _mm512_setr_ph( - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, + 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_cmul_round_pch() { + unsafe fn test_mm512_fcmadd_round_pch() { let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, -1.0); - let r = _mm512_cmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm512_set1_pch(-1.0, 0.0); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = + _mm512_fcmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm512_set1_pch(2.0, 3.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_cmul_round_pch() { + unsafe fn test_mm512_mask_fcmadd_round_pch() { let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, -1.0); - let src = _mm512_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, - 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, - 32.0, 33.0, - ); - let r = _mm512_mask_cmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_mask_fcmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, 0b0101010101010101, + b, + c, + ); + let e = _mm512_setr_ph( + 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, + 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask3_fcmadd_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_mask3_fcmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, b, + c, + 0b0101010101010101, ); let e = _mm512_setr_ph( - -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, - -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, - 33.0, + 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, + 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_cmul_round_pch() { + unsafe fn test_mm512_maskz_fcmadd_round_pch() { let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, -1.0); - let r = _mm512_maskz_cmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let b = _mm512_set1_pch(0.0, 
2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_maskz_fcmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( 0b0101010101010101, a, b, + c, ); let e = _mm512_setr_ph( - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, + 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_cmul_sch() { + unsafe fn test_mm_fcmadd_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); - let r = _mm_cmul_sch(a, b); - let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_fcmadd_sch(a, b, c); + let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_cmul_sch() { + unsafe fn test_mm_mask_fcmadd_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); - let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); - let r = _mm_mask_cmul_sch(src, 0, a, b); - let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_mask_fcmadd_sch(a, 0, b, c); + let e = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); + let r = _mm_mask_fcmadd_sch(a, 1, b, c); + let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_cmul_sch() { + unsafe fn test_mm_mask3_fcmadd_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); - let r = _mm_maskz_cmul_sch(0, a, b); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_mask3_fcmadd_sch(a, b, c, 0); + let e = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + assert_eq_m128h(r, e); + let r = _mm_mask3_fcmadd_sch(a, b, c, 1); + let e = _mm_setr_ph(2.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_fcmadd_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_maskz_fcmadd_sch(0, a, b, c); let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); + let r = _mm_maskz_fcmadd_sch(1, a, b, c); + let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_cmul_round_sch() { + unsafe fn test_mm_fcmadd_round_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); - let r = _mm_cmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); 
- let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_cmul_round_sch() { + unsafe fn test_mm_mask_fcmadd_round_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); - let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); - let r = _mm_mask_cmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 0, a, b, + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_mask_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, 0, b, c, ); - let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let e = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); + let r = _mm_mask_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, 1, b, c, + ); + let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_cmul_round_sch() { + unsafe fn test_mm_mask3_fcmadd_round_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); - let r = - _mm_maskz_cmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_mask3_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, b, c, 0, + ); + let e = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + assert_eq_m128h(r, e); + let r = _mm_mask3_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, b, c, 1, + ); + let e = _mm_setr_ph(2.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_fcmadd_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_maskz_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0, a, b, c, + ); let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); + let r = _mm_maskz_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 1, a, b, c, + ); + let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_fcmul_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, -1.0); - let r = _mm_fcmul_pch(a, b); - let e = _mm_set1_pch(-1.0, 0.0); + unsafe fn test_mm_fmadd_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_fmadd_ph(a, b, c); + let e = _mm_set1_ph(5.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fcmul_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = 
_mm_set1_pch(0.0, -1.0); - let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0); - let r = _mm_mask_fcmul_pch(src, 0b0101, a, b); - let e = _mm_setr_ph(-1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0); + unsafe fn test_mm_mask_fmadd_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_mask_fmadd_ph(a, 0b01010101, b, c); + let e = _mm_set_ph(1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_fcmul_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, -1.0); - let r = _mm_maskz_fcmul_pch(0b0101, a, b); - let e = _mm_setr_ph(-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0); + unsafe fn test_mm_mask3_fmadd_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_mask3_fmadd_ph(a, b, c, 0b01010101); + let e = _mm_set_ph(3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_fcmul_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, -1.0); - let r = _mm256_fcmul_pch(a, b); - let e = _mm256_set1_pch(-1.0, 0.0); + unsafe fn test_mm_maskz_fmadd_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_maskz_fmadd_ph(0b01010101, a, b, c); + let e = _mm_set_ph(0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_fmadd_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_fmadd_ph(a, b, c); + let e = _mm256_set1_ph(5.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_fcmul_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, -1.0); - let src = _mm256_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + unsafe fn test_mm256_mask_fmadd_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_mask_fmadd_ph(a, 0b0101010101010101, b, c); + let e = _mm256_set_ph( + 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, ); - let r = _mm256_mask_fcmul_pch(src, 0b01010101, a, b); - let e = _mm256_setr_ph( - -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask3_fmadd_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_mask3_fmadd_ph(a, b, c, 0b0101010101010101); + let e = _mm256_set_ph( + 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_fcmul_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, -1.0); - let r = _mm256_maskz_fcmul_pch(0b01010101, a, b); - let e = _mm256_setr_ph( - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + unsafe fn test_mm256_maskz_fmadd_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_maskz_fmadd_ph(0b0101010101010101, a, b, c); + let e = _mm256_set_ph( + 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, ); 
assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fcmul_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, -1.0); - let r = _mm512_fcmul_pch(a, b); - let e = _mm512_set1_pch(-1.0, 0.0); + unsafe fn test_mm512_fmadd_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_fmadd_ph(a, b, c); + let e = _mm512_set1_ph(5.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fcmul_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, -1.0); - let src = _mm512_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, - 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, - 32.0, 33.0, - ); - let r = _mm512_mask_fcmul_pch(src, 0b0101010101010101, a, b); - let e = _mm512_setr_ph( - -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, - -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, - 33.0, + unsafe fn test_mm512_mask_fmadd_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask_fmadd_ph(a, 0b01010101010101010101010101010101, b, c); + let e = _mm512_set_ph( + 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, + 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fcmul_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, -1.0); - let r = _mm512_maskz_fcmul_pch(0b0101010101010101, a, b); - let e = _mm512_setr_ph( - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + unsafe fn test_mm512_mask3_fmadd_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask3_fmadd_ph(a, b, c, 0b01010101010101010101010101010101); + let e = _mm512_set_ph( + 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, + 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fcmul_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, -1.0); - let r = _mm512_fcmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm512_set1_pch(-1.0, 0.0); + unsafe fn test_mm512_maskz_fmadd_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_maskz_fmadd_ph(0b01010101010101010101010101010101, a, b, c); + let e = _mm512_set_ph( + 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, + 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, + ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fcmul_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, -1.0); - let src = _mm512_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, - 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, - 32.0, 33.0, - ); - let r = _mm512_mask_fcmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT 
| _MM_FROUND_NO_EXC }>( - src, - 0b0101010101010101, + unsafe fn test_mm512_fmadd_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_fmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm512_set1_ph(5.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_fmadd_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask_fmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, + 0b01010101010101010101010101010101, b, + c, ); - let e = _mm512_setr_ph( - -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, - -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, - 33.0, + let e = _mm512_set_ph( + 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, + 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fcmul_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, -1.0); - let r = _mm512_maskz_fcmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b0101010101010101, + unsafe fn test_mm512_mask3_fmadd_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask3_fmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, b, + c, + 0b01010101010101010101010101010101, ); - let e = _mm512_setr_ph( - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + let e = _mm512_set_ph( + 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, + 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fcmul_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); - let r = _mm_fcmul_sch(a, b); - let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fcmul_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); - let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); - let r = _mm_mask_fcmul_sch(src, 0, a, b); - let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); + unsafe fn test_mm512_maskz_fmadd_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_maskz_fmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101010101010101010101010101, + a, + b, + c, + ); + let e = _mm512_set_ph( + 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, + 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fcmul_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); - let r = 
_mm_maskz_fcmul_sch(0, a, b); - let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_fmadd_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_fmadd_sh(a, b, c); + let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fcmul_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); - let r = _mm_fcmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_mask_fmadd_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_fmadd_sh(a, 0, b, c); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fcmul_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); - let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); - let r = _mm_mask_fcmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 0, a, b, - ); - let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let r = _mm_mask_fmadd_sh(a, 1, b, c); + let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fcmul_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); - let r = - _mm_maskz_fcmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); - let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_mask3_fmadd_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask3_fmadd_sh(a, b, c, 0); + let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_abs_ph() { - let a = _mm_set_ph(-1.0, 0.0, 1.0, -2.0, 3.0, -4.0, 5.0, -6.0); - let r = _mm_abs_ph(a); - let e = _mm_set_ph(1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0); + let r = _mm_mask3_fmadd_sh(a, b, c, 1); + let e = _mm_setr_ph(5.0, 30., 31., 32., 33., 34., 35., 36.); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_abs_ph() { - let a = _mm256_set_ph( - -1.0, 0.0, 1.0, -2.0, 3.0, -4.0, 5.0, -6.0, 7.0, -8.0, 9.0, -10.0, 11.0, -12.0, 13.0, - -14.0, - ); - let r = _mm256_abs_ph(a); - let e = _mm256_set_ph( - 1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, - ); - assert_eq_m256h(r, e); - } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_abs_ph() { - let a = _mm512_set_ph( - -1.0, 0.0, 1.0, -2.0, 3.0, -4.0, 5.0, -6.0, 7.0, -8.0, 9.0, -10.0, 11.0, -12.0, 13.0, - -14.0, 15.0, -16.0, 17.0, -18.0, 19.0, -20.0, 21.0, -22.0, 23.0, -24.0, 25.0, -26.0, - 27.0, -28.0, 29.0, -30.0, - ); - let r = 
_mm512_abs_ph(a); - let e = _mm512_set_ph( - 1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, - 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, - 29.0, 30.0, - ); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_conj_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let r = _mm_conj_pch(a); - let e = _mm_set1_pch(0.0, -1.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_conj_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0); - let r = _mm_mask_conj_pch(src, 0b0101, a); - let e = _mm_setr_ph(0.0, -1.0, 4.0, 5.0, 0.0, -1.0, 8.0, 9.0); + unsafe fn test_mm_maskz_fmadd_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_maskz_fmadd_sh(0, a, b, c); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_conj_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let r = _mm_maskz_conj_pch(0b0101, a); - let e = _mm_setr_ph(0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0); + let r = _mm_maskz_fmadd_sh(1, a, b, c); + let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_conj_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let r = _mm256_conj_pch(a); - let e = _mm256_set1_pch(0.0, -1.0); - assert_eq_m256h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_conj_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let src = _mm256_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, - ); - let r = _mm256_mask_conj_pch(src, 0b01010101, a); - let e = _mm256_setr_ph( - 0.0, -1.0, 4.0, 5.0, 0.0, -1.0, 8.0, 9.0, 0.0, -1.0, 12.0, 13.0, 0.0, -1.0, 16.0, 17.0, - ); - assert_eq_m256h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_conj_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let r = _mm256_maskz_conj_pch(0b01010101, a); - let e = _mm256_setr_ph( - 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, - ); - assert_eq_m256h(r, e); + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_fmadd_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_conj_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let r = _mm512_conj_pch(a); - let e = _mm512_set1_pch(0.0, -1.0); - assert_eq_m512h(r, e); + unsafe fn test_mm_mask_fmadd_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, 0, b, c, + ); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); 
+ let r = _mm_mask_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, 1, b, c, + ); + let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_conj_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let src = _mm512_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, - 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, - 32.0, 33.0, + unsafe fn test_mm_mask3_fmadd_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask3_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, b, c, 0, ); - let r = _mm512_mask_conj_pch(src, 0b0101010101010101, a); - let e = _mm512_setr_ph( - 0.0, -1.0, 4.0, 5.0, 0.0, -1.0, 8.0, 9.0, 0.0, -1.0, 12.0, 13.0, 0.0, -1.0, 16.0, 17.0, - 0.0, -1.0, 20.0, 21.0, 0.0, -1.0, 24.0, 25.0, 0.0, -1.0, 28.0, 29.0, 0.0, -1.0, 32.0, - 33.0, + let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + assert_eq_m128h(r, e); + let r = _mm_mask3_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, b, c, 1, ); - assert_eq_m512h(r, e); + let e = _mm_setr_ph(5.0, 30., 31., 32., 33., 34., 35., 36.); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_conj_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let r = _mm512_maskz_conj_pch(0b0101010101010101, a); - let e = _mm512_setr_ph( - 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, - 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, + unsafe fn test_mm_maskz_fmadd_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_maskz_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0, a, b, c, ); - assert_eq_m512h(r, e); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + let r = _mm_maskz_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 1, a, b, c, + ); + let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_fmadd_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, 2.0); - let c = _mm_set1_pch(0.0, 3.0); - let r = _mm_fmadd_pch(a, b, c); - let e = _mm_set1_pch(-2.0, 3.0); + unsafe fn test_mm_fmsub_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_fmsub_ph(a, b, c); + let e = _mm_set1_ph(-1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fmadd_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, 2.0); - let c = _mm_set1_pch(0.0, 3.0); - let r = _mm_mask_fmadd_pch(a, 0b0101, b, c); - let e = _mm_setr_ph(-2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0); + unsafe fn test_mm_mask_fmsub_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_mask_fmsub_ph(a, 0b01010101, b, c); + let e = _mm_set_ph(1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn 
test_mm_mask3_fmadd_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, 2.0); - let c = _mm_set1_pch(0.0, 3.0); - let r = _mm_mask3_fmadd_pch(a, b, c, 0b0101); - let e = _mm_setr_ph(-2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0); + unsafe fn test_mm_mask3_fmsub_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_mask3_fmsub_ph(a, b, c, 0b01010101); + let e = _mm_set_ph(3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_fmadd_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, 2.0); - let c = _mm_set1_pch(0.0, 3.0); - let r = _mm_maskz_fmadd_pch(0b0101, a, b, c); - let e = _mm_setr_ph(-2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0); + unsafe fn test_mm_maskz_fmsub_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_maskz_fmsub_ph(0b01010101, a, b, c); + let e = _mm_set_ph(0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_fmadd_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 2.0); - let c = _mm256_set1_pch(0.0, 3.0); - let r = _mm256_fmadd_pch(a, b, c); - let e = _mm256_set1_pch(-2.0, 3.0); + unsafe fn test_mm256_fmsub_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_fmsub_ph(a, b, c); + let e = _mm256_set1_ph(-1.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_fmadd_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 2.0); - let c = _mm256_set1_pch(0.0, 3.0); - let r = _mm256_mask_fmadd_pch(a, 0b01010101, b, c); - let e = _mm256_setr_ph( - -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, + unsafe fn test_mm256_mask_fmsub_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_mask_fmsub_ph(a, 0b0101010101010101, b, c); + let e = _mm256_set_ph( + 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask3_fmadd_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 2.0); - let c = _mm256_set1_pch(0.0, 3.0); - let r = _mm256_mask3_fmadd_pch(a, b, c, 0b01010101); - let e = _mm256_setr_ph( - -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, + unsafe fn test_mm256_mask3_fmsub_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_mask3_fmsub_ph(a, b, c, 0b0101010101010101); + let e = _mm256_set_ph( + 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_fmadd_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 2.0); - let c = _mm256_set1_pch(0.0, 3.0); - let r = _mm256_maskz_fmadd_pch(0b01010101, a, b, c); - let e = _mm256_setr_ph( - -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, + unsafe fn test_mm256_maskz_fmsub_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_maskz_fmsub_ph(0b0101010101010101, a, b, c); + let e = 
_mm256_set_ph( + 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmadd_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 2.0); - let c = _mm512_set1_pch(0.0, 3.0); - let r = _mm512_fmadd_pch(a, b, c); - let e = _mm512_set1_pch(-2.0, 3.0); + unsafe fn test_mm512_fmsub_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_fmsub_ph(a, b, c); + let e = _mm512_set1_ph(-1.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fmadd_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 2.0); - let c = _mm512_set1_pch(0.0, 3.0); - let r = _mm512_mask_fmadd_pch(a, 0b0101010101010101, b, c); - let e = _mm512_setr_ph( - -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, - -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, + unsafe fn test_mm512_mask_fmsub_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask_fmsub_ph(a, 0b01010101010101010101010101010101, b, c); + let e = _mm512_set_ph( + 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, + 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fmadd_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 2.0); - let c = _mm512_set1_pch(0.0, 3.0); - let r = _mm512_mask3_fmadd_pch(a, b, c, 0b0101010101010101); - let e = _mm512_setr_ph( - -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, - -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, + unsafe fn test_mm512_mask3_fmsub_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask3_fmsub_ph(a, b, c, 0b01010101010101010101010101010101); + let e = _mm512_set_ph( + 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, + 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmadd_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 2.0); - let c = _mm512_set1_pch(0.0, 3.0); - let r = _mm512_maskz_fmadd_pch(0b0101010101010101, a, b, c); - let e = _mm512_setr_ph( - -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, - -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, + unsafe fn test_mm512_maskz_fmsub_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_maskz_fmsub_ph(0b01010101010101010101010101010101, a, b, c); + let e = _mm512_set_ph( + 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, + 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmadd_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 2.0); - let c = _mm512_set1_pch(0.0, 3.0); - let r = - _mm512_fmadd_round_pch::<{ 
_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); - let e = _mm512_set1_pch(-2.0, 3.0); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fmadd_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 2.0); - let c = _mm512_set1_pch(0.0, 3.0); - let r = _mm512_mask_fmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_fmsub_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_fmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm512_set1_ph(-1.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_fmsub_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask_fmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, - 0b0101010101010101, + 0b01010101010101010101010101010101, b, c, ); - let e = _mm512_setr_ph( - -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, - -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, + let e = _mm512_set_ph( + 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, + 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fmadd_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 2.0); - let c = _mm512_set1_pch(0.0, 3.0); - let r = _mm512_mask3_fmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_mask3_fmsub_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask3_fmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, b, c, - 0b0101010101010101, + 0b01010101010101010101010101010101, ); - let e = _mm512_setr_ph( - -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, - -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, + let e = _mm512_set_ph( + 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, + 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmadd_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 2.0); - let c = _mm512_set1_pch(0.0, 3.0); - let r = _mm512_maskz_fmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b0101010101010101, + unsafe fn test_mm512_maskz_fmsub_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_maskz_fmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101010101010101010101010101, a, b, c, ); - let e = _mm512_setr_ph( - -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, - -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, + let e = _mm512_set_ph( + 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, + 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = 
"avx512fp16")] - unsafe fn test_mm_fmadd_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_fmadd_sch(a, b, c); - let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_fmsub_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_fmsub_sh(a, b, c); + let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fmadd_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_mask_fmadd_sch(a, 0, b, c); - let e = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_mask_fmsub_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_fmsub_sh(a, 0, b, c); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_mask_fmadd_sch(a, 1, b, c); - let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let r = _mm_mask_fmsub_sh(a, 1, b, c); + let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask3_fmadd_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_mask3_fmadd_sch(a, b, c, 0); - let e = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + unsafe fn test_mm_mask3_fmsub_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask3_fmsub_sh(a, b, c, 0); + let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); assert_eq_m128h(r, e); - let r = _mm_mask3_fmadd_sch(a, b, c, 1); - let e = _mm_setr_ph(-2.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_mask3_fmsub_sh(a, b, c, 1); + let e = _mm_setr_ph(-1.0, 30., 31., 32., 33., 34., 35., 36.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fmadd_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_maskz_fmadd_sch(0, a, b, c); - let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_maskz_fmsub_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_maskz_fmsub_sh(0, a, b, c); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_maskz_fmadd_sch(1, a, b, c); - let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let r = _mm_maskz_fmsub_sh(1, a, b, c); + let e = 
_mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fmadd_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); - let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_fmsub_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fmadd_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_mask_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm_mask_fmsub_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, 0, b, c, ); - let e = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_mask_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm_mask_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, 1, b, c, ); - let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask3_fmadd_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_mask3_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm_mask3_fmsub_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask3_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, b, c, 0, ); - let e = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); assert_eq_m128h(r, e); - let r = _mm_mask3_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm_mask3_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, b, c, 1, ); - let e = _mm_setr_ph(-2.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let e = _mm_setr_ph(-1.0, 30., 31., 32., 33., 34., 35., 36.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fmadd_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let 
c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_maskz_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm_maskz_fmsub_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_maskz_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( 0, a, b, c, ); - let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_maskz_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm_maskz_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( 1, a, b, c, ); - let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_fcmadd_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, 2.0); - let c = _mm_set1_pch(0.0, 3.0); - let r = _mm_fcmadd_pch(a, b, c); - let e = _mm_set1_pch(2.0, 3.0); + unsafe fn test_mm_fnmadd_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_fnmadd_ph(a, b, c); + let e = _mm_set1_ph(1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fcmadd_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, 2.0); - let c = _mm_set1_pch(0.0, 3.0); - let r = _mm_mask_fcmadd_pch(a, 0b0101, b, c); - let e = _mm_setr_ph(2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0); + unsafe fn test_mm_mask_fnmadd_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_mask_fnmadd_ph(a, 0b01010101, b, c); + let e = _mm_set_ph(1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask3_fcmadd_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, 2.0); - let c = _mm_set1_pch(0.0, 3.0); - let r = _mm_mask3_fcmadd_pch(a, b, c, 0b0101); - let e = _mm_setr_ph(2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0); + unsafe fn test_mm_mask3_fnmadd_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_mask3_fnmadd_ph(a, b, c, 0b01010101); + let e = _mm_set_ph(3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_fcmadd_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, 2.0); - let c = _mm_set1_pch(0.0, 3.0); - let r = _mm_maskz_fcmadd_pch(0b0101, a, b, c); - let e = _mm_setr_ph(2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0); + unsafe fn test_mm_maskz_fnmadd_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_maskz_fnmadd_ph(0b01010101, a, b, c); + let e = _mm_set_ph(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_fcmadd_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 2.0); - let c = _mm256_set1_pch(0.0, 3.0); - let r = _mm256_fcmadd_pch(a, b, c); - let e = _mm256_set1_pch(2.0, 3.0); + unsafe fn test_mm256_fnmadd_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = 
_mm256_fnmadd_ph(a, b, c); + let e = _mm256_set1_ph(1.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_fcmadd_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 2.0); - let c = _mm256_set1_pch(0.0, 3.0); - let r = _mm256_mask_fcmadd_pch(a, 0b01010101, b, c); - let e = _mm256_setr_ph( - 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, + unsafe fn test_mm256_mask_fnmadd_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_mask_fnmadd_ph(a, 0b0101010101010101, b, c); + let e = _mm256_set_ph( + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask3_fcmadd_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 2.0); - let c = _mm256_set1_pch(0.0, 3.0); - let r = _mm256_mask3_fcmadd_pch(a, b, c, 0b01010101); - let e = _mm256_setr_ph( - 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, + unsafe fn test_mm256_mask3_fnmadd_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_mask3_fnmadd_ph(a, b, c, 0b0101010101010101); + let e = _mm256_set_ph( + 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_fcmadd_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 2.0); - let c = _mm256_set1_pch(0.0, 3.0); - let r = _mm256_maskz_fcmadd_pch(0b01010101, a, b, c); - let e = _mm256_setr_ph( - 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, + unsafe fn test_mm256_maskz_fnmadd_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_maskz_fnmadd_ph(0b0101010101010101, a, b, c); + let e = _mm256_set_ph( + 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fcmadd_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 2.0); - let c = _mm512_set1_pch(0.0, 3.0); - let r = _mm512_fcmadd_pch(a, b, c); - let e = _mm512_set1_pch(2.0, 3.0); + unsafe fn test_mm512_fnmadd_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_fnmadd_ph(a, b, c); + let e = _mm512_set1_ph(1.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fcmadd_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 2.0); - let c = _mm512_set1_pch(0.0, 3.0); - let r = _mm512_mask_fcmadd_pch(a, 0b0101010101010101, b, c); - let e = _mm512_setr_ph( - 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, - 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, + unsafe fn test_mm512_mask_fnmadd_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask_fnmadd_ph(a, 0b01010101010101010101010101010101, b, c); + let e = _mm512_set_ph( + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = 
"avx512fp16")] - unsafe fn test_mm512_mask3_fcmadd_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 2.0); - let c = _mm512_set1_pch(0.0, 3.0); - let r = _mm512_mask3_fcmadd_pch(a, b, c, 0b0101010101010101); - let e = _mm512_setr_ph( - 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, - 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, + unsafe fn test_mm512_mask3_fnmadd_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask3_fnmadd_ph(a, b, c, 0b01010101010101010101010101010101); + let e = _mm512_set_ph( + 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, + 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fcmadd_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 2.0); - let c = _mm512_set1_pch(0.0, 3.0); - let r = _mm512_maskz_fcmadd_pch(0b0101010101010101, a, b, c); - let e = _mm512_setr_ph( - 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, - 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, + unsafe fn test_mm512_maskz_fnmadd_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_maskz_fnmadd_ph(0b01010101010101010101010101010101, a, b, c); + let e = _mm512_set_ph( + 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, + 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fcmadd_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 2.0); - let c = _mm512_set1_pch(0.0, 3.0); + unsafe fn test_mm512_fnmadd_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); let r = - _mm512_fcmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); - let e = _mm512_set1_pch(2.0, 3.0); + _mm512_fnmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm512_set1_ph(1.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fcmadd_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 2.0); - let c = _mm512_set1_pch(0.0, 3.0); - let r = _mm512_mask_fcmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_mask_fnmadd_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask_fnmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, - 0b0101010101010101, + 0b01010101010101010101010101010101, b, c, ); - let e = _mm512_setr_ph( - 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, - 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, + let e = _mm512_set_ph( + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fcmadd_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 2.0); - let c = _mm512_set1_pch(0.0, 3.0); - let r = 
_mm512_mask3_fcmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_mask3_fnmadd_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask3_fnmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, b, c, - 0b0101010101010101, + 0b01010101010101010101010101010101, ); - let e = _mm512_setr_ph( - 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, - 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, + let e = _mm512_set_ph( + 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, + 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fcmadd_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 2.0); - let c = _mm512_set1_pch(0.0, 3.0); - let r = _mm512_maskz_fcmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b0101010101010101, + unsafe fn test_mm512_maskz_fnmadd_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_maskz_fnmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101010101010101010101010101, a, b, c, ); - let e = _mm512_setr_ph( - 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, - 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, + let e = _mm512_set_ph( + 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, + 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fcmadd_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_fcmadd_sch(a, b, c); - let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_fnmadd_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_fnmadd_sh(a, b, c); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fcmadd_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_mask_fcmadd_sch(a, 0, b, c); - let e = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_mask_fnmadd_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_fnmadd_sh(a, 0, b, c); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_mask_fcmadd_sch(a, 1, b, c); - let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let r = _mm_mask_fnmadd_sh(a, 1, b, c); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn 
test_mm_mask3_fcmadd_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_mask3_fcmadd_sch(a, b, c, 0); - let e = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + unsafe fn test_mm_mask3_fnmadd_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask3_fnmadd_sh(a, b, c, 0); + let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); assert_eq_m128h(r, e); - let r = _mm_mask3_fcmadd_sch(a, b, c, 1); - let e = _mm_setr_ph(2.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_mask3_fnmadd_sh(a, b, c, 1); + let e = _mm_setr_ph(1.0, 30., 31., 32., 33., 34., 35., 36.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fcmadd_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_maskz_fcmadd_sch(0, a, b, c); - let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_maskz_fnmadd_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_maskz_fnmadd_sh(0, a, b, c); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_maskz_fcmadd_sch(1, a, b, c); - let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let r = _mm_maskz_fnmadd_sh(1, a, b, c); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fcmadd_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); - let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_fnmadd_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fcmadd_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_mask_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm_mask_fnmadd_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, 0, b, c, ); - let e = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let e = 
_mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_mask_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm_mask_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, 1, b, c, ); - let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask3_fcmadd_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_mask3_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm_mask3_fnmadd_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask3_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, b, c, 0, ); - let e = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); assert_eq_m128h(r, e); - let r = _mm_mask3_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm_mask3_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, b, c, 1, ); - let e = _mm_setr_ph(2.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let e = _mm_setr_ph(1.0, 30., 31., 32., 33., 34., 35., 36.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fcmadd_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_maskz_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm_maskz_fnmadd_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_maskz_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( 0, a, b, c, ); - let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_maskz_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm_maskz_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( 1, a, b, c, ); - let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_fmadd_ph() { + unsafe fn test_mm_fnmsub_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); - let r = _mm_fmadd_ph(a, b, c); - let e = _mm_set1_ph(5.0); + let r = _mm_fnmsub_ph(a, b, c); + let e = _mm_set1_ph(-5.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fmadd_ph() { + unsafe fn test_mm_mask_fnmsub_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); - let r = _mm_mask_fmadd_ph(a, 0b01010101, b, c); - let e = _mm_set_ph(1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0); + let r = _mm_mask_fnmsub_ph(a, 0b01010101, b, c); + 
let e = _mm_set_ph(1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask3_fmadd_ph() { + unsafe fn test_mm_mask3_fnmsub_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); - let r = _mm_mask3_fmadd_ph(a, b, c, 0b01010101); - let e = _mm_set_ph(3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0); + let r = _mm_mask3_fnmsub_ph(a, b, c, 0b01010101); + let e = _mm_set_ph(3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_fmadd_ph() { + unsafe fn test_mm_maskz_fnmsub_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); - let r = _mm_maskz_fmadd_ph(0b01010101, a, b, c); - let e = _mm_set_ph(0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0); + let r = _mm_maskz_fnmsub_ph(0b01010101, a, b, c); + let e = _mm_set_ph(0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_fmadd_ph() { + unsafe fn test_mm256_fnmsub_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); - let r = _mm256_fmadd_ph(a, b, c); - let e = _mm256_set1_ph(5.0); + let r = _mm256_fnmsub_ph(a, b, c); + let e = _mm256_set1_ph(-5.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_fmadd_ph() { + unsafe fn test_mm256_mask_fnmsub_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); - let r = _mm256_mask_fmadd_ph(a, 0b0101010101010101, b, c); + let r = _mm256_mask_fnmsub_ph(a, 0b0101010101010101, b, c); let e = _mm256_set_ph( - 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, + 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask3_fmadd_ph() { + unsafe fn test_mm256_mask3_fnmsub_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); - let r = _mm256_mask3_fmadd_ph(a, b, c, 0b0101010101010101); + let r = _mm256_mask3_fnmsub_ph(a, b, c, 0b0101010101010101); let e = _mm256_set_ph( - 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, + 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_fmadd_ph() { + unsafe fn test_mm256_maskz_fnmsub_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); - let r = _mm256_maskz_fmadd_ph(0b0101010101010101, a, b, c); + let r = _mm256_maskz_fnmsub_ph(0b0101010101010101, a, b, c); let e = _mm256_set_ph( - 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, + 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmadd_ph() { + unsafe fn test_mm512_fnmsub_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); - let r = _mm512_fmadd_ph(a, b, c); - let e = _mm512_set1_ph(5.0); + let r = _mm512_fnmsub_ph(a, b, c); + let e = _mm512_set1_ph(-5.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fmadd_ph() { + unsafe fn test_mm512_mask_fnmsub_ph() { let a = 
_mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); - let r = _mm512_mask_fmadd_ph(a, 0b01010101010101010101010101010101, b, c); + let r = _mm512_mask_fnmsub_ph(a, 0b01010101010101010101010101010101, b, c); let e = _mm512_set_ph( - 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, - 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, + 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, + 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fmadd_ph() { + unsafe fn test_mm512_mask3_fnmsub_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); - let r = _mm512_mask3_fmadd_ph(a, b, c, 0b01010101010101010101010101010101); + let r = _mm512_mask3_fnmsub_ph(a, b, c, 0b01010101010101010101010101010101); let e = _mm512_set_ph( - 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, - 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, + 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, + 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmadd_ph() { + unsafe fn test_mm512_maskz_fnmsub_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); - let r = _mm512_maskz_fmadd_ph(0b01010101010101010101010101010101, a, b, c); + let r = _mm512_maskz_fnmsub_ph(0b01010101010101010101010101010101, a, b, c); let e = _mm512_set_ph( - 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, - 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, + 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, + 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmadd_round_ph() { + unsafe fn test_mm512_fnmsub_round_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); - let r = _mm512_fmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); - let e = _mm512_set1_ph(5.0); + let r = + _mm512_fnmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm512_set1_ph(-5.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fmadd_round_ph() { + unsafe fn test_mm512_mask_fnmsub_round_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); - let r = _mm512_mask_fmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm512_mask_fnmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, 0b01010101010101010101010101010101, b, c, ); let e = _mm512_set_ph( - 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, - 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, + 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, + 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn 
test_mm512_mask3_fmadd_round_ph() { + unsafe fn test_mm512_mask3_fnmsub_round_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); - let r = _mm512_mask3_fmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm512_mask3_fnmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, b, c, 0b01010101010101010101010101010101, ); let e = _mm512_set_ph( - 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, - 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, + 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, + 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmadd_round_ph() { + unsafe fn test_mm512_maskz_fnmsub_round_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); - let r = _mm512_maskz_fmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm512_maskz_fnmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( 0b01010101010101010101010101010101, a, b, c, ); let e = _mm512_set_ph( - 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, - 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, + 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, + 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fmadd_sh() { + unsafe fn test_mm_fnmsub_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_fmadd_sh(a, b, c); - let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.); + let r = _mm_fnmsub_sh(a, b, c); + let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fmadd_sh() { + unsafe fn test_mm_mask_fnmsub_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_fmadd_sh(a, 0, b, c); + let r = _mm_mask_fnmsub_sh(a, 0, b, c); let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_mask_fmadd_sh(a, 1, b, c); - let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.); + let r = _mm_mask_fnmsub_sh(a, 1, b, c); + let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask3_fmadd_sh() { + unsafe fn test_mm_mask3_fnmsub_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask3_fmadd_sh(a, b, c, 0); + let r = _mm_mask3_fnmsub_sh(a, b, c, 0); let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); assert_eq_m128h(r, e); - let r = _mm_mask3_fmadd_sh(a, b, c, 1); - let e = _mm_setr_ph(5.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask3_fnmsub_sh(a, b, c, 1); + let e = _mm_setr_ph(-5.0, 30., 31., 32., 33., 34., 35., 36.); assert_eq_m128h(r, e); } 
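+    // `fnmsub` computes -(a * b) - c, so with a = 1.0, b = 2.0 and c = 3.0 a
+    // selected lane becomes -(1.0 * 2.0) - 3.0 = -5.0; an unselected lane
+    // keeps `a` (mask), `c` (mask3), or is zeroed (maskz), which is what the
+    // expected vectors in the surrounding tests encode.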
#[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fmadd_sh() { + unsafe fn test_mm_maskz_fnmsub_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_maskz_fmadd_sh(0, a, b, c); + let r = _mm_maskz_fnmsub_sh(0, a, b, c); let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_maskz_fmadd_sh(1, a, b, c); - let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.); + let r = _mm_maskz_fnmsub_sh(1, a, b, c); + let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fmadd_round_sh() { + unsafe fn test_mm_fnmsub_round_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); - let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.); + let r = _mm_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fmadd_round_sh() { + unsafe fn test_mm_mask_fnmsub_round_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm_mask_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, 0, b, c, ); let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_mask_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm_mask_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, 1, b, c, ); - let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.); + let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask3_fmadd_round_sh() { + unsafe fn test_mm_mask3_fnmsub_round_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask3_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm_mask3_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, b, c, 0, ); let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); assert_eq_m128h(r, e); - let r = _mm_mask3_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm_mask3_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, b, c, 1, ); - let e = _mm_setr_ph(5.0, 30., 31., 32., 33., 34., 35., 36.); + let e = _mm_setr_ph(-5.0, 30., 31., 32., 33., 34., 35., 36.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fmadd_round_sh() { + unsafe fn test_mm_maskz_fnmsub_round_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_maskz_fmadd_round_sh::<{ 
_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm_maskz_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( 0, a, b, c, ); let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_maskz_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm_maskz_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( 1, a, b, c, ); - let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.); + let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_fmsub_ph() { + unsafe fn test_mm_fmaddsub_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); - let r = _mm_fmsub_ph(a, b, c); - let e = _mm_set1_ph(-1.0); + let r = _mm_fmaddsub_ph(a, b, c); + let e = _mm_set_ph(5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fmsub_ph() { + unsafe fn test_mm_mask_fmaddsub_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); - let r = _mm_mask_fmsub_ph(a, 0b01010101, b, c); - let e = _mm_set_ph(1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0); + let r = _mm_mask_fmaddsub_ph(a, 0b00110011, b, c); + let e = _mm_set_ph(1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask3_fmsub_ph() { + unsafe fn test_mm_mask3_fmaddsub_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); - let r = _mm_mask3_fmsub_ph(a, b, c, 0b01010101); - let e = _mm_set_ph(3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0); + let r = _mm_mask3_fmaddsub_ph(a, b, c, 0b00110011); + let e = _mm_set_ph(3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_fmsub_ph() { + unsafe fn test_mm_maskz_fmaddsub_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); - let r = _mm_maskz_fmsub_ph(0b01010101, a, b, c); - let e = _mm_set_ph(0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0); + let r = _mm_maskz_fmaddsub_ph(0b00110011, a, b, c); + let e = _mm_set_ph(0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_fmsub_ph() { + unsafe fn test_mm256_fmaddsub_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); - let r = _mm256_fmsub_ph(a, b, c); - let e = _mm256_set1_ph(-1.0); + let r = _mm256_fmaddsub_ph(a, b, c); + let e = _mm256_set_ph( + 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, + ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_fmsub_ph() { + unsafe fn test_mm256_mask_fmaddsub_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); - let r = _mm256_mask_fmsub_ph(a, 0b0101010101010101, b, c); + let r = _mm256_mask_fmaddsub_ph(a, 0b0011001100110011, b, c); let e = _mm256_set_ph( - 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, + 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask3_fmsub_ph() { + unsafe fn test_mm256_mask3_fmaddsub_ph() { let a = _mm256_set1_ph(1.0); let b = 
_mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); - let r = _mm256_mask3_fmsub_ph(a, b, c, 0b0101010101010101); + let r = _mm256_mask3_fmaddsub_ph(a, b, c, 0b0011001100110011); let e = _mm256_set_ph( - 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, + 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_fmsub_ph() { + unsafe fn test_mm256_maskz_fmaddsub_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); - let r = _mm256_maskz_fmsub_ph(0b0101010101010101, a, b, c); + let r = _mm256_maskz_fmaddsub_ph(0b0011001100110011, a, b, c); let e = _mm256_set_ph( - 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, + 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmsub_ph() { + unsafe fn test_mm512_fmaddsub_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); - let r = _mm512_fmsub_ph(a, b, c); - let e = _mm512_set1_ph(-1.0); + let r = _mm512_fmaddsub_ph(a, b, c); + let e = _mm512_set_ph( + 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, + 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, + ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fmsub_ph() { + unsafe fn test_mm512_mask_fmaddsub_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); - let r = _mm512_mask_fmsub_ph(a, 0b01010101010101010101010101010101, b, c); + let r = _mm512_mask_fmaddsub_ph(a, 0b00110011001100110011001100110011, b, c); let e = _mm512_set_ph( - 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, - 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, + 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, + 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fmsub_ph() { + unsafe fn test_mm512_mask3_fmaddsub_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); - let r = _mm512_mask3_fmsub_ph(a, b, c, 0b01010101010101010101010101010101); + let r = _mm512_mask3_fmaddsub_ph(a, b, c, 0b00110011001100110011001100110011); let e = _mm512_set_ph( - 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, - 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, + 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, + 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmsub_ph() { + unsafe fn test_mm512_maskz_fmaddsub_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); - let r = _mm512_maskz_fmsub_ph(0b01010101010101010101010101010101, a, b, c); + let r = _mm512_maskz_fmaddsub_ph(0b00110011001100110011001100110011, a, b, c); let e = _mm512_set_ph( - 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, - 0.0, -1.0, 
0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, + 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, + 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmsub_round_ph() { + unsafe fn test_mm512_fmaddsub_round_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); - let r = _mm512_fmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); - let e = _mm512_set1_ph(-1.0); + let r = + _mm512_fmaddsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm512_set_ph( + 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, + 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, + ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fmsub_round_ph() { + unsafe fn test_mm512_mask_fmaddsub_round_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); - let r = _mm512_mask_fmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm512_mask_fmaddsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, - 0b01010101010101010101010101010101, + 0b00110011001100110011001100110011, b, c, ); let e = _mm512_set_ph( - 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, - 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, + 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, + 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fmsub_round_ph() { + unsafe fn test_mm512_mask3_fmaddsub_round_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); - let r = _mm512_mask3_fmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm512_mask3_fmaddsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, b, c, - 0b01010101010101010101010101010101, + 0b00110011001100110011001100110011, ); let e = _mm512_set_ph( - 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, - 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, + 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, + 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmsub_round_ph() { + unsafe fn test_mm512_maskz_fmaddsub_round_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); - let r = _mm512_maskz_fmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b01010101010101010101010101010101, + let r = _mm512_maskz_fmaddsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b00110011001100110011001100110011, a, b, c, ); let e = _mm512_set_ph( - 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, - 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, + 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, + 0.0, 0.0, 5.0, -1.0, 
0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, ); assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fmsub_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_fmsub_sh(a, b, c); - let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fmsub_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_fmsub_sh(a, 0, b, c); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_mask_fmsub_sh(a, 1, b, c); - let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask3_fmsub_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask3_fmsub_sh(a, b, c, 0); - let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - assert_eq_m128h(r, e); - let r = _mm_mask3_fmsub_sh(a, b, c, 1); - let e = _mm_setr_ph(-1.0, 30., 31., 32., 33., 34., 35., 36.); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fmsub_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_maskz_fmsub_sh(0, a, b, c); - let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_maskz_fmsub_sh(1, a, b, c); - let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fmsub_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); - let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fmsub_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, 0, b, c, - ); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_mask_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, 1, b, c, - ); - let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask3_fmsub_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask3_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC 
}>( - a, b, c, 0, - ); - let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - assert_eq_m128h(r, e); - let r = _mm_mask3_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, b, c, 1, - ); - let e = _mm_setr_ph(-1.0, 30., 31., 32., 33., 34., 35., 36.); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fmsub_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_maskz_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0, a, b, c, - ); - let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_maskz_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 1, a, b, c, - ); - let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_fnmadd_ph() { + unsafe fn test_mm_fmsubadd_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); - let r = _mm_fnmadd_ph(a, b, c); - let e = _mm_set1_ph(1.0); + let r = _mm_fmsubadd_ph(a, b, c); + let e = _mm_set_ph(-1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fnmadd_ph() { + unsafe fn test_mm_mask_fmsubadd_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); - let r = _mm_mask_fnmadd_ph(a, 0b01010101, b, c); - let e = _mm_set_ph(1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0); + let r = _mm_mask_fmsubadd_ph(a, 0b00110011, b, c); + let e = _mm_set_ph(1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask3_fnmadd_ph() { + unsafe fn test_mm_mask3_fmsubadd_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); - let r = _mm_mask3_fnmadd_ph(a, b, c, 0b01010101); - let e = _mm_set_ph(3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0); + let r = _mm_mask3_fmsubadd_ph(a, b, c, 0b00110011); + let e = _mm_set_ph(3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_fnmadd_ph() { + unsafe fn test_mm_maskz_fmsubadd_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); - let r = _mm_maskz_fnmadd_ph(0b01010101, a, b, c); - let e = _mm_set_ph(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0); + let r = _mm_maskz_fmsubadd_ph(0b00110011, a, b, c); + let e = _mm_set_ph(0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_fnmadd_ph() { + unsafe fn test_mm256_fmsubadd_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); - let r = _mm256_fnmadd_ph(a, b, c); - let e = _mm256_set1_ph(1.0); + let r = _mm256_fmsubadd_ph(a, b, c); + let e = _mm256_set_ph( + -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, + ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_fnmadd_ph() { + unsafe fn test_mm256_mask_fmsubadd_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); - let r = _mm256_mask_fnmadd_ph(a, 0b0101010101010101, b, c); + let r = _mm256_mask_fmsubadd_ph(a, 0b0011001100110011, b, c); 
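+        // fmsubadd adds `c` on even-indexed lanes and subtracts it on odd
+        // ones, so selected lanes alternate 5.0/-1.0 (low to high) while
+        // masked-off lanes keep `a`.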
let e = _mm256_set_ph( - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask3_fnmadd_ph() { + unsafe fn test_mm256_mask3_fmsubadd_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); - let r = _mm256_mask3_fnmadd_ph(a, b, c, 0b0101010101010101); + let r = _mm256_mask3_fmsubadd_ph(a, b, c, 0b0011001100110011); let e = _mm256_set_ph( - 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, + 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_fnmadd_ph() { + unsafe fn test_mm256_maskz_fmsubadd_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); - let r = _mm256_maskz_fnmadd_ph(0b0101010101010101, a, b, c); + let r = _mm256_maskz_fmsubadd_ph(0b0011001100110011, a, b, c); let e = _mm256_set_ph( - 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, + 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fnmadd_ph() { + unsafe fn test_mm512_fmsubadd_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); - let r = _mm512_fnmadd_ph(a, b, c); - let e = _mm512_set1_ph(1.0); + let r = _mm512_fmsubadd_ph(a, b, c); + let e = _mm512_set_ph( + -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, + -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, + ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fnmadd_ph() { + unsafe fn test_mm512_mask_fmsubadd_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); - let r = _mm512_mask_fnmadd_ph(a, 0b01010101010101010101010101010101, b, c); + let r = _mm512_mask_fmsubadd_ph(a, 0b00110011001100110011001100110011, b, c); let e = _mm512_set_ph( - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, + 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fnmadd_ph() { + unsafe fn test_mm512_mask3_fmsubadd_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); - let r = _mm512_mask3_fnmadd_ph(a, b, c, 0b01010101010101010101010101010101); + let r = _mm512_mask3_fmsubadd_ph(a, b, c, 0b00110011001100110011001100110011); let e = _mm512_set_ph( - 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, - 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, + 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, + 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fnmadd_ph() { + unsafe fn test_mm512_maskz_fmsubadd_ph() { let a = 
_mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); - let r = _mm512_maskz_fnmadd_ph(0b01010101010101010101010101010101, a, b, c); + let r = _mm512_maskz_fmsubadd_ph(0b00110011001100110011001100110011, a, b, c); let e = _mm512_set_ph( - 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, - 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, + 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, + 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fnmadd_round_ph() { + unsafe fn test_mm512_fmsubadd_round_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); let r = - _mm512_fnmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); - let e = _mm512_set1_ph(1.0); + _mm512_fmsubadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm512_set_ph( + -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, + -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, + ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fnmadd_round_ph() { + unsafe fn test_mm512_mask_fmsubadd_round_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); - let r = _mm512_mask_fnmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm512_mask_fmsubadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, - 0b01010101010101010101010101010101, + 0b00110011001100110011001100110011, b, c, ); let e = _mm512_set_ph( - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, + 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fnmadd_round_ph() { + unsafe fn test_mm512_mask3_fmsubadd_round_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); - let r = _mm512_mask3_fnmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm512_mask3_fmsubadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, b, c, - 0b01010101010101010101010101010101, + 0b00110011001100110011001100110011, ); let e = _mm512_set_ph( - 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, - 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, + 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, + 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fnmadd_round_ph() { + unsafe fn test_mm512_maskz_fmsubadd_round_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); - let r = _mm512_maskz_fnmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b01010101010101010101010101010101, + let r = _mm512_maskz_fmsubadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b00110011001100110011001100110011, a, b, c, 
); let e = _mm512_set_ph( - 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, - 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, + 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, + 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, ); assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fnmadd_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_fnmadd_sh(a, b, c); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_rcp_ph() { + let a = _mm_set1_ph(2.0); + let r = _mm_rcp_ph(a); + let e = _mm_set1_ph(0.5); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fnmadd_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_fnmadd_sh(a, 0, b, c); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_mask_fnmadd_sh(a, 1, b, c); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_rcp_ph() { + let a = _mm_set1_ph(2.0); + let src = _mm_set1_ph(1.0); + let r = _mm_mask_rcp_ph(src, 0b01010101, a); + let e = _mm_set_ph(1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask3_fnmadd_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask3_fnmadd_sh(a, b, c, 0); - let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - assert_eq_m128h(r, e); - let r = _mm_mask3_fnmadd_sh(a, b, c, 1); - let e = _mm_setr_ph(1.0, 30., 31., 32., 33., 34., 35., 36.); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_rcp_ph() { + let a = _mm_set1_ph(2.0); + let r = _mm_maskz_rcp_ph(0b01010101, a); + let e = _mm_set_ph(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fnmadd_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_maskz_fnmadd_sh(0, a, b, c); - let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_maskz_fnmadd_sh(1, a, b, c); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_rcp_ph() { + let a = _mm256_set1_ph(2.0); + let r = _mm256_rcp_ph(a); + let e = _mm256_set1_ph(0.5); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_rcp_ph() { + let a = _mm256_set1_ph(2.0); + let src = _mm256_set1_ph(1.0); + let r = _mm256_mask_rcp_ph(src, 0b0101010101010101, a); + let e = _mm256_set_ph( + 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, + ); + assert_eq_m256h(r, e); + } + + 
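+    // `rcp` is an approximate reciprocal (Intel documents a maximum relative
+    // error on the order of 2^-11), but for power-of-two inputs such as 2.0
+    // the approximation is expected to be exact, so comparing directly
+    // against 0.5 is reliable in these tests.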
#[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_rcp_ph() { + let a = _mm256_set1_ph(2.0); + let r = _mm256_maskz_rcp_ph(0b0101010101010101, a); + let e = _mm256_set_ph( + 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, + ); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fnmadd_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); + unsafe fn test_mm512_rcp_ph() { + let a = _mm512_set1_ph(2.0); + let r = _mm512_rcp_ph(a); + let e = _mm512_set1_ph(0.5); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fnmadd_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, 0, b, c, - ); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_mask_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, 1, b, c, + unsafe fn test_mm512_mask_rcp_ph() { + let a = _mm512_set1_ph(2.0); + let src = _mm512_set1_ph(1.0); + let r = _mm512_mask_rcp_ph(src, 0b01010101010101010101010101010101, a); + let e = _mm512_set_ph( + 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, + 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, ); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask3_fnmadd_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask3_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, b, c, 0, - ); - let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - assert_eq_m128h(r, e); - let r = _mm_mask3_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, b, c, 1, + unsafe fn test_mm512_maskz_rcp_ph() { + let a = _mm512_set1_ph(2.0); + let r = _mm512_maskz_rcp_ph(0b01010101010101010101010101010101, a); + let e = _mm512_set_ph( + 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, + 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, ); - let e = _mm_setr_ph(1.0, 30., 31., 32., 33., 34., 35., 36.); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_rcp_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let r = _mm_rcp_sh(a, b); + let e = _mm_setr_ph(0.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fnmadd_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 
36.); - let r = _mm_maskz_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0, a, b, c, - ); - let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); + unsafe fn test_mm_mask_rcp_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); + let r = _mm_mask_rcp_sh(src, 0, a, b); + let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); - let r = _mm_maskz_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 1, a, b, c, - ); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let r = _mm_mask_rcp_sh(src, 1, a, b); + let e = _mm_setr_ph(0.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_fnmsub_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_fnmsub_ph(a, b, c); - let e = _mm_set1_ph(-5.0); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_rcp_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let r = _mm_maskz_rcp_sh(0, a, b); + let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + let r = _mm_maskz_rcp_sh(1, a, b); + let e = _mm_setr_ph(0.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fnmsub_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_mask_fnmsub_ph(a, 0b01010101, b, c); - let e = _mm_set_ph(1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0); + unsafe fn test_mm_rsqrt_ph() { + let a = _mm_set1_ph(4.0); + let r = _mm_rsqrt_ph(a); + let e = _mm_set1_ph(0.5); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask3_fnmsub_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_mask3_fnmsub_ph(a, b, c, 0b01010101); - let e = _mm_set_ph(3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0); + unsafe fn test_mm_mask_rsqrt_ph() { + let a = _mm_set1_ph(4.0); + let src = _mm_set1_ph(1.0); + let r = _mm_mask_rsqrt_ph(src, 0b01010101, a); + let e = _mm_set_ph(1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_fnmsub_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_maskz_fnmsub_ph(0b01010101, a, b, c); - let e = _mm_set_ph(0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0); + unsafe fn test_mm_maskz_rsqrt_ph() { + let a = _mm_set1_ph(4.0); + let r = _mm_maskz_rsqrt_ph(0b01010101, a); + let e = _mm_set_ph(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_fnmsub_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_fnmsub_ph(a, b, c); - let e = _mm256_set1_ph(-5.0); + unsafe fn test_mm256_rsqrt_ph() { + let a = _mm256_set1_ph(4.0); + let r = _mm256_rsqrt_ph(a); + let e = _mm256_set1_ph(0.5); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_fnmsub_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - 
let c = _mm256_set1_ph(3.0); - let r = _mm256_mask_fnmsub_ph(a, 0b0101010101010101, b, c); + unsafe fn test_mm256_mask_rsqrt_ph() { + let a = _mm256_set1_ph(4.0); + let src = _mm256_set1_ph(1.0); + let r = _mm256_mask_rsqrt_ph(src, 0b0101010101010101, a); let e = _mm256_set_ph( - 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, + 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask3_fnmsub_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_mask3_fnmsub_ph(a, b, c, 0b0101010101010101); + unsafe fn test_mm256_maskz_rsqrt_ph() { + let a = _mm256_set1_ph(4.0); + let r = _mm256_maskz_rsqrt_ph(0b0101010101010101, a); let e = _mm256_set_ph( - 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, + 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, ); assert_eq_m256h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_fnmsub_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_maskz_fnmsub_ph(0b0101010101010101, a, b, c); - let e = _mm256_set_ph( - 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, - ); - assert_eq_m256h(r, e); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_rsqrt_ph() { + let a = _mm512_set1_ph(4.0); + let r = _mm512_rsqrt_ph(a); + let e = _mm512_set1_ph(0.5); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fnmsub_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_fnmsub_ph(a, b, c); - let e = _mm512_set1_ph(-5.0); + unsafe fn test_mm512_mask_rsqrt_ph() { + let a = _mm512_set1_ph(4.0); + let src = _mm512_set1_ph(1.0); + let r = _mm512_mask_rsqrt_ph(src, 0b01010101010101010101010101010101, a); + let e = _mm512_set_ph( + 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, + 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, + ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fnmsub_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask_fnmsub_ph(a, 0b01010101010101010101010101010101, b, c); + unsafe fn test_mm512_maskz_rsqrt_ph() { + let a = _mm512_set1_ph(4.0); + let r = _mm512_maskz_rsqrt_ph(0b01010101010101010101010101010101, a); let e = _mm512_set_ph( - 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, - 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, + 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, + 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_rsqrt_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); + let r = _mm_rsqrt_sh(a, b); + let e = _mm_setr_ph(0.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_rsqrt_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 
13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); + let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); + let r = _mm_mask_rsqrt_sh(src, 0, a, b); + let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + let r = _mm_mask_rsqrt_sh(src, 1, a, b); + let e = _mm_setr_ph(0.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_rsqrt_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); + let r = _mm_maskz_rsqrt_sh(0, a, b); + let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + let r = _mm_maskz_rsqrt_sh(1, a, b); + let e = _mm_setr_ph(0.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_sqrt_ph() { + let a = _mm_set1_ph(4.0); + let r = _mm_sqrt_ph(a); + let e = _mm_set1_ph(2.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_sqrt_ph() { + let a = _mm_set1_ph(4.0); + let src = _mm_set1_ph(1.0); + let r = _mm_mask_sqrt_ph(src, 0b01010101, a); + let e = _mm_set_ph(1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_sqrt_ph() { + let a = _mm_set1_ph(4.0); + let r = _mm_maskz_sqrt_ph(0b01010101, a); + let e = _mm_set_ph(0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_sqrt_ph() { + let a = _mm256_set1_ph(4.0); + let r = _mm256_sqrt_ph(a); + let e = _mm256_set1_ph(2.0); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_sqrt_ph() { + let a = _mm256_set1_ph(4.0); + let src = _mm256_set1_ph(1.0); + let r = _mm256_mask_sqrt_ph(src, 0b0101010101010101, a); + let e = _mm256_set_ph( + 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_sqrt_ph() { + let a = _mm256_set1_ph(4.0); + let r = _mm256_maskz_sqrt_ph(0b0101010101010101, a); + let e = _mm256_set_ph( + 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, ); - assert_eq_m512h(r, e); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fnmsub_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask3_fnmsub_ph(a, b, c, 0b01010101010101010101010101010101); - let e = _mm512_set_ph( - 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, - 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, - ); + unsafe fn test_mm512_sqrt_ph() { + let a = _mm512_set1_ph(4.0); + let r = _mm512_sqrt_ph(a); + let e = _mm512_set1_ph(2.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fnmsub_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_maskz_fnmsub_ph(0b01010101010101010101010101010101, a, b, c); + unsafe fn test_mm512_mask_sqrt_ph() { + let a = _mm512_set1_ph(4.0); + let src = _mm512_set1_ph(1.0); + let r = 
_mm512_mask_sqrt_ph(src, 0b01010101010101010101010101010101, a); let e = _mm512_set_ph( - 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, - 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, + 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, + 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fnmsub_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = - _mm512_fnmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); - let e = _mm512_set1_ph(-5.0); + unsafe fn test_mm512_maskz_sqrt_ph() { + let a = _mm512_set1_ph(4.0); + let r = _mm512_maskz_sqrt_ph(0b01010101010101010101010101010101, a); + let e = _mm512_set_ph( + 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, + 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, + ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fnmsub_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask_fnmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, - 0b01010101010101010101010101010101, - b, - c, - ); - let e = _mm512_set_ph( - 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, - 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, - ); + unsafe fn test_mm512_sqrt_round_ph() { + let a = _mm512_set1_ph(4.0); + let r = _mm512_sqrt_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a); + let e = _mm512_set1_ph(2.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fnmsub_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask3_fnmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, - b, - c, + unsafe fn test_mm512_mask_sqrt_round_ph() { + let a = _mm512_set1_ph(4.0); + let src = _mm512_set1_ph(1.0); + let r = _mm512_mask_sqrt_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0b01010101010101010101010101010101, + a, ); let e = _mm512_set_ph( - 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, - 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, + 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, + 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fnmsub_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_maskz_fnmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_maskz_sqrt_round_ph() { + let a = _mm512_set1_ph(4.0); + let r = _mm512_maskz_sqrt_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( 0b01010101010101010101010101010101, a, - b, - c, ); let e = _mm512_set_ph( - 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, - 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, + 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 
2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, + 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fnmsub_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_fnmsub_sh(a, b, c); - let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fnmsub_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_fnmsub_sh(a, 0, b, c); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_mask_fnmsub_sh(a, 1, b, c); - let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); + unsafe fn test_mm_sqrt_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); + let r = _mm_sqrt_sh(a, b); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask3_fnmsub_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask3_fnmsub_sh(a, b, c, 0); - let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + unsafe fn test_mm_mask_sqrt_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); + let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); + let r = _mm_mask_sqrt_sh(src, 0, a, b); + let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); - let r = _mm_mask3_fnmsub_sh(a, b, c, 1); - let e = _mm_setr_ph(-5.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_sqrt_sh(src, 1, a, b); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fnmsub_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_maskz_fnmsub_sh(0, a, b, c); - let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_maskz_fnmsub_sh(1, a, b, c); - let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); + unsafe fn test_mm_maskz_sqrt_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); + let r = _mm_maskz_sqrt_sh(0, a, b); + let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fnmsub_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); - let e = _mm_setr_ph(-5.0, 10., 11., 12., 
13., 14., 15., 16.); + let r = _mm_maskz_sqrt_sh(1, a, b); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fnmsub_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, 0, b, c, - ); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_mask_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, 1, b, c, - ); - let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); + unsafe fn test_mm_sqrt_round_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); + let r = _mm_sqrt_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask3_fnmsub_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask3_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, b, c, 0, + unsafe fn test_mm_mask_sqrt_round_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); + let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); + let r = _mm_mask_sqrt_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, ); - let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); - let r = _mm_mask3_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, b, c, 1, + let r = _mm_mask_sqrt_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 1, a, b, ); - let e = _mm_setr_ph(-5.0, 30., 31., 32., 33., 34., 35., 36.); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fnmsub_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_maskz_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0, a, b, c, - ); - let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_maskz_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 1, a, b, c, - ); - let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); + unsafe fn test_mm_maskz_sqrt_round_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); + let r = + _mm_maskz_sqrt_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_fmaddsub_ph() { - let a = _mm_set1_ph(1.0); - let 
b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_fmaddsub_ph(a, b, c); - let e = _mm_set_ph(5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0); + let r = + _mm_maskz_sqrt_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fmaddsub_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_mask_fmaddsub_ph(a, 0b00110011, b, c); - let e = _mm_set_ph(1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0); + unsafe fn test_mm_max_ph() { + let a = _mm_set1_ph(2.0); + let b = _mm_set1_ph(1.0); + let r = _mm_max_ph(a, b); + let e = _mm_set1_ph(2.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask3_fmaddsub_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_mask3_fmaddsub_ph(a, b, c, 0b00110011); - let e = _mm_set_ph(3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0); + unsafe fn test_mm_mask_max_ph() { + let a = _mm_set1_ph(2.0); + let b = _mm_set1_ph(1.0); + let src = _mm_set1_ph(3.0); + let r = _mm_mask_max_ph(src, 0b01010101, a, b); + let e = _mm_set_ph(3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_fmaddsub_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_maskz_fmaddsub_ph(0b00110011, a, b, c); - let e = _mm_set_ph(0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0); + unsafe fn test_mm_maskz_max_ph() { + let a = _mm_set1_ph(2.0); + let b = _mm_set1_ph(1.0); + let r = _mm_maskz_max_ph(0b01010101, a, b); + let e = _mm_set_ph(0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_fmaddsub_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_fmaddsub_ph(a, b, c); - let e = _mm256_set_ph( - 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, - ); - assert_eq_m256h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_fmaddsub_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_mask_fmaddsub_ph(a, 0b0011001100110011, b, c); - let e = _mm256_set_ph( - 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, - ); + unsafe fn test_mm256_max_ph() { + let a = _mm256_set1_ph(2.0); + let b = _mm256_set1_ph(1.0); + let r = _mm256_max_ph(a, b); + let e = _mm256_set1_ph(2.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask3_fmaddsub_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_mask3_fmaddsub_ph(a, b, c, 0b0011001100110011); + unsafe fn test_mm256_mask_max_ph() { + let a = _mm256_set1_ph(2.0); + let b = _mm256_set1_ph(1.0); + let src = _mm256_set1_ph(3.0); + let r = _mm256_mask_max_ph(src, 0b0101010101010101, a, b); let e = _mm256_set_ph( - 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, + 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_fmaddsub_ph() { - let a = 
_mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_maskz_fmaddsub_ph(0b0011001100110011, a, b, c); + unsafe fn test_mm256_maskz_max_ph() { + let a = _mm256_set1_ph(2.0); + let b = _mm256_set1_ph(1.0); + let r = _mm256_maskz_max_ph(0b0101010101010101, a, b); let e = _mm256_set_ph( - 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, + 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, ); assert_eq_m256h(r, e); } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmaddsub_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_fmaddsub_ph(a, b, c); - let e = _mm512_set_ph( - 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, - 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, - ); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fmaddsub_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask_fmaddsub_ph(a, 0b00110011001100110011001100110011, b, c); - let e = _mm512_set_ph( - 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, - 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, - ); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fmaddsub_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask3_fmaddsub_ph(a, b, c, 0b00110011001100110011001100110011); - let e = _mm512_set_ph( - 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, - 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, - ); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmaddsub_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_maskz_fmaddsub_ph(0b00110011001100110011001100110011, a, b, c); - let e = _mm512_set_ph( - 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, - 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, - ); + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_max_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let r = _mm512_max_ph(a, b); + let e = _mm512_set1_ph(2.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmaddsub_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = - _mm512_fmaddsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + unsafe fn test_mm512_mask_max_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let src = _mm512_set1_ph(3.0); + let r = _mm512_mask_max_ph(src, 0b01010101010101010101010101010101, a, b); let e = _mm512_set_ph( - 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, - 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, + 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, + 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] 
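+    // Reviewer note: `_mm512_set_ph` lists lanes from the highest element down
+    // to the lowest, so mask bit 0 (the least-significant bit) pairs with the
+    // *last* literal in each expected vector below.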
- unsafe fn test_mm512_mask_fmaddsub_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask_fmaddsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, - 0b00110011001100110011001100110011, - b, - c, - ); + unsafe fn test_mm512_maskz_max_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let r = _mm512_maskz_max_ph(0b01010101010101010101010101010101, a, b); let e = _mm512_set_ph( - 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, - 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, + 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, + 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fmaddsub_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask3_fmaddsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_max_round_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let r = _mm512_max_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set1_ph(2.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_max_round_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let src = _mm512_set1_ph(3.0); + let r = _mm512_mask_max_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b01010101010101010101010101010101, a, b, - c, - 0b00110011001100110011001100110011, ); let e = _mm512_set_ph( - 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, - 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, + 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, + 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmaddsub_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_maskz_fmaddsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b00110011001100110011001100110011, + unsafe fn test_mm512_maskz_max_round_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let r = _mm512_maskz_max_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101010101010101010101010101, a, b, - c, ); let e = _mm512_set_ph( - 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, - 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, + 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, + 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, ); assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_fmsubadd_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_fmsubadd_ph(a, b, c); - let e = _mm_set_ph(-1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_max_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 
25.0, 26.0); + let r = _mm_max_sh(a, b); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fmsubadd_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_mask_fmsubadd_ph(a, 0b00110011, b, c); - let e = _mm_set_ph(1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_max_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); + let r = _mm_mask_max_sh(src, 0, a, b); + let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + let r = _mm_mask_max_sh(src, 1, a, b); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_max_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let r = _mm_maskz_max_sh(0, a, b); + let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + let r = _mm_maskz_max_sh(1, a, b); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_max_round_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let r = _mm_max_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_max_round_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); + let r = _mm_mask_max_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, + ); + let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + let r = _mm_mask_max_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 1, a, b, + ); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_max_round_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let r = + _mm_maskz_max_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + let r = + _mm_maskz_max_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask3_fmsubadd_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_mask3_fmsubadd_ph(a, b, c, 0b00110011); - let e = _mm_set_ph(3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0); + unsafe fn test_mm_min_ph() { + let a = _mm_set1_ph(2.0); + let b = 
_mm_set1_ph(1.0); + let r = _mm_min_ph(a, b); + let e = _mm_set1_ph(1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_fmsubadd_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_maskz_fmsubadd_ph(0b00110011, a, b, c); - let e = _mm_set_ph(0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0); + unsafe fn test_mm_mask_min_ph() { + let a = _mm_set1_ph(2.0); + let b = _mm_set1_ph(1.0); + let src = _mm_set1_ph(3.0); + let r = _mm_mask_min_ph(src, 0b01010101, a, b); + let e = _mm_set_ph(3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_fmsubadd_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_fmsubadd_ph(a, b, c); - let e = _mm256_set_ph( - -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, - ); - assert_eq_m256h(r, e); + unsafe fn test_mm_maskz_min_ph() { + let a = _mm_set1_ph(2.0); + let b = _mm_set1_ph(1.0); + let r = _mm_maskz_min_ph(0b01010101, a, b); + let e = _mm_set_ph(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_fmsubadd_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_mask_fmsubadd_ph(a, 0b0011001100110011, b, c); - let e = _mm256_set_ph( - 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, - ); + unsafe fn test_mm256_min_ph() { + let a = _mm256_set1_ph(2.0); + let b = _mm256_set1_ph(1.0); + let r = _mm256_min_ph(a, b); + let e = _mm256_set1_ph(1.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask3_fmsubadd_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_mask3_fmsubadd_ph(a, b, c, 0b0011001100110011); + unsafe fn test_mm256_mask_min_ph() { + let a = _mm256_set1_ph(2.0); + let b = _mm256_set1_ph(1.0); + let src = _mm256_set1_ph(3.0); + let r = _mm256_mask_min_ph(src, 0b0101010101010101, a, b); let e = _mm256_set_ph( - 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, + 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_fmsubadd_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_maskz_fmsubadd_ph(0b0011001100110011, a, b, c); + unsafe fn test_mm256_maskz_min_ph() { + let a = _mm256_set1_ph(2.0); + let b = _mm256_set1_ph(1.0); + let r = _mm256_maskz_min_ph(0b0101010101010101, a, b); let e = _mm256_set_ph( - 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, + 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmsubadd_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_fmsubadd_ph(a, b, c); - let e = _mm512_set_ph( - -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, - -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, - ); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] 
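+    // Reminder: the `mask_` variants keep unselected lanes from `src`, while
+    // the `maskz_` variants zero them; the expected vectors below encode
+    // exactly that blend.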
- unsafe fn test_mm512_mask_fmsubadd_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask_fmsubadd_ph(a, 0b00110011001100110011001100110011, b, c); - let e = _mm512_set_ph( - 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, - 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, - ); + unsafe fn test_mm512_min_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let r = _mm512_min_ph(a, b); + let e = _mm512_set1_ph(1.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fmsubadd_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask3_fmsubadd_ph(a, b, c, 0b00110011001100110011001100110011); + unsafe fn test_mm512_mask_min_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let src = _mm512_set1_ph(3.0); + let r = _mm512_mask_min_ph(src, 0b01010101010101010101010101010101, a, b); let e = _mm512_set_ph( - 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, - 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, + 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, + 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmsubadd_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_maskz_fmsubadd_ph(0b00110011001100110011001100110011, a, b, c); + unsafe fn test_mm512_maskz_min_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let r = _mm512_maskz_min_ph(0b01010101010101010101010101010101, a, b); let e = _mm512_set_ph( - 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, - 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, + 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, + 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmsubadd_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = - _mm512_fmsubadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); - let e = _mm512_set_ph( - -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, - -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, - ); + unsafe fn test_mm512_min_round_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let r = _mm512_min_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set1_ph(1.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fmsubadd_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask_fmsubadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_mask_min_round_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let src = _mm512_set1_ph(3.0); + let r = _mm512_mask_min_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b01010101010101010101010101010101, a, 
- 0b00110011001100110011001100110011, b, - c, ); let e = _mm512_set_ph( - 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, - 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, + 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, + 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fmsubadd_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask3_fmsubadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_maskz_min_round_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let r = _mm512_maskz_min_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101010101010101010101010101, a, b, - c, - 0b00110011001100110011001100110011, ); let e = _mm512_set_ph( - 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, - 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, + 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, + 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmsubadd_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_maskz_fmsubadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b00110011001100110011001100110011, - a, - b, - c, + unsafe fn test_mm_min_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let r = _mm_min_sh(a, b); + let e = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_min_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); + let r = _mm_mask_min_sh(src, 0, a, b); + let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + let r = _mm_mask_min_sh(src, 1, a, b); + let e = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_min_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let r = _mm_maskz_min_sh(0, a, b); + let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + let r = _mm_maskz_min_sh(1, a, b); + let e = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_min_round_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let r = _mm_min_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_min_round_sh() { + let a 
= _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); + let r = _mm_mask_min_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, ); - let e = _mm512_set_ph( - 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, - 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, + let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + let r = _mm_mask_min_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 1, a, b, ); - assert_eq_m512h(r, e); + let e = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_min_round_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let r = + _mm_maskz_min_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + let r = + _mm_maskz_min_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); } } From 487210840bf3ef502b130228a10dcfb57246660e Mon Sep 17 00:00:00 2001 From: sayantn Date: Mon, 15 Jul 2024 16:01:06 +0530 Subject: [PATCH 06/11] AVX512FP16 Part 5: FP-Support `getexp`, `getmant`, `roundscale`, `scalef`, `reduce` --- crates/core_arch/missing-x86.md | 90 - crates/core_arch/src/x86/avx512fp16.rs | 10447 +++++++++++++++-------- 2 files changed, 6866 insertions(+), 3671 deletions(-) diff --git a/crates/core_arch/missing-x86.md b/crates/core_arch/missing-x86.md index c0b8aa1457..72fc8b840e 100644 --- a/crates/core_arch/missing-x86.md +++ b/crates/core_arch/missing-x86.md @@ -103,10 +103,6 @@ * [ ] [`_mm512_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtxph_ps) * [ ] [`_mm512_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtxps_ph) * [ ] [`_mm512_fpclass_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fpclass_ph_mask) - * [ ] [`_mm512_getexp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_getexp_ph) - * [ ] [`_mm512_getexp_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_getexp_round_ph) - * [ ] [`_mm512_getmant_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_getmant_ph) - * [ ] [`_mm512_getmant_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_getmant_round_ph) * [ ] [`_mm512_mask_blend_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_blend_ph) * [ ] [`_mm512_mask_cmp_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmp_ph_mask) * [ ] [`_mm512_mask_cmp_round_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmp_round_ph_mask) @@ -155,16 +151,6 @@ * [ ] [`_mm512_mask_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtxph_ps) * [ ] [`_mm512_mask_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtxps_ph) * [ ] 
[`_mm512_mask_fpclass_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fpclass_ph_mask) - * [ ] [`_mm512_mask_getexp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_getexp_ph) - * [ ] [`_mm512_mask_getexp_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_getexp_round_ph) - * [ ] [`_mm512_mask_getmant_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_getmant_ph) - * [ ] [`_mm512_mask_getmant_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_getmant_round_ph) - * [ ] [`_mm512_mask_reduce_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_reduce_ph) - * [ ] [`_mm512_mask_reduce_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_reduce_round_ph) - * [ ] [`_mm512_mask_roundscale_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_roundscale_ph) - * [ ] [`_mm512_mask_roundscale_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_roundscale_round_ph) - * [ ] [`_mm512_mask_scalef_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_scalef_ph) - * [ ] [`_mm512_mask_scalef_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_scalef_round_ph) * [ ] [`_mm512_maskz_cvt_roundepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundepi16_ph) * [ ] [`_mm512_maskz_cvt_roundepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundepi32_ph) * [ ] [`_mm512_maskz_cvt_roundepi64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundepi64_ph) @@ -209,28 +195,12 @@ * [ ] [`_mm512_maskz_cvtx_roundps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtx_roundps_ph) * [ ] [`_mm512_maskz_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtxph_ps) * [ ] [`_mm512_maskz_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtxps_ph) - * [ ] [`_mm512_maskz_getexp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_getexp_ph) - * [ ] [`_mm512_maskz_getexp_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_getexp_round_ph) - * [ ] [`_mm512_maskz_getmant_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_getmant_ph) - * [ ] [`_mm512_maskz_getmant_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_getmant_round_ph) - * [ ] [`_mm512_maskz_reduce_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_reduce_ph) - * [ ] [`_mm512_maskz_reduce_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_reduce_round_ph) - * [ ] [`_mm512_maskz_roundscale_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_roundscale_ph) - * [ ] [`_mm512_maskz_roundscale_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_roundscale_round_ph) - * [ ] [`_mm512_maskz_scalef_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_scalef_ph) - * [ ] 
[`_mm512_maskz_scalef_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_scalef_round_ph) * [ ] [`_mm512_permutex2var_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_permutex2var_ph) * [ ] [`_mm512_permutexvar_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_permutexvar_ph) * [ ] [`_mm512_reduce_add_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_add_ph) * [ ] [`_mm512_reduce_max_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_max_ph) * [ ] [`_mm512_reduce_min_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_min_ph) * [ ] [`_mm512_reduce_mul_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_mul_ph) - * [ ] [`_mm512_reduce_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_ph) - * [ ] [`_mm512_reduce_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_round_ph) - * [ ] [`_mm512_roundscale_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_roundscale_ph) - * [ ] [`_mm512_roundscale_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_roundscale_round_ph) - * [ ] [`_mm512_scalef_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_scalef_ph) - * [ ] [`_mm512_scalef_round_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_scalef_round_ph) * [ ] [`_mm512_set1_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_set1_pch) * [ ] [`_mm_cvt_roundi32_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundi32_sh) * [ ] [`_mm_cvt_roundi64_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundi64_sh) @@ -268,10 +238,6 @@ * [ ] [`_mm_cvtu32_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtu32_sh) * [ ] [`_mm_cvtu64_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtu64_sh) * [ ] [`_mm_fpclass_sh_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fpclass_sh_mask) - * [ ] [`_mm_getexp_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_getexp_round_sh) - * [ ] [`_mm_getexp_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_getexp_sh) - * [ ] [`_mm_getmant_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_getmant_round_sh) - * [ ] [`_mm_getmant_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_getmant_sh) * [ ] [`_mm_mask_cvt_roundsd_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvt_roundsd_sh) * [ ] [`_mm_mask_cvt_roundsh_sd`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvt_roundsh_sd) * [ ] [`_mm_mask_cvt_roundsh_ss`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvt_roundsh_ss) @@ -281,16 +247,6 @@ * [ ] [`_mm_mask_cvtsh_ss`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtsh_ss) * [ ] [`_mm_mask_cvtss_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtss_sh) * [ ] [`_mm_mask_fpclass_sh_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fpclass_sh_mask) - * [ ] 
[`_mm_mask_getexp_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_getexp_round_sh) - * [ ] [`_mm_mask_getexp_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_getexp_sh) - * [ ] [`_mm_mask_getmant_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_getmant_round_sh) - * [ ] [`_mm_mask_getmant_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_getmant_sh) - * [ ] [`_mm_mask_reduce_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_reduce_round_sh) - * [ ] [`_mm_mask_reduce_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_reduce_sh) - * [ ] [`_mm_mask_roundscale_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_roundscale_round_sh) - * [ ] [`_mm_mask_roundscale_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_roundscale_sh) - * [ ] [`_mm_mask_scalef_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_scalef_round_sh) - * [ ] [`_mm_mask_scalef_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_scalef_sh) * [ ] [`_mm_maskz_cvt_roundsd_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvt_roundsd_sh) * [ ] [`_mm_maskz_cvt_roundsh_sd`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvt_roundsh_sd) * [ ] [`_mm_maskz_cvt_roundsh_ss`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvt_roundsh_ss) @@ -299,22 +255,6 @@ * [ ] [`_mm_maskz_cvtsh_sd`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtsh_sd) * [ ] [`_mm_maskz_cvtsh_ss`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtsh_ss) * [ ] [`_mm_maskz_cvtss_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtss_sh) - * [ ] [`_mm_maskz_getexp_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_getexp_round_sh) - * [ ] [`_mm_maskz_getexp_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_getexp_sh) - * [ ] [`_mm_maskz_getmant_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_getmant_round_sh) - * [ ] [`_mm_maskz_getmant_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_getmant_sh) - * [ ] [`_mm_maskz_reduce_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_reduce_round_sh) - * [ ] [`_mm_maskz_reduce_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_reduce_sh) - * [ ] [`_mm_maskz_roundscale_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_roundscale_round_sh) - * [ ] [`_mm_maskz_roundscale_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_roundscale_sh) - * [ ] [`_mm_maskz_scalef_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_scalef_round_sh) - * [ ] [`_mm_maskz_scalef_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_scalef_sh) - * [ ] [`_mm_reduce_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_reduce_round_sh) - * [ ] [`_mm_reduce_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_reduce_sh) - * [ ] 
[`_mm_roundscale_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_roundscale_round_sh) - * [ ] [`_mm_roundscale_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_roundscale_sh) - * [ ] [`_mm_scalef_round_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_scalef_round_sh) - * [ ] [`_mm_scalef_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_scalef_sh) * [ ] [`_mm_set1_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set1_pch)

@@ -345,8 +285,6 @@
  * [ ] [`_mm256_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtxph_ps)
  * [ ] [`_mm256_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtxps_ph)
  * [ ] [`_mm256_fpclass_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fpclass_ph_mask)
- * [ ] [`_mm256_getexp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_getexp_ph)
- * [ ] [`_mm256_getmant_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_getmant_ph)
  * [ ] [`_mm256_mask_blend_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_blend_ph)
  * [ ] [`_mm256_mask_cmp_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmp_ph_mask)
  * [ ] [`_mm256_mask_cvtepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepi16_ph)
@@ -372,11 +310,6 @@
  * [ ] [`_mm256_mask_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtxph_ps)
  * [ ] [`_mm256_mask_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtxps_ph)
  * [ ] [`_mm256_mask_fpclass_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fpclass_ph_mask)
- * [ ] [`_mm256_mask_getexp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_getexp_ph)
- * [ ] [`_mm256_mask_getmant_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_getmant_ph)
- * [ ] [`_mm256_mask_reduce_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_reduce_ph)
- * [ ] [`_mm256_mask_roundscale_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_roundscale_ph)
- * [ ] [`_mm256_mask_scalef_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_scalef_ph)
  * [ ] [`_mm256_maskz_cvtepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepi16_ph)
  * [ ] [`_mm256_maskz_cvtepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepi32_ph)
  * [ ] [`_mm256_maskz_cvtepi64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepi64_ph)
@@ -399,20 +332,12 @@
  * [ ] [`_mm256_maskz_cvttph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvttph_epu64)
  * [ ] [`_mm256_maskz_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtxph_ps)
  * [ ] [`_mm256_maskz_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtxps_ph)
- * [ ] [`_mm256_maskz_getexp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_getexp_ph)
- * [ ] [`_mm256_maskz_getmant_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_getmant_ph)
- * [ ] [`_mm256_maskz_reduce_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_reduce_ph)
- * [ ] [`_mm256_maskz_roundscale_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_roundscale_ph)
- * [ ] [`_mm256_maskz_scalef_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_scalef_ph)
  * [ ] [`_mm256_permutex2var_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permutex2var_ph)
  * [ ] [`_mm256_permutexvar_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permutexvar_ph)
  * [ ] [`_mm256_reduce_add_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_reduce_add_ph)
  * [ ] [`_mm256_reduce_max_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_reduce_max_ph)
  * [ ] [`_mm256_reduce_min_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_reduce_min_ph)
  * [ ] [`_mm256_reduce_mul_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_reduce_mul_ph)
- * [ ] [`_mm256_reduce_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_reduce_ph)
- * [ ] [`_mm256_roundscale_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_roundscale_ph)
- * [ ] [`_mm256_scalef_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_scalef_ph)
  * [ ] [`_mm_cmp_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmp_ph_mask)
  * [ ] [`_mm_cvtepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepi16_ph)
  * [ ] [`_mm_cvtepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepi32_ph)
@@ -437,8 +362,6 @@
  * [ ] [`_mm_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtxph_ps)
  * [ ] [`_mm_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtxps_ph)
  * [ ] [`_mm_fpclass_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fpclass_ph_mask)
- * [ ] [`_mm_getexp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_getexp_ph)
- * [ ] [`_mm_getmant_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_getmant_ph)
  * [ ] [`_mm_mask_blend_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_blend_ph)
  * [ ] [`_mm_mask_cmp_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmp_ph_mask)
  * [ ] [`_mm_mask_cvtepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepi16_ph)
@@ -464,11 +387,6 @@
  * [ ] [`_mm_mask_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtxph_ps)
  * [ ] [`_mm_mask_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtxps_ph)
  * [ ] [`_mm_mask_fpclass_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fpclass_ph_mask)
- * [ ] [`_mm_mask_getexp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_getexp_ph)
- * [ ] [`_mm_mask_getmant_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_getmant_ph)
- * [ ] [`_mm_mask_reduce_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_reduce_ph)
- * [ ] [`_mm_mask_roundscale_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_roundscale_ph)
- * [ ] [`_mm_mask_scalef_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_scalef_ph)
  * [ ] [`_mm_maskz_cvtepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepi16_ph)
  * [ ] [`_mm_maskz_cvtepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepi32_ph)
  * [ ] [`_mm_maskz_cvtepi64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepi64_ph)
@@ -491,20 +409,12 @@
  * [ ] [`_mm_maskz_cvttph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvttph_epu64)
  * [ ] [`_mm_maskz_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtxph_ps)
  * [ ] [`_mm_maskz_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtxps_ph)
- * [ ] [`_mm_maskz_getexp_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_getexp_ph)
- * [ ] [`_mm_maskz_getmant_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_getmant_ph)
- * [ ] [`_mm_maskz_reduce_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_reduce_ph)
- * [ ] [`_mm_maskz_roundscale_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_roundscale_ph)
- * [ ] [`_mm_maskz_scalef_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_scalef_ph)
  * [ ] [`_mm_permutex2var_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_permutex2var_ph)
  * [ ] [`_mm_permutexvar_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_permutexvar_ph)
  * [ ] [`_mm_reduce_add_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_reduce_add_ph)
  * [ ] [`_mm_reduce_max_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_reduce_max_ph)
  * [ ] [`_mm_reduce_min_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_reduce_min_ph)
  * [ ] [`_mm_reduce_mul_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_reduce_mul_ph)
- * [ ] [`_mm_reduce_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_reduce_ph)
- * [ ] [`_mm_roundscale_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_roundscale_ph)
- * [ ] [`_mm_scalef_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_scalef_ph)

diff --git a/crates/core_arch/src/x86/avx512fp16.rs b/crates/core_arch/src/x86/avx512fp16.rs
index b30bc63ed4..3c04d9ae90 100644
--- a/crates/core_arch/src/x86/avx512fp16.rs
+++ b/crates/core_arch/src/x86/avx512fp16.rs
@@ -624,12 +624,13 @@ pub unsafe fn _mm512_zextph128_ph512(a: __m128h) -> __m512h {
 #[target_feature(enable = "avx512fp16")]
 #[rustc_legacy_const_generics(2, 3)]
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
-pub unsafe fn _mm_cmp_round_sh_mask<const IMM8: i32, const SAE: i32>(
+pub unsafe fn _mm_cmp_round_sh_mask<const IMM5: i32, const SAE: i32>(
     a: __m128h,
     b: __m128h,
 ) -> __mmask8 {
+    static_assert_uimm_bits!(IMM5, 5);
     static_assert_sae!(SAE);
-    _mm_mask_cmp_round_sh_mask::<IMM8, SAE>(0xff, a, b)
+    _mm_mask_cmp_round_sh_mask::<IMM5, SAE>(0xff, a, b)
 }

 /// Compare the lower half-precision (16-bit) floating-point elements in a and b based on the comparison
@@ -641,13 +642,14 @@ pub unsafe fn _mm_cmp_round_sh_mask<const IMM8: i32, const SAE: i32>(
 #[target_feature(enable = "avx512fp16")]
 #[rustc_legacy_const_generics(3, 4)]
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
-pub unsafe fn _mm_mask_cmp_round_sh_mask<const IMM8: i32, const SAE: i32>(
+pub unsafe fn _mm_mask_cmp_round_sh_mask<const IMM5: i32, const SAE: i32>(
     k1: __mmask8,
     a: __m128h,
     b: __m128h,
 ) -> __mmask8 {
+    static_assert_uimm_bits!(IMM5, 5);
     static_assert_sae!(SAE);
-    vcmpsh(a, b, IMM8, k1, SAE)
+    vcmpsh(a, b, IMM5, k1, SAE)
 }

 /// Compare the lower half-precision (16-bit) floating-point elements in a and b based on the comparison
@@ -658,8 +660,9 @@ pub unsafe fn _mm_mask_cmp_round_sh_mask<const IMM8: i32, const SAE: i32>(
 #[target_feature(enable = "avx512fp16")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
-pub unsafe fn _mm_cmp_sh_mask<const IMM8: i32>(a: __m128h, b: __m128h) -> __mmask8 {
-    _mm_cmp_round_sh_mask::<IMM8, _MM_FROUND_CUR_DIRECTION>(a, b)
+pub unsafe fn _mm_cmp_sh_mask<const IMM5: i32>(a: __m128h, b: __m128h) -> __mmask8 {
+    static_assert_uimm_bits!(IMM5, 5);
+    _mm_cmp_round_sh_mask::<IMM5, _MM_FROUND_CUR_DIRECTION>(a, b)
 }

 /// Compare the lower half-precision (16-bit) floating-point elements in a and b based on the comparison
@@ -670,12 +673,13 @@ pub unsafe fn _mm_cmp_sh_mask<const IMM8: i32>(a: __m128h, b: __m128h) -> __mmas
 #[target_feature(enable = "avx512fp16")]
 #[rustc_legacy_const_generics(3)]
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
-pub unsafe fn _mm_mask_cmp_sh_mask<const IMM8: i32>(
+pub unsafe fn _mm_mask_cmp_sh_mask<const IMM5: i32>(
     k1: __mmask8,
     a: __m128h,
     b: __m128h,
 ) -> __mmask8 {
-    _mm_mask_cmp_round_sh_mask::<IMM8, _MM_FROUND_CUR_DIRECTION>(k1, a, b)
+    static_assert_uimm_bits!(IMM5, 5);
+    _mm_mask_cmp_round_sh_mask::<IMM5, _MM_FROUND_CUR_DIRECTION>(k1, a, b)
 }

 /// Cast vector of type `__m256h` to type `__m512h`. The upper 16 elements of the result are zeroed.
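(Editorial note, not part of the patch: the IMM8 -> IMM5 rename above tracks the instruction encoding. The vcmpsh/vcomish comparison predicate is only 5 bits wide, so exactly the 32 `_CMP_*` operator constants (0..=31) are valid, and the new `static_assert_uimm_bits!(IMM5, 5)` rejects anything wider at compile time. A minimal sketch of that check, assuming the macro simply bounds the immediate; `assert_uimm_bits_5` and the local `_CMP_EQ_OQ` constant are hypothetical stand-ins:)

    // Sketch only: stands in for stdarch's static_assert_uimm_bits!(IMM5, 5).
    const fn assert_uimm_bits_5(imm: i32) {
        assert!(imm >= 0 && imm < (1 << 5)); // 0..=31, i.e. every _CMP_* operator
    }
    const _CMP_EQ_OQ: i32 = 0x00; // local copy of the stdarch constant's value
    const _: () = assert_uimm_bits_5(_CMP_EQ_OQ); // compiles; 32 or larger would not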
@@ -706,9 +710,10 @@ pub unsafe fn _mm512_zextph256_ph512(a: __m256h) -> __m512h {
 #[target_feature(enable = "avx512fp16")]
 #[rustc_legacy_const_generics(2, 3)]
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
-pub unsafe fn _mm_comi_round_sh<const IMM8: i32, const SAE: i32>(a: __m128h, b: __m128h) -> i32 {
+pub unsafe fn _mm_comi_round_sh<const IMM5: i32, const SAE: i32>(a: __m128h, b: __m128h) -> i32 {
+    static_assert_uimm_bits!(IMM5, 5);
     static_assert_sae!(SAE);
-    vcomish(a, b, IMM8, SAE)
+    vcomish(a, b, IMM5, SAE)
 }

 /// Compare the lower half-precision (16-bit) floating-point elements in a and b based on the comparison
@@ -719,8 +724,9 @@ pub unsafe fn _mm_comi_round_sh<const IMM8: i32, const SAE: i32>(a: __m128h, b:
 #[target_feature(enable = "avx512fp16")]
 #[rustc_legacy_const_generics(2)]
 #[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
-pub unsafe fn _mm_comi_sh<const IMM8: i32>(a: __m128h, b: __m128h) -> i32 {
-    _mm_comi_round_sh::<IMM8, _MM_FROUND_CUR_DIRECTION>(a, b)
+pub unsafe fn _mm_comi_sh<const IMM5: i32>(a: __m128h, b: __m128h) -> i32 {
+    static_assert_uimm_bits!(IMM5, 5);
+    _mm_comi_round_sh::<IMM5, _MM_FROUND_CUR_DIRECTION>(a, b)
 }

 /// Compare the lower half-precision (16-bit) floating-point elements in a and b for equality, and return
@@ -8440,6033 +8446,9312 @@ pub unsafe fn _mm_maskz_min_round_sh<const SAE: i32>(
     _mm_mask_min_round_sh::<SAE>(_mm_setzero_ph(), k, a, b)
 }

-#[allow(improper_ctypes)]
-extern "C" {
-    #[link_name = "llvm.x86.avx512fp16.mask.cmp.sh"]
-    fn vcmpsh(a: __m128h, b: __m128h, imm8: i32, mask: __mmask8, sae: i32) -> __mmask8;
-    #[link_name = "llvm.x86.avx512fp16.vcomi.sh"]
-    fn vcomish(a: __m128h, b: __m128h, imm8: i32, sae: i32) -> i32;
+/// Convert the exponent of each packed half-precision (16-bit) floating-point element in a to a half-precision
+/// (16-bit) floating-point number representing the integer exponent, and store the results in dst.
+/// This intrinsic essentially calculates `floor(log2(x))` for each element.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_getexp_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vgetexpph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_getexp_ph(a: __m128h) -> __m128h {
+    _mm_mask_getexp_ph(_mm_undefined_ph(), 0xff, a)
+}

-    #[link_name = "llvm.x86.avx512fp16.add.ph.512"]
-    fn vaddph(a: __m512h, b: __m512h, rounding: i32) -> __m512h;
-    #[link_name = "llvm.x86.avx512fp16.sub.ph.512"]
-    fn vsubph(a: __m512h, b: __m512h, rounding: i32) -> __m512h;
-    #[link_name = "llvm.x86.avx512fp16.mul.ph.512"]
-    fn vmulph(a: __m512h, b: __m512h, rounding: i32) -> __m512h;
-    #[link_name = "llvm.x86.avx512fp16.div.ph.512"]
-    fn vdivph(a: __m512h, b: __m512h, rounding: i32) -> __m512h;
+/// Convert the exponent of each packed half-precision (16-bit) floating-point element in a to a half-precision
+/// (16-bit) floating-point number representing the integer exponent, and store the results in dst using writemask k
+/// (elements are copied from src when the corresponding mask bit is not set). This intrinsic essentially calculates
+/// `floor(log2(x))` for each element.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_getexp_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vgetexpph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_getexp_ph(src: __m128h, k: __mmask8, a: __m128h) -> __m128h {
+    vgetexpph_128(a, src, k)
+}

-    #[link_name = "llvm.x86.avx512fp16.mask.add.sh.round"]
-    fn vaddsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h;
-    #[link_name = "llvm.x86.avx512fp16.mask.sub.sh.round"]
-    fn vsubsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h;
-    #[link_name = "llvm.x86.avx512fp16.mask.mul.sh.round"]
-    fn vmulsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h;
-    #[link_name = "llvm.x86.avx512fp16.mask.div.sh.round"]
-    fn vdivsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h;
+/// Convert the exponent of each packed half-precision (16-bit) floating-point element in a to a half-precision
+/// (16-bit) floating-point number representing the integer exponent, and store the results in dst using zeromask
+/// k (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates
+/// `floor(log2(x))` for each element.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_getexp_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vgetexpph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_getexp_ph(k: __mmask8, a: __m128h) -> __m128h {
+    _mm_mask_getexp_ph(_mm_setzero_ph(), k, a)
+}

-    #[link_name = "llvm.x86.avx512fp16.mask.vfmul.cph.128"]
-    fn vfmulcph_128(a: __m128, b: __m128, src: __m128, k: __mmask8) -> __m128;
-    #[link_name = "llvm.x86.avx512fp16.mask.vfmul.cph.256"]
-    fn vfmulcph_256(a: __m256, b: __m256, src: __m256, k: __mmask8) -> __m256;
-    #[link_name = "llvm.x86.avx512fp16.mask.vfmul.cph.512"]
-    fn vfmulcph_512(a: __m512, b: __m512, src: __m512, k: __mmask16, rounding: i32) -> __m512;
-    #[link_name = "llvm.x86.avx512fp16.mask.vfmul.csh"]
-    fn vfmulcsh(a: __m128, b: __m128, src: __m128, k: __mmask8, rounding: i32) -> __m128;
+/// Convert the exponent of each packed half-precision (16-bit) floating-point element in a to a half-precision
+/// (16-bit) floating-point number representing the integer exponent, and store the results in dst.
+/// This intrinsic essentially calculates `floor(log2(x))` for each element.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_getexp_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vgetexpph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_getexp_ph(a: __m256h) -> __m256h {
+    _mm256_mask_getexp_ph(_mm256_undefined_ph(), 0xffff, a)
+}

-    #[link_name = "llvm.x86.avx512fp16.mask.vfcmul.cph.128"]
-    fn vfcmulcph_128(a: __m128, b: __m128, src: __m128, k: __mmask8) -> __m128;
-    #[link_name = "llvm.x86.avx512fp16.mask.vfcmul.cph.256"]
-    fn vfcmulcph_256(a: __m256, b: __m256, src: __m256, k: __mmask8) -> __m256;
-    #[link_name = "llvm.x86.avx512fp16.mask.vfcmul.cph.512"]
-    fn vfcmulcph_512(a: __m512, b: __m512, src: __m512, k: __mmask16, rounding: i32) -> __m512;
-    #[link_name = "llvm.x86.avx512fp16.mask.vfcmul.csh"]
-    fn vfcmulcsh(a: __m128, b: __m128, src: __m128, k: __mmask8, rounding: i32) -> __m128;
+/// Convert the exponent of each packed half-precision (16-bit) floating-point element in a to a half-precision
+/// (16-bit) floating-point number representing the integer exponent, and store the results in dst using writemask k
+/// (elements are copied from src when the corresponding mask bit is not set). This intrinsic essentially calculates
+/// `floor(log2(x))` for each element.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_getexp_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vgetexpph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_mask_getexp_ph(src: __m256h, k: __mmask16, a: __m256h) -> __m256h {
+    vgetexpph_256(a, src, k)
+}

-    #[link_name = "llvm.x86.avx512fp16.mask.vfmadd.cph.128"]
-    fn vfmaddcph_mask3_128(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128;
-    #[link_name = "llvm.x86.avx512fp16.maskz.vfmadd.cph.128"]
-    fn vfmaddcph_maskz_128(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128;
-    #[link_name = "llvm.x86.avx512fp16.mask.vfmadd.cph.256"]
-    fn vfmaddcph_mask3_256(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256;
-    #[link_name = "llvm.x86.avx512fp16.maskz.vfmadd.cph.256"]
-    fn vfmaddcph_maskz_256(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256;
-    #[link_name = "llvm.x86.avx512fp16.mask.vfmadd.cph.512"]
-    fn vfmaddcph_mask3_512(a: __m512, b: __m512, c: __m512, k: __mmask16, rounding: i32) -> __m512;
-    #[link_name = "llvm.x86.avx512fp16.maskz.vfmadd.cph.512"]
-    fn vfmaddcph_maskz_512(a: __m512, b: __m512, c: __m512, k: __mmask16, rounding: i32) -> __m512;
-    #[link_name = "llvm.x86.avx512fp16.mask.vfmadd.csh"]
-    fn vfmaddcsh_mask(a: __m128, b: __m128, c: __m128, k: __mmask8, rounding: i32) -> __m128;
-    #[link_name = "llvm.x86.avx512fp16.maskz.vfmadd.csh"]
-    fn vfmaddcsh_maskz(a: __m128, b: __m128, c: __m128, k: __mmask8, rounding: i32) -> __m128;
+/// Convert the exponent of each packed half-precision (16-bit) floating-point element in a to a half-precision
+/// (16-bit) floating-point number representing the integer exponent, and store the results in dst using zeromask
+/// k (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates
+/// `floor(log2(x))` for each element.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_getexp_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vgetexpph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_maskz_getexp_ph(k: __mmask16, a: __m256h) -> __m256h {
+    _mm256_mask_getexp_ph(_mm256_setzero_ph(), k, a)
+}

-    #[link_name = "llvm.x86.avx512fp16.mask.vfcmadd.cph.128"]
-    fn vfcmaddcph_mask3_128(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128;
-    #[link_name = "llvm.x86.avx512fp16.maskz.vfcmadd.cph.128"]
-    fn vfcmaddcph_maskz_128(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128;
-    #[link_name = "llvm.x86.avx512fp16.mask.vfcmadd.cph.256"]
-    fn vfcmaddcph_mask3_256(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256;
-    #[link_name = "llvm.x86.avx512fp16.maskz.vfcmadd.cph.256"]
-    fn vfcmaddcph_maskz_256(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256;
-    #[link_name = "llvm.x86.avx512fp16.mask.vfcmadd.cph.512"]
-    fn vfcmaddcph_mask3_512(a: __m512, b: __m512, c: __m512, k: __mmask16, rounding: i32)
-        -> __m512;
-    #[link_name = "llvm.x86.avx512fp16.maskz.vfcmadd.cph.512"]
-    fn vfcmaddcph_maskz_512(a: __m512, b: __m512, c: __m512, k: __mmask16, rounding: i32)
-        -> __m512;
-    #[link_name = "llvm.x86.avx512fp16.mask.vfcmadd.csh"]
-    fn vfcmaddcsh_mask(a: __m128, b: __m128, c: __m128, k: __mmask8, rounding: i32) -> __m128;
-    #[link_name = "llvm.x86.avx512fp16.maskz.vfcmadd.csh"]
-    fn vfcmaddcsh_maskz(a: __m128, b: __m128, c: __m128, k: __mmask8, rounding: i32) -> __m128;
+/// Convert the exponent of each packed half-precision (16-bit) floating-point element in a to a half-precision
+/// (16-bit) floating-point number representing the integer exponent, and store the results in dst.
+/// This intrinsic essentially calculates `floor(log2(x))` for each element.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_getexp_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vgetexpph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_getexp_ph(a: __m512h) -> __m512h {
+    _mm512_mask_getexp_ph(_mm512_undefined_ph(), 0xffffffff, a)
+}

-    #[link_name = "llvm.x86.avx512fp16.vfmadd.ph.512"]
-    fn vfmaddph_512(a: __m512h, b: __m512h, c: __m512h, rounding: i32) -> __m512h;
-    #[link_name = "llvm.fma.f16"]
-    fn fmaf16(a: f16, b: f16, c: f16) -> f16; // TODO: use `crate::intrinsics::fmaf16` when it's available
-    #[link_name = "llvm.x86.avx512fp16.vfmadd.f16"]
-    fn vfmaddsh(a: f16, b: f16, c: f16, rounding: i32) -> f16;
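(Editorial note, not part of the patch: the getexp family added above is easy to sanity-check against a scalar model. The sketch below mirrors the documented `floor(log2(x))` behaviour for finite, non-zero inputs, using plain f32 arithmetic since the `f16` type is still unstable; `getexp_model` is a hypothetical helper, not a stdarch API.)

    // Scalar model of vgetexpph/vgetexpsh on one finite, non-zero element:
    // the stored result is the unbiased binary exponent of the input.
    fn getexp_model(x: f32) -> f32 {
        x.abs().log2().floor()
    }

    fn main() {
        assert_eq!(getexp_model(8.5), 3.0); // 8.5 = 1.0625 * 2^3
        assert_eq!(getexp_model(-0.25), -2.0); // |-0.25| = 1.0 * 2^-2
    }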
+/// Convert the exponent of each packed half-precision (16-bit) floating-point element in a to a half-precision
+/// (16-bit) floating-point number representing the integer exponent, and store the results in dst using writemask k
+/// (elements are copied from src when the corresponding mask bit is not set). This intrinsic essentially calculates
+/// `floor(log2(x))` for each element.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_getexp_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vgetexpph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_getexp_ph(src: __m512h, k: __mmask32, a: __m512h) -> __m512h {
+    _mm512_mask_getexp_round_ph::<_MM_FROUND_CUR_DIRECTION>(src, k, a)
+}

-    #[link_name = "llvm.x86.avx512fp16.vfmaddsub.ph.128"]
-    fn vfmaddsubph_128(a: __m128h, b: __m128h, c: __m128h) -> __m128h;
-    #[link_name = "llvm.x86.avx512fp16.vfmaddsub.ph.256"]
-    fn vfmaddsubph_256(a: __m256h, b: __m256h, c: __m256h) -> __m256h;
-    #[link_name = "llvm.x86.avx512fp16.vfmaddsub.ph.512"]
-    fn vfmaddsubph_512(a: __m512h, b: __m512h, c: __m512h, rounding: i32) -> __m512h;
+/// Convert the exponent of each packed half-precision (16-bit) floating-point element in a to a half-precision
+/// (16-bit) floating-point number representing the integer exponent, and store the results in dst using zeromask
+/// k (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates
+/// `floor(log2(x))` for each element.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_getexp_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vgetexpph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_getexp_ph(k: __mmask32, a: __m512h) -> __m512h {
+    _mm512_mask_getexp_ph(_mm512_setzero_ph(), k, a)
+}

-    #[link_name = "llvm.x86.avx512fp16.mask.rcp.ph.128"]
-    fn vrcpph_128(a: __m128h, src: __m128h, k: __mmask8) -> __m128h;
-    #[link_name = "llvm.x86.avx512fp16.mask.rcp.ph.256"]
-    fn vrcpph_256(a: __m256h, src: __m256h, k: __mmask16) -> __m256h;
-    #[link_name = "llvm.x86.avx512fp16.mask.rcp.ph.512"]
-    fn vrcpph_512(a: __m512h, src: __m512h, k: __mmask32) -> __m512h;
-    #[link_name = "llvm.x86.avx512fp16.mask.rcp.sh"]
-    fn vrcpsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8) -> __m128h;
+/// Convert the exponent of each packed half-precision (16-bit) floating-point element in a to a half-precision
+/// (16-bit) floating-point number representing the integer exponent, and store the results in dst.
+/// This intrinsic essentially calculates `floor(log2(x))` for each element. Exceptions can be suppressed
+/// by passing _MM_FROUND_NO_EXC in the sae parameter.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_getexp_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vgetexpph, SAE = 8))]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_getexp_round_ph<const SAE: i32>(a: __m512h) -> __m512h {
+    static_assert_sae!(SAE);
+    _mm512_mask_getexp_round_ph::<SAE>(_mm512_undefined_ph(), 0xffffffff, a)
+}

-    #[link_name = "llvm.x86.avx512fp16.mask.rsqrt.ph.128"]
-    fn vrsqrtph_128(a: __m128h, src: __m128h, k: __mmask8) -> __m128h;
-    #[link_name = "llvm.x86.avx512fp16.mask.rsqrt.ph.256"]
-    fn vrsqrtph_256(a: __m256h, src: __m256h, k: __mmask16) -> __m256h;
-    #[link_name = "llvm.x86.avx512fp16.mask.rsqrt.ph.512"]
-    fn vrsqrtph_512(a: __m512h, src: __m512h, k: __mmask32) -> __m512h;
-    #[link_name = "llvm.x86.avx512fp16.mask.rsqrt.sh"]
-    fn vrsqrtsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8) -> __m128h;
+/// Convert the exponent of each packed half-precision (16-bit) floating-point element in a to a half-precision
+/// (16-bit) floating-point number representing the integer exponent, and store the results in dst using writemask k
+/// (elements are copied from src when the corresponding mask bit is not set). This intrinsic essentially calculates
+/// `floor(log2(x))` for each element. Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_getexp_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vgetexpph, SAE = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_getexp_round_ph<const SAE: i32>(
+    src: __m512h,
+    k: __mmask32,
+    a: __m512h,
+) -> __m512h {
+    static_assert_sae!(SAE);
+    vgetexpph_512(a, src, k, SAE)
+}

-    #[link_name = "llvm.x86.avx512fp16.sqrt.ph.512"]
-    fn vsqrtph_512(a: __m512h, rounding: i32) -> __m512h;
-    #[link_name = "llvm.x86.avx512fp16.mask.sqrt.sh"]
-    fn vsqrtsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h;
+/// Convert the exponent of each packed half-precision (16-bit) floating-point element in a to a half-precision
+/// (16-bit) floating-point number representing the integer exponent, and store the results in dst using zeromask
+/// k (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates
+/// `floor(log2(x))` for each element. Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_getexp_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vgetexpph, SAE = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_getexp_round_ph<const SAE: i32>(k: __mmask32, a: __m512h) -> __m512h {
+    static_assert_sae!(SAE);
+    _mm512_mask_getexp_round_ph::<SAE>(_mm512_setzero_ph(), k, a)
+}

-    #[link_name = "llvm.x86.avx512fp16.max.ph.128"]
-    fn vmaxph_128(a: __m128h, b: __m128h) -> __m128h;
-    #[link_name = "llvm.x86.avx512fp16.max.ph.256"]
-    fn vmaxph_256(a: __m256h, b: __m256h) -> __m256h;
-    #[link_name = "llvm.x86.avx512fp16.max.ph.512"]
-    fn vmaxph_512(a: __m512h, b: __m512h, sae: i32) -> __m512h;
-    #[link_name = "llvm.x86.avx512fp16.mask.max.sh.round"]
-    fn vmaxsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, sae: i32) -> __m128h;
+/// Convert the exponent of the lower half-precision (16-bit) floating-point element in b to a half-precision
+/// (16-bit) floating-point number representing the integer exponent, store the result in the lower element
+/// of dst, and copy the upper 7 packed elements from a to the upper elements of dst. This intrinsic essentially
+/// calculates `floor(log2(x))` for the lower element.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_getexp_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vgetexpsh))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_getexp_sh(a: __m128h, b: __m128h) -> __m128h {
+    _mm_mask_getexp_sh(_mm_undefined_ph(), 0xff, a, b)
+}

-    #[link_name = "llvm.x86.avx512fp16.min.ph.128"]
-    fn vminph_128(a: __m128h, b: __m128h) -> __m128h;
-    #[link_name = "llvm.x86.avx512fp16.min.ph.256"]
-    fn vminph_256(a: __m256h, b: __m256h) -> __m256h;
-    #[link_name = "llvm.x86.avx512fp16.min.ph.512"]
-    fn vminph_512(a: __m512h, b: __m512h, sae: i32) -> __m512h;
-    #[link_name = "llvm.x86.avx512fp16.mask.min.sh.round"]
-    fn vminsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, sae: i32) -> __m128h;
+/// Convert the exponent of the lower half-precision (16-bit) floating-point element in b to a half-precision
+/// (16-bit) floating-point number representing the integer exponent, store the result in the lower element
+/// of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper 7
+/// packed elements from a to the upper elements of dst. This intrinsic essentially calculates `floor(log2(x))`
+/// for the lower element.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_getexp_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vgetexpsh))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_getexp_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
+    _mm_mask_getexp_round_sh::<_MM_FROUND_CUR_DIRECTION>(src, k, a, b)
+}
+
+/// Convert the exponent of the lower half-precision (16-bit) floating-point element in b to a half-precision
+/// (16-bit) floating-point number representing the integer exponent, store the result in the lower element
+/// of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed
+/// elements from a to the upper elements of dst. This intrinsic essentially calculates `floor(log2(x))` for the
+/// lower element.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_getexp_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vgetexpsh))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_getexp_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
+    _mm_mask_getexp_sh(_mm_setzero_ph(), k, a, b)
+}

-#[cfg(test)]
-mod tests {
-    use crate::core_arch::x86::*;
-    use crate::mem::transmute;
-    use crate::ptr::{addr_of, addr_of_mut};
-    use stdarch_test::simd_test;
+/// Convert the exponent of the lower half-precision (16-bit) floating-point element in b to a half-precision
+/// (16-bit) floating-point number representing the integer exponent, store the result in the lower element
+/// of dst, and copy the upper 7 packed elements from a to the upper elements of dst. This intrinsic essentially
+/// calculates `floor(log2(x))` for the lower element. Exceptions can be suppressed by passing _MM_FROUND_NO_EXC
+/// in the sae parameter.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_getexp_round_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vgetexpsh, SAE = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_getexp_round_sh<const SAE: i32>(a: __m128h, b: __m128h) -> __m128h {
+    static_assert_sae!(SAE);
+    _mm_mask_getexp_round_sh::<SAE>(_mm_undefined_ph(), 0xff, a, b)
+}

-    #[target_feature(enable = "avx512fp16")]
-    unsafe fn _mm_set1_pch(re: f16, im: f16) -> __m128h {
-        _mm_setr_ph(re, im, re, im, re, im, re, im)
-    }
+/// Convert the exponent of the lower half-precision (16-bit) floating-point element in b to a half-precision
+/// (16-bit) floating-point number representing the integer exponent, store the result in the lower element
+/// of dst using writemask k (the element is copied from src when mask bit 0 is not set), and copy the upper 7
+/// packed elements from a to the upper elements of dst. This intrinsic essentially calculates `floor(log2(x))`
+/// for the lower element. Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_getexp_round_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vgetexpsh, SAE = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_getexp_round_sh<const SAE: i32>(
+    src: __m128h,
+    k: __mmask8,
+    a: __m128h,
+    b: __m128h,
+) -> __m128h {
+    static_assert_sae!(SAE);
+    vgetexpsh(a, b, src, k, SAE)
+}

-    #[target_feature(enable = "avx512fp16")]
-    unsafe fn _mm256_set1_pch(re: f16, im: f16) -> __m256h {
-        _mm256_setr_ph(
-            re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im,
-        )
-    }
+/// Convert the exponent of the lower half-precision (16-bit) floating-point element in b to a half-precision
+/// (16-bit) floating-point number representing the integer exponent, store the result in the lower element
+/// of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed
+/// elements from a to the upper elements of dst. This intrinsic essentially calculates `floor(log2(x))` for the
+/// lower element. Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_getexp_round_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vgetexpsh, SAE = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_getexp_round_sh<const SAE: i32>(
+    k: __mmask8,
+    a: __m128h,
+    b: __m128h,
+) -> __m128h {
+    static_assert_sae!(SAE);
+    _mm_mask_getexp_round_sh::<SAE>(_mm_setzero_ph(), k, a, b)
+}

-    #[target_feature(enable = "avx512fp16")]
-    unsafe fn _mm512_set1_pch(re: f16, im: f16) -> __m512h {
-        _mm512_setr_ph(
-            re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im,
-            re, im, re, im, re, im, re, im, re, im,
-        )
-    }
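(Editorial note, not part of the patch: every *_round_* intrinsic in this family is gated by `static_assert_sae!(SAE)`, which admits only `_MM_FROUND_CUR_DIRECTION` (0x04, use the current MXCSR rounding and report exceptions as usual) or `_MM_FROUND_NO_EXC` (0x08, suppress all floating-point exceptions). A hedged usage sketch, assuming a nightly toolchain with `feature(stdarch_x86_avx512_f16)` and the `avx512fp16` target feature; `getexp_sh_examples` is a hypothetical wrapper:)

    // Sketch only: the two SAE values the round variants above accept.
    unsafe fn getexp_sh_examples(a: __m128h, b: __m128h) -> (__m128h, __m128h) {
        (
            _mm_getexp_round_sh::<_MM_FROUND_CUR_DIRECTION>(a, b), // default behaviour
            _mm_getexp_round_sh::<_MM_FROUND_NO_EXC>(a, b),        // exceptions suppressed
        )
    }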
+/// Normalize the mantissas of packed half-precision (16-bit) floating-point elements in a, and store
+/// the results in dst. This intrinsic essentially calculates `±(2^k)*|x.significand|`, where k depends
+/// on the interval range defined by norm and the sign depends on sign and the source sign.
+///
+/// The mantissa is normalized to the interval specified by interv, which can take the following values:
+///
+///     _MM_MANT_NORM_1_2     // interval [1, 2)
+///     _MM_MANT_NORM_p5_2    // interval [0.5, 2)
+///     _MM_MANT_NORM_p5_1    // interval [0.5, 1)
+///     _MM_MANT_NORM_p75_1p5 // interval [0.75, 1.5)
+///
+/// The sign is determined by sc which can take the following values:
+///
+///     _MM_MANT_SIGN_src  // sign = sign(src)
+///     _MM_MANT_SIGN_zero // sign = 0
+///     _MM_MANT_SIGN_nan  // dst = NaN if sign(src) = 1
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_getmant_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vgetmantph, NORM = 0, SIGN = 0))]
+#[rustc_legacy_const_generics(1, 2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_getmant_ph<
+    const NORM: _MM_MANTISSA_NORM_ENUM,
+    const SIGN: _MM_MANTISSA_SIGN_ENUM,
+>(
+    a: __m128h,
+) -> __m128h {
+    static_assert_uimm_bits!(NORM, 4);
+    static_assert_uimm_bits!(SIGN, 2);
+    _mm_mask_getmant_ph::<NORM, SIGN>(_mm_undefined_ph(), 0xff, a)
+}
+
+/// Normalize the mantissas of packed half-precision (16-bit) floating-point elements in a, and store
+/// the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
+/// This intrinsic essentially calculates `±(2^k)*|x.significand|`, where k depends on the interval range defined
+/// by norm and the sign depends on sign and the source sign.
+///
+/// The mantissa is normalized to the interval specified by interv, which can take the following values:
+///
+///     _MM_MANT_NORM_1_2     // interval [1, 2)
+///     _MM_MANT_NORM_p5_2    // interval [0.5, 2)
+///     _MM_MANT_NORM_p5_1    // interval [0.5, 1)
+///     _MM_MANT_NORM_p75_1p5 // interval [0.75, 1.5)
+///
+/// The sign is determined by sc which can take the following values:
+///
+///     _MM_MANT_SIGN_src  // sign = sign(src)
+///     _MM_MANT_SIGN_zero // sign = 0
+///     _MM_MANT_SIGN_nan  // dst = NaN if sign(src) = 1
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_getmant_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vgetmantph, NORM = 0, SIGN = 0))]
+#[rustc_legacy_const_generics(3, 4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_getmant_ph<
+    const NORM: _MM_MANTISSA_NORM_ENUM,
+    const SIGN: _MM_MANTISSA_SIGN_ENUM,
+>(
+    src: __m128h,
+    k: __mmask8,
+    a: __m128h,
+) -> __m128h {
+    static_assert_uimm_bits!(NORM, 4);
+    static_assert_uimm_bits!(SIGN, 2);
+    vgetmantph_128(a, (SIGN << 2) | NORM, src, k)
+}
+
+/// Normalize the mantissas of packed half-precision (16-bit) floating-point elements in a, and store
+/// the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+/// This intrinsic essentially calculates `±(2^k)*|x.significand|`, where k depends on the interval range defined
+/// by norm and the sign depends on sign and the source sign.
+///
+/// The mantissa is normalized to the interval specified by interv, which can take the following values:
+///
+///     _MM_MANT_NORM_1_2     // interval [1, 2)
+///     _MM_MANT_NORM_p5_2    // interval [0.5, 2)
+///     _MM_MANT_NORM_p5_1    // interval [0.5, 1)
+///     _MM_MANT_NORM_p75_1p5 // interval [0.75, 1.5)
+///
+/// The sign is determined by sc which can take the following values:
+///
+///     _MM_MANT_SIGN_src  // sign = sign(src)
+///     _MM_MANT_SIGN_zero // sign = 0
+///     _MM_MANT_SIGN_nan  // dst = NaN if sign(src) = 1
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_getmant_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vgetmantph, NORM = 0, SIGN = 0))]
+#[rustc_legacy_const_generics(2, 3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_getmant_ph<
+    const NORM: _MM_MANTISSA_NORM_ENUM,
+    const SIGN: _MM_MANTISSA_SIGN_ENUM,
+>(
+    k: __mmask8,
+    a: __m128h,
+) -> __m128h {
+    static_assert_uimm_bits!(NORM, 4);
+    static_assert_uimm_bits!(SIGN, 2);
+    _mm_mask_getmant_ph::<NORM, SIGN>(_mm_setzero_ph(), k, a)
+}
+
+/// Normalize the mantissas of packed half-precision (16-bit) floating-point elements in a, and store
+/// the results in dst. This intrinsic essentially calculates `±(2^k)*|x.significand|`, where k depends
+/// on the interval range defined by norm and the sign depends on sign and the source sign.
+///
+/// The mantissa is normalized to the interval specified by interv, which can take the following values:
+///
+///     _MM_MANT_NORM_1_2     // interval [1, 2)
+///     _MM_MANT_NORM_p5_2    // interval [0.5, 2)
+///     _MM_MANT_NORM_p5_1    // interval [0.5, 1)
+///     _MM_MANT_NORM_p75_1p5 // interval [0.75, 1.5)
+///
+/// The sign is determined by sc which can take the following values:
+///
+///     _MM_MANT_SIGN_src  // sign = sign(src)
+///     _MM_MANT_SIGN_zero // sign = 0
+///     _MM_MANT_SIGN_nan  // dst = NaN if sign(src) = 1
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_getmant_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vgetmantph, NORM = 0, SIGN = 0))]
+#[rustc_legacy_const_generics(1, 2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_getmant_ph<
+    const NORM: _MM_MANTISSA_NORM_ENUM,
+    const SIGN: _MM_MANTISSA_SIGN_ENUM,
+>(
+    a: __m256h,
+) -> __m256h {
+    static_assert_uimm_bits!(NORM, 4);
+    static_assert_uimm_bits!(SIGN, 2);
+    _mm256_mask_getmant_ph::<NORM, SIGN>(_mm256_undefined_ph(), 0xffff, a)
+}
+
+/// Normalize the mantissas of packed half-precision (16-bit) floating-point elements in a, and store
+/// the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
+/// This intrinsic essentially calculates `±(2^k)*|x.significand|`, where k depends on the interval range defined
+/// by norm and the sign depends on sign and the source sign.
+///
+/// The mantissa is normalized to the interval specified by interv, which can take the following values:
+///
+///     _MM_MANT_NORM_1_2     // interval [1, 2)
+///     _MM_MANT_NORM_p5_2    // interval [0.5, 2)
+///     _MM_MANT_NORM_p5_1    // interval [0.5, 1)
+///     _MM_MANT_NORM_p75_1p5 // interval [0.75, 1.5)
+///
+/// The sign is determined by sc which can take the following values:
+///
+///     _MM_MANT_SIGN_src  // sign = sign(src)
+///     _MM_MANT_SIGN_zero // sign = 0
+///     _MM_MANT_SIGN_nan  // dst = NaN if sign(src) = 1
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_getmant_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vgetmantph, NORM = 0, SIGN = 0))]
+#[rustc_legacy_const_generics(3, 4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_mask_getmant_ph<
+    const NORM: _MM_MANTISSA_NORM_ENUM,
+    const SIGN: _MM_MANTISSA_SIGN_ENUM,
+>(
+    src: __m256h,
+    k: __mmask16,
+    a: __m256h,
+) -> __m256h {
+    static_assert_uimm_bits!(NORM, 4);
+    static_assert_uimm_bits!(SIGN, 2);
+    vgetmantph_256(a, (SIGN << 2) | NORM, src, k)
+}
+
+/// Normalize the mantissas of packed half-precision (16-bit) floating-point elements in a, and store
+/// the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+/// This intrinsic essentially calculates `±(2^k)*|x.significand|`, where k depends on the interval range defined
+/// by norm and the sign depends on sign and the source sign.
+///
+/// The mantissa is normalized to the interval specified by interv, which can take the following values:
+///
+///     _MM_MANT_NORM_1_2     // interval [1, 2)
+///     _MM_MANT_NORM_p5_2    // interval [0.5, 2)
+///     _MM_MANT_NORM_p5_1    // interval [0.5, 1)
+///     _MM_MANT_NORM_p75_1p5 // interval [0.75, 1.5)
+///
+/// The sign is determined by sc which can take the following values:
+///
+///     _MM_MANT_SIGN_src  // sign = sign(src)
+///     _MM_MANT_SIGN_zero // sign = 0
+///     _MM_MANT_SIGN_nan  // dst = NaN if sign(src) = 1
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_getmant_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vgetmantph, NORM = 0, SIGN = 0))]
+#[rustc_legacy_const_generics(2, 3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_maskz_getmant_ph<
+    const NORM: _MM_MANTISSA_NORM_ENUM,
+    const SIGN: _MM_MANTISSA_SIGN_ENUM,
+>(
+    k: __mmask16,
+    a: __m256h,
+) -> __m256h {
+    static_assert_uimm_bits!(NORM, 4);
+    static_assert_uimm_bits!(SIGN, 2);
+    _mm256_mask_getmant_ph::<NORM, SIGN>(_mm256_setzero_ph(), k, a)
+}
+
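(Editorial note, not part of the patch: `vgetmantph_*` and `vgetmantsh` receive a single immediate assembled as `(SIGN << 2) | NORM`, i.e. the normalization interval in the low bits and the sign control in bits 3:2. A minimal sketch of that packing; the local constants are hypothetical mirrors of the documented enum values, not the stdarch items themselves:)

    // Sketch only: how the getmant immediate is packed in the calls above.
    const MANT_NORM_P75_1P5: i32 = 0x03; // mirrors _MM_MANT_NORM_p75_1p5
    const MANT_SIGN_ZERO: i32 = 0x01; // mirrors _MM_MANT_SIGN_zero
    const fn getmant_imm(norm: i32, sign: i32) -> i32 {
        (sign << 2) | norm
    }
    const _: () = assert!(getmant_imm(MANT_NORM_P75_1P5, MANT_SIGN_ZERO) == 0b0111);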
+/// Normalize the mantissas of packed half-precision (16-bit) floating-point elements in a, and store
+/// the results in dst. This intrinsic essentially calculates `±(2^k)*|x.significand|`, where k depends
+/// on the interval range defined by norm and the sign depends on sign and the source sign.
+///
+/// The mantissa is normalized to the interval specified by interv, which can take the following values:
+///
+///     _MM_MANT_NORM_1_2     // interval [1, 2)
+///     _MM_MANT_NORM_p5_2    // interval [0.5, 2)
+///     _MM_MANT_NORM_p5_1    // interval [0.5, 1)
+///     _MM_MANT_NORM_p75_1p5 // interval [0.75, 1.5)
+///
+/// The sign is determined by sc which can take the following values:
+///
+///     _MM_MANT_SIGN_src  // sign = sign(src)
+///     _MM_MANT_SIGN_zero // sign = 0
+///     _MM_MANT_SIGN_nan  // dst = NaN if sign(src) = 1
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_getmant_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vgetmantph, NORM = 0, SIGN = 0))]
+#[rustc_legacy_const_generics(1, 2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_getmant_ph<
+    const NORM: _MM_MANTISSA_NORM_ENUM,
+    const SIGN: _MM_MANTISSA_SIGN_ENUM,
+>(
+    a: __m512h,
+) -> __m512h {
+    static_assert_uimm_bits!(NORM, 4);
+    static_assert_uimm_bits!(SIGN, 2);
+    _mm512_mask_getmant_ph::<NORM, SIGN>(_mm512_undefined_ph(), 0xffffffff, a)
+}
+
+/// Normalize the mantissas of packed half-precision (16-bit) floating-point elements in a, and store
+/// the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
+/// This intrinsic essentially calculates `±(2^k)*|x.significand|`, where k depends on the interval range defined
+/// by norm and the sign depends on sign and the source sign.
+///
+/// The mantissa is normalized to the interval specified by interv, which can take the following values:
+///
+///     _MM_MANT_NORM_1_2     // interval [1, 2)
+///     _MM_MANT_NORM_p5_2    // interval [0.5, 2)
+///     _MM_MANT_NORM_p5_1    // interval [0.5, 1)
+///     _MM_MANT_NORM_p75_1p5 // interval [0.75, 1.5)
+///
+/// The sign is determined by sc which can take the following values:
+///
+///     _MM_MANT_SIGN_src  // sign = sign(src)
+///     _MM_MANT_SIGN_zero // sign = 0
+///     _MM_MANT_SIGN_nan  // dst = NaN if sign(src) = 1
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_getmant_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vgetmantph, NORM = 0, SIGN = 0))]
+#[rustc_legacy_const_generics(3, 4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_getmant_ph<
+    const NORM: _MM_MANTISSA_NORM_ENUM,
+    const SIGN: _MM_MANTISSA_SIGN_ENUM,
+>(
+    src: __m512h,
+    k: __mmask32,
+    a: __m512h,
+) -> __m512h {
+    static_assert_uimm_bits!(NORM, 4);
+    static_assert_uimm_bits!(SIGN, 2);
+    _mm512_mask_getmant_round_ph::<NORM, SIGN, _MM_FROUND_CUR_DIRECTION>(src, k, a)
+}
+
+/// Normalize the mantissas of packed half-precision (16-bit) floating-point elements in a, and store
+/// the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+/// This intrinsic essentially calculates `±(2^k)*|x.significand|`, where k depends on the interval range defined
+/// by norm and the sign depends on sign and the source sign.
+///
+/// The mantissa is normalized to the interval specified by interv, which can take the following values:
+///
+///     _MM_MANT_NORM_1_2     // interval [1, 2)
+///     _MM_MANT_NORM_p5_2    // interval [0.5, 2)
+///     _MM_MANT_NORM_p5_1    // interval [0.5, 1)
+///     _MM_MANT_NORM_p75_1p5 // interval [0.75, 1.5)
+///
+/// The sign is determined by sc which can take the following values:
+///
+///     _MM_MANT_SIGN_src  // sign = sign(src)
+///     _MM_MANT_SIGN_zero // sign = 0
+///     _MM_MANT_SIGN_nan  // dst = NaN if sign(src) = 1
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_getmant_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vgetmantph, NORM = 0, SIGN = 0))]
+#[rustc_legacy_const_generics(2, 3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_getmant_ph<
+    const NORM: _MM_MANTISSA_NORM_ENUM,
+    const SIGN: _MM_MANTISSA_SIGN_ENUM,
+>(
+    k: __mmask32,
+    a: __m512h,
+) -> __m512h {
+    static_assert_uimm_bits!(NORM, 4);
+    static_assert_uimm_bits!(SIGN, 2);
+    _mm512_mask_getmant_ph::<NORM, SIGN>(_mm512_setzero_ph(), k, a)
+}
+
+/// Normalize the mantissas of packed half-precision (16-bit) floating-point elements in a, and store
+/// the results in dst. This intrinsic essentially calculates `±(2^k)*|x.significand|`, where k depends
+/// on the interval range defined by norm and the sign depends on sign and the source sign. Exceptions can
+/// be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
+///
+/// The mantissa is normalized to the interval specified by interv, which can take the following values:
+///
+///     _MM_MANT_NORM_1_2     // interval [1, 2)
+///     _MM_MANT_NORM_p5_2    // interval [0.5, 2)
+///     _MM_MANT_NORM_p5_1    // interval [0.5, 1)
+///     _MM_MANT_NORM_p75_1p5 // interval [0.75, 1.5)
+///
+/// The sign is determined by sc which can take the following values:
+///
+///     _MM_MANT_SIGN_src  // sign = sign(src)
+///     _MM_MANT_SIGN_zero // sign = 0
+///     _MM_MANT_SIGN_nan  // dst = NaN if sign(src) = 1
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_getmant_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vgetmantph, NORM = 0, SIGN = 0, SAE = 8))]
+#[rustc_legacy_const_generics(1, 2, 3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_getmant_round_ph<
+    const NORM: _MM_MANTISSA_NORM_ENUM,
+    const SIGN: _MM_MANTISSA_SIGN_ENUM,
+    const SAE: i32,
+>(
+    a: __m512h,
+) -> __m512h {
+    static_assert_uimm_bits!(NORM, 4);
+    static_assert_uimm_bits!(SIGN, 2);
+    static_assert_sae!(SAE);
+    _mm512_mask_getmant_round_ph::<NORM, SIGN, SAE>(_mm512_undefined_ph(), 0xffffffff, a)
+}
+
+/// Normalize the mantissas of packed half-precision (16-bit) floating-point elements in a, and store
+/// the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
+/// This intrinsic essentially calculates `±(2^k)*|x.significand|`, where k depends on the interval range defined
+/// by norm and the sign depends on sign and the source sign. Exceptions can be suppressed by passing _MM_FROUND_NO_EXC
+/// in the sae parameter.
+///
+/// The mantissa is normalized to the interval specified by interv, which can take the following values:
+///
+///     _MM_MANT_NORM_1_2     // interval [1, 2)
+///     _MM_MANT_NORM_p5_2    // interval [0.5, 2)
+///     _MM_MANT_NORM_p5_1    // interval [0.5, 1)
+///     _MM_MANT_NORM_p75_1p5 // interval [0.75, 1.5)
+///
+/// The sign is determined by sc which can take the following values:
+///
+///     _MM_MANT_SIGN_src  // sign = sign(src)
+///     _MM_MANT_SIGN_zero // sign = 0
+///     _MM_MANT_SIGN_nan  // dst = NaN if sign(src) = 1
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_getmant_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vgetmantph, NORM = 0, SIGN = 0, SAE = 8))]
+#[rustc_legacy_const_generics(3, 4, 5)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_getmant_round_ph<
+    const NORM: _MM_MANTISSA_NORM_ENUM,
+    const SIGN: _MM_MANTISSA_SIGN_ENUM,
+    const SAE: i32,
+>(
+    src: __m512h,
+    k: __mmask32,
+    a: __m512h,
+) -> __m512h {
+    static_assert_uimm_bits!(NORM, 4);
+    static_assert_uimm_bits!(SIGN, 2);
+    static_assert_sae!(SAE);
+    vgetmantph_512(a, (SIGN << 2) | NORM, src, k, SAE)
+}
+
+/// Normalize the mantissas of packed half-precision (16-bit) floating-point elements in a, and store
+/// the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+/// This intrinsic essentially calculates `±(2^k)*|x.significand|`, where k depends on the interval range defined
+/// by norm and the sign depends on sign and the source sign. Exceptions can be suppressed by passing _MM_FROUND_NO_EXC
+/// in the sae parameter.
+///
+/// The mantissa is normalized to the interval specified by interv, which can take the following values:
+///
+///     _MM_MANT_NORM_1_2     // interval [1, 2)
+///     _MM_MANT_NORM_p5_2    // interval [0.5, 2)
+///     _MM_MANT_NORM_p5_1    // interval [0.5, 1)
+///     _MM_MANT_NORM_p75_1p5 // interval [0.75, 1.5)
+///
+/// The sign is determined by sc which can take the following values:
+///
+///     _MM_MANT_SIGN_src  // sign = sign(src)
+///     _MM_MANT_SIGN_zero // sign = 0
+///     _MM_MANT_SIGN_nan  // dst = NaN if sign(src) = 1
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_getmant_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vgetmantph, NORM = 0, SIGN = 0, SAE = 8))]
+#[rustc_legacy_const_generics(2, 3, 4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_getmant_round_ph<
+    const NORM: _MM_MANTISSA_NORM_ENUM,
+    const SIGN: _MM_MANTISSA_SIGN_ENUM,
+    const SAE: i32,
+>(
+    k: __mmask32,
+    a: __m512h,
+) -> __m512h {
+    static_assert_uimm_bits!(NORM, 4);
+    static_assert_uimm_bits!(SIGN, 2);
+    static_assert_sae!(SAE);
+    _mm512_mask_getmant_round_ph::<NORM, SIGN, SAE>(_mm512_setzero_ph(), k, a)
+}
+
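(Editorial note, not part of the patch: a scalar model of the getmant operation documented above, for the NORM = _MM_MANT_NORM_1_2, SIGN = _MM_MANT_SIGN_zero case and finite, non-zero inputs. It strips the exponent so the result lands in [1.0, 2.0); `getmant_model_1_2` is a hypothetical helper using f32, since `f16` is still unstable.)

    // Sketch only: getmant with interval [1, 2) and the sign forced to zero.
    fn getmant_model_1_2(x: f32) -> f32 {
        let k = x.abs().log2().floor(); // same k as the getexp model earlier
        x.abs() / 2f32.powf(k)
    }

    fn main() {
        assert_eq!(getmant_model_1_2(24.0), 1.5); // 24 = 1.5 * 2^4
        assert_eq!(getmant_model_1_2(-0.5), 1.0); // |-0.5| = 1.0 * 2^-1
    }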
+/// Normalize the mantissas of the lower half-precision (16-bit) floating-point element in b, store
+/// the result in the lower element of dst, and copy the upper 7 packed elements from a to the upper
+/// elements of dst. This intrinsic essentially calculates `±(2^k)*|x.significand|`, where k depends
+/// on the interval range defined by norm and the sign depends on sign and the source sign.
+///
+/// The mantissa is normalized to the interval specified by interv, which can take the following values:
+///
+///     _MM_MANT_NORM_1_2     // interval [1, 2)
+///     _MM_MANT_NORM_p5_2    // interval [0.5, 2)
+///     _MM_MANT_NORM_p5_1    // interval [0.5, 1)
+///     _MM_MANT_NORM_p75_1p5 // interval [0.75, 1.5)
+///
+/// The sign is determined by sc which can take the following values:
+///
+///     _MM_MANT_SIGN_src  // sign = sign(src)
+///     _MM_MANT_SIGN_zero // sign = 0
+///     _MM_MANT_SIGN_nan  // dst = NaN if sign(src) = 1
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_getmant_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vgetmantsh, NORM = 0, SIGN = 0))]
+#[rustc_legacy_const_generics(2, 3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_getmant_sh<
+    const NORM: _MM_MANTISSA_NORM_ENUM,
+    const SIGN: _MM_MANTISSA_SIGN_ENUM,
+>(
+    a: __m128h,
+    b: __m128h,
+) -> __m128h {
+    static_assert_uimm_bits!(NORM, 4);
+    static_assert_uimm_bits!(SIGN, 2);
+    _mm_mask_getmant_sh::<NORM, SIGN>(_mm_undefined_ph(), 0xff, a, b)
+}
+
+/// Normalize the mantissas of the lower half-precision (16-bit) floating-point element in b, store
+/// the result in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set),
+/// and copy the upper 7 packed elements from a to the upper elements of dst. This intrinsic essentially calculates
+/// `±(2^k)*|x.significand|`, where k depends on the interval range defined by norm and the sign depends on sign and
+/// the source sign.
+///
+/// The mantissa is normalized to the interval specified by interv, which can take the following values:
+///
+///     _MM_MANT_NORM_1_2     // interval [1, 2)
+///     _MM_MANT_NORM_p5_2    // interval [0.5, 2)
+///     _MM_MANT_NORM_p5_1    // interval [0.5, 1)
+///     _MM_MANT_NORM_p75_1p5 // interval [0.75, 1.5)
+///
+/// The sign is determined by sc which can take the following values:
+///
+///     _MM_MANT_SIGN_src  // sign = sign(src)
+///     _MM_MANT_SIGN_zero // sign = 0
+///     _MM_MANT_SIGN_nan  // dst = NaN if sign(src) = 1
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_getmant_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vgetmantsh, NORM = 0, SIGN = 0))]
+#[rustc_legacy_const_generics(4, 5)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_getmant_sh<
+    const NORM: _MM_MANTISSA_NORM_ENUM,
+    const SIGN: _MM_MANTISSA_SIGN_ENUM,
+>(
+    src: __m128h,
+    k: __mmask8,
+    a: __m128h,
+    b: __m128h,
+) -> __m128h {
+    static_assert_uimm_bits!(NORM, 4);
+    static_assert_uimm_bits!(SIGN, 2);
+    _mm_mask_getmant_round_sh::<NORM, SIGN, _MM_FROUND_CUR_DIRECTION>(src, k, a, b)
+}
+
+/// Normalize the mantissas of the lower half-precision (16-bit) floating-point element in b, store
+/// the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set),
+/// and copy the upper 7 packed elements from a to the upper elements of dst. This intrinsic essentially calculates
+/// `±(2^k)*|x.significand|`, where k depends on the interval range defined by norm and the sign depends on sign and
+/// the source sign.
+/// +/// The mantissa is normalized to the interval specified by interv, which can take the following values: +/// +/// _MM_MANT_NORM_1_2 // interval [1, 2) +/// _MM_MANT_NORM_p5_2 // interval [0.5, 2) +/// _MM_MANT_NORM_p5_1 // interval [0.5, 1) +/// _MM_MANT_NORM_p75_1p5 // interval [0.75, 1.5) +/// +/// The sign is determined by sc which can take the following values: +/// +/// _MM_MANT_SIGN_src // sign = sign(src) +/// _MM_MANT_SIGN_zero // sign = 0 +/// _MM_MANT_SIGN_nan // dst = NaN if sign(src) = 1 +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_getmant_sh) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vgetmantsh, NORM = 0, SIGN = 0))] +#[rustc_legacy_const_generics(3, 4)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_maskz_getmant_sh< + const NORM: _MM_MANTISSA_NORM_ENUM, + const SIGN: _MM_MANTISSA_SIGN_ENUM, +>( + k: __mmask8, + a: __m128h, + b: __m128h, +) -> __m128h { + static_assert_uimm_bits!(NORM, 4); + static_assert_uimm_bits!(SIGN, 2); + _mm_mask_getmant_sh::(_mm_setzero_ph(), k, a, b) +} + +/// Normalize the mantissas of the lower half-precision (16-bit) floating-point element in b, store +/// the result in the lower element of dst, and copy the upper 7 packed elements from a to the upper +/// elements of dst. This intrinsic essentially calculates `±(2^k)*|x.significand|`, where k depends +/// on the interval range defined by norm and the sign depends on sign and the source sign. Exceptions can +/// be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter +/// +/// The mantissa is normalized to the interval specified by interv, which can take the following values: +/// +/// _MM_MANT_NORM_1_2 // interval [1, 2) +/// _MM_MANT_NORM_p5_2 // interval [0.5, 2) +/// _MM_MANT_NORM_p5_1 // interval [0.5, 1) +/// _MM_MANT_NORM_p75_1p5 // interval [0.75, 1.5) +/// +/// The sign is determined by sc which can take the following values: +/// +/// _MM_MANT_SIGN_src // sign = sign(src) +/// _MM_MANT_SIGN_zero // sign = 0 +/// _MM_MANT_SIGN_nan // dst = NaN if sign(src) = 1 +/// +/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_getmant_round_sh) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vgetmantsh, NORM = 0, SIGN = 0, SAE = 8))] +#[rustc_legacy_const_generics(2, 3, 4)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_getmant_round_sh< + const NORM: _MM_MANTISSA_NORM_ENUM, + const SIGN: _MM_MANTISSA_SIGN_ENUM, + const SAE: i32, +>( + a: __m128h, + b: __m128h, +) -> __m128h { + static_assert_uimm_bits!(NORM, 4); + static_assert_uimm_bits!(SIGN, 2); + static_assert_sae!(SAE); + _mm_mask_getmant_round_sh::(_mm_undefined_ph(), 0xff, a, b) +} + +/// Normalize the mantissas of the lower half-precision (16-bit) floating-point element in b, store +/// the result in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set), +/// and copy the upper 7 packed elements from a to the upper elements of dst. This intrinsic essentially calculates +/// `±(2^k)*|x.significand|`, where k depends on the interval range defined by norm and the sign depends on sign and +/// the source sign. 
Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter +/// +/// The mantissa is normalized to the interval specified by interv, which can take the following values: +/// +/// _MM_MANT_NORM_1_2 // interval [1, 2) +/// _MM_MANT_NORM_p5_2 // interval [0.5, 2) +/// _MM_MANT_NORM_p5_1 // interval [0.5, 1) +/// _MM_MANT_NORM_p75_1p5 // interval [0.75, 1.5) +/// +/// The sign is determined by sc which can take the following values: +/// +/// _MM_MANT_SIGN_src // sign = sign(src) +/// _MM_MANT_SIGN_zero // sign = 0 +/// _MM_MANT_SIGN_nan // dst = NaN if sign(src) = 1 +/// +/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_getmant_round_sh) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vgetmantsh, NORM = 0, SIGN = 0, SAE = 8))] +#[rustc_legacy_const_generics(4, 5, 6)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mask_getmant_round_sh< + const NORM: _MM_MANTISSA_NORM_ENUM, + const SIGN: _MM_MANTISSA_SIGN_ENUM, + const SAE: i32, +>( + src: __m128h, + k: __mmask8, + a: __m128h, + b: __m128h, +) -> __m128h { + static_assert_uimm_bits!(NORM, 4); + static_assert_uimm_bits!(SIGN, 2); + static_assert_sae!(SAE); + vgetmantsh(a, b, (SIGN << 2) | NORM, src, k, SAE) +} + +/// Normalize the mantissas of the lower half-precision (16-bit) floating-point element in b, store +/// the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), +/// and copy the upper 7 packed elements from a to the upper elements of dst. This intrinsic essentially calculates +/// `±(2^k)*|x.significand|`, where k depends on the interval range defined by norm and the sign depends on sign and +/// the source sign. Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter +/// +/// The mantissa is normalized to the interval specified by interv, which can take the following values: +/// +/// _MM_MANT_NORM_1_2 // interval [1, 2) +/// _MM_MANT_NORM_p5_2 // interval [0.5, 2) +/// _MM_MANT_NORM_p5_1 // interval [0.5, 1) +/// _MM_MANT_NORM_p75_1p5 // interval [0.75, 1.5) +/// +/// The sign is determined by sc which can take the following values: +/// +/// _MM_MANT_SIGN_src // sign = sign(src) +/// _MM_MANT_SIGN_zero // sign = 0 +/// _MM_MANT_SIGN_nan // dst = NaN if sign(src) = 1 +/// +/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_getmant_round_sh) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vgetmantsh, NORM = 0, SIGN = 0, SAE = 8))] +#[rustc_legacy_const_generics(3, 4, 5)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_maskz_getmant_round_sh< + const NORM: _MM_MANTISSA_NORM_ENUM, + const SIGN: _MM_MANTISSA_SIGN_ENUM, + const SAE: i32, +>( + k: __mmask8, + a: __m128h, + b: __m128h, +) -> __m128h { + static_assert_uimm_bits!(NORM, 4); + static_assert_uimm_bits!(SIGN, 2); + static_assert_sae!(SAE); + _mm_mask_getmant_round_sh::(_mm_setzero_ph(), k, a, b) +} + +/// Round packed half-precision (16-bit) floating-point elements in a to the number of fraction bits +/// specified by imm8, and store the results in dst. 
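
A quick sanity check of the getmant semantics (an illustrative sketch, not part of the patch; it assumes `_mm_set_sh` and `_mm_cvtsh_h` from the earlier parts of this series):

    // 12.0 = 1.5 * 2^3, so normalizing the mantissa to [1, 2) while
    // keeping the source sign yields 1.5 in the lower lane.
    let b = _mm_set_sh(12.0);
    let r = _mm_getmant_sh::<_MM_MANT_NORM_1_2, _MM_MANT_SIGN_src>(b, b);
    assert_eq!(_mm_cvtsh_h(r), 1.5);
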
+/// Round packed half-precision (16-bit) floating-point elements in a to the number of fraction bits
+/// specified by imm8, and store the results in dst.
+///
+/// Rounding is done according to the imm8 parameter, which can be one of:
+///
+///     _MM_FROUND_TO_NEAREST_INT // round to nearest
+///     _MM_FROUND_TO_NEG_INF     // round down
+///     _MM_FROUND_TO_POS_INF     // round up
+///     _MM_FROUND_TO_ZERO        // truncate
+///     _MM_FROUND_CUR_DIRECTION  // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_roundscale_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vrndscaleph, IMM8 = 0))]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_roundscale_ph<const IMM8: i32>(a: __m128h) -> __m128h {
+    static_assert_uimm_bits!(IMM8, 8);
+    _mm_mask_roundscale_ph::<IMM8>(_mm_undefined_ph(), 0xff, a)
+}
+
+/// Round packed half-precision (16-bit) floating-point elements in a to the number of fraction bits
+/// specified by imm8, and store the results in dst using writemask k (elements are copied from src when
+/// the corresponding mask bit is not set).
+///
+/// Rounding is done according to the imm8 parameter, which can be one of:
+///
+///     _MM_FROUND_TO_NEAREST_INT // round to nearest
+///     _MM_FROUND_TO_NEG_INF     // round down
+///     _MM_FROUND_TO_POS_INF     // round up
+///     _MM_FROUND_TO_ZERO        // truncate
+///     _MM_FROUND_CUR_DIRECTION  // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_roundscale_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vrndscaleph, IMM8 = 0))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_roundscale_ph<const IMM8: i32>(
+    src: __m128h,
+    k: __mmask8,
+    a: __m128h,
+) -> __m128h {
+    static_assert_uimm_bits!(IMM8, 8);
+    vrndscaleph_128(a, IMM8, src, k)
+}
+
+/// Round packed half-precision (16-bit) floating-point elements in a to the number of fraction bits
+/// specified by imm8, and store the results in dst using zeromask k (elements are zeroed out when the corresponding
+/// mask bit is not set).
+///
+/// Rounding is done according to the imm8 parameter, which can be one of:
+///
+///     _MM_FROUND_TO_NEAREST_INT // round to nearest
+///     _MM_FROUND_TO_NEG_INF     // round down
+///     _MM_FROUND_TO_POS_INF     // round up
+///     _MM_FROUND_TO_ZERO        // truncate
+///     _MM_FROUND_CUR_DIRECTION  // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_roundscale_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vrndscaleph, IMM8 = 0))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_roundscale_ph<const IMM8: i32>(k: __mmask8, a: __m128h) -> __m128h {
+    static_assert_uimm_bits!(IMM8, 8);
+    _mm_mask_roundscale_ph::<IMM8>(_mm_setzero_ph(), k, a)
+}
+
+/// Round packed half-precision (16-bit) floating-point elements in a to the number of fraction bits
+/// specified by imm8, and store the results in dst.
+///
+/// Rounding is done according to the imm8 parameter, which can be one of:
+///
+///     _MM_FROUND_TO_NEAREST_INT // round to nearest
+///     _MM_FROUND_TO_NEG_INF     // round down
+///     _MM_FROUND_TO_POS_INF     // round up
+///     _MM_FROUND_TO_ZERO        // truncate
+///     _MM_FROUND_CUR_DIRECTION  // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_roundscale_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vrndscaleph, IMM8 = 0))]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_roundscale_ph<const IMM8: i32>(a: __m256h) -> __m256h {
+    static_assert_uimm_bits!(IMM8, 8);
+    _mm256_mask_roundscale_ph::<IMM8>(_mm256_undefined_ph(), 0xffff, a)
+}
+
+/// Round packed half-precision (16-bit) floating-point elements in a to the number of fraction bits
+/// specified by imm8, and store the results in dst using writemask k (elements are copied from src when
+/// the corresponding mask bit is not set).
+///
+/// Rounding is done according to the imm8 parameter, which can be one of:
+///
+///     _MM_FROUND_TO_NEAREST_INT // round to nearest
+///     _MM_FROUND_TO_NEG_INF     // round down
+///     _MM_FROUND_TO_POS_INF     // round up
+///     _MM_FROUND_TO_ZERO        // truncate
+///     _MM_FROUND_CUR_DIRECTION  // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_roundscale_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vrndscaleph, IMM8 = 0))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_mask_roundscale_ph<const IMM8: i32>(
+    src: __m256h,
+    k: __mmask16,
+    a: __m256h,
+) -> __m256h {
+    static_assert_uimm_bits!(IMM8, 8);
+    vrndscaleph_256(a, IMM8, src, k)
+}
+
+/// Round packed half-precision (16-bit) floating-point elements in a to the number of fraction bits
+/// specified by imm8, and store the results in dst using zeromask k (elements are zeroed out when the corresponding
+/// mask bit is not set).
+///
+/// Rounding is done according to the imm8 parameter, which can be one of:
+///
+///     _MM_FROUND_TO_NEAREST_INT // round to nearest
+///     _MM_FROUND_TO_NEG_INF     // round down
+///     _MM_FROUND_TO_POS_INF     // round up
+///     _MM_FROUND_TO_ZERO        // truncate
+///     _MM_FROUND_CUR_DIRECTION  // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_roundscale_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vrndscaleph, IMM8 = 0))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_maskz_roundscale_ph<const IMM8: i32>(k: __mmask16, a: __m256h) -> __m256h {
+    static_assert_uimm_bits!(IMM8, 8);
+    _mm256_mask_roundscale_ph::<IMM8>(_mm256_setzero_ph(), k, a)
+}
+
+/// Round packed half-precision (16-bit) floating-point elements in a to the number of fraction bits
+/// specified by imm8, and store the results in dst.
+///
+/// Rounding is done according to the imm8 parameter, which can be one of:
+///
+///     _MM_FROUND_TO_NEAREST_INT // round to nearest
+///     _MM_FROUND_TO_NEG_INF     // round down
+///     _MM_FROUND_TO_POS_INF     // round up
+///     _MM_FROUND_TO_ZERO        // truncate
+///     _MM_FROUND_CUR_DIRECTION  // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_roundscale_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vrndscaleph, IMM8 = 0))]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_roundscale_ph<const IMM8: i32>(a: __m512h) -> __m512h {
+    static_assert_uimm_bits!(IMM8, 8);
+    _mm512_mask_roundscale_ph::<IMM8>(_mm512_undefined_ph(), 0xffffffff, a)
+}
+
+/// Round packed half-precision (16-bit) floating-point elements in a to the number of fraction bits
+/// specified by imm8, and store the results in dst using writemask k (elements are copied from src when
+/// the corresponding mask bit is not set).
+///
+/// Rounding is done according to the imm8 parameter, which can be one of:
+///
+///     _MM_FROUND_TO_NEAREST_INT // round to nearest
+///     _MM_FROUND_TO_NEG_INF     // round down
+///     _MM_FROUND_TO_POS_INF     // round up
+///     _MM_FROUND_TO_ZERO        // truncate
+///     _MM_FROUND_CUR_DIRECTION  // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_roundscale_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vrndscaleph, IMM8 = 0))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_roundscale_ph<const IMM8: i32>(
+    src: __m512h,
+    k: __mmask32,
+    a: __m512h,
+) -> __m512h {
+    static_assert_uimm_bits!(IMM8, 8);
+    _mm512_mask_roundscale_round_ph::<IMM8, _MM_FROUND_CUR_DIRECTION>(src, k, a)
+}
+
+/// Round packed half-precision (16-bit) floating-point elements in a to the number of fraction bits
+/// specified by imm8, and store the results in dst using zeromask k (elements are zeroed out when the corresponding
+/// mask bit is not set).
+///
+/// Rounding is done according to the imm8 parameter, which can be one of:
+///
+///     _MM_FROUND_TO_NEAREST_INT // round to nearest
+///     _MM_FROUND_TO_NEG_INF     // round down
+///     _MM_FROUND_TO_POS_INF     // round up
+///     _MM_FROUND_TO_ZERO        // truncate
+///     _MM_FROUND_CUR_DIRECTION  // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_roundscale_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vrndscaleph, IMM8 = 0))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_roundscale_ph<const IMM8: i32>(k: __mmask32, a: __m512h) -> __m512h {
+    static_assert_uimm_bits!(IMM8, 8);
+    _mm512_mask_roundscale_ph::<IMM8>(_mm512_setzero_ph(), k, a)
+}
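
As a usage sketch (illustrative only, assuming `_mm_set1_ph` from earlier in this series): `IMM8` packs the number of fraction bits to keep in its upper bits and the rounding mode in its lower bits, so plain round-to-nearest-integer is just the mode constant:

    // Round every lane to the nearest integer: 0 fraction bits kept,
    // rounding mode = _MM_FROUND_TO_NEAREST_INT.
    let a = _mm_set1_ph(2.75);
    let r = _mm_roundscale_ph::<_MM_FROUND_TO_NEAREST_INT>(a);
    // each lane of r is now 3.0
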
+/// Round packed half-precision (16-bit) floating-point elements in a to the number of fraction bits
+/// specified by imm8, and store the results in dst. Exceptions can be suppressed by passing _MM_FROUND_NO_EXC
+/// in the sae parameter
+///
+/// Rounding is done according to the imm8 parameter, which can be one of:
+///
+///     _MM_FROUND_TO_NEAREST_INT // round to nearest
+///     _MM_FROUND_TO_NEG_INF     // round down
+///     _MM_FROUND_TO_POS_INF     // round up
+///     _MM_FROUND_TO_ZERO        // truncate
+///     _MM_FROUND_CUR_DIRECTION  // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_roundscale_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vrndscaleph, IMM8 = 0, SAE = 8))]
+#[rustc_legacy_const_generics(1, 2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_roundscale_round_ph<const IMM8: i32, const SAE: i32>(a: __m512h) -> __m512h {
+    static_assert_uimm_bits!(IMM8, 8);
+    static_assert_sae!(SAE);
+    _mm512_mask_roundscale_round_ph::<IMM8, SAE>(_mm512_undefined_ph(), 0xffffffff, a)
+}
+
+/// Round packed half-precision (16-bit) floating-point elements in a to the number of fraction bits
+/// specified by imm8, and store the results in dst using writemask k (elements are copied from src when
+/// the corresponding mask bit is not set). Exceptions can be suppressed by passing _MM_FROUND_NO_EXC
+/// in the sae parameter
+///
+/// Rounding is done according to the imm8 parameter, which can be one of:
+///
+///     _MM_FROUND_TO_NEAREST_INT // round to nearest
+///     _MM_FROUND_TO_NEG_INF     // round down
+///     _MM_FROUND_TO_POS_INF     // round up
+///     _MM_FROUND_TO_ZERO        // truncate
+///     _MM_FROUND_CUR_DIRECTION  // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_roundscale_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vrndscaleph, IMM8 = 0, SAE = 8))]
+#[rustc_legacy_const_generics(3, 4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_roundscale_round_ph<const IMM8: i32, const SAE: i32>(
+    src: __m512h,
+    k: __mmask32,
+    a: __m512h,
+) -> __m512h {
+    static_assert_uimm_bits!(IMM8, 8);
+    static_assert_sae!(SAE);
+    vrndscaleph_512(a, IMM8, src, k, SAE)
+}
+
+/// Round packed half-precision (16-bit) floating-point elements in a to the number of fraction bits
+/// specified by imm8, and store the results in dst using zeromask k (elements are zeroed out when the corresponding
+/// mask bit is not set). Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter
+///
+/// Rounding is done according to the imm8 parameter, which can be one of:
+///
+///     _MM_FROUND_TO_NEAREST_INT // round to nearest
+///     _MM_FROUND_TO_NEG_INF     // round down
+///     _MM_FROUND_TO_POS_INF     // round up
+///     _MM_FROUND_TO_ZERO        // truncate
+///     _MM_FROUND_CUR_DIRECTION  // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_roundscale_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vrndscaleph, IMM8 = 0, SAE = 8))]
+#[rustc_legacy_const_generics(2, 3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_roundscale_round_ph<const IMM8: i32, const SAE: i32>(
+    k: __mmask32,
+    a: __m512h,
+) -> __m512h {
+    static_assert_uimm_bits!(IMM8, 8);
+    static_assert_sae!(SAE);
+    _mm512_mask_roundscale_round_ph::<IMM8, SAE>(_mm512_setzero_ph(), k, a)
+}
+
+/// Round the lower half-precision (16-bit) floating-point element in b to the number of fraction bits
+/// specified by imm8, store the result in the lower element of dst, and copy the upper 7 packed elements
+/// from a to the upper elements of dst.
+///
+/// Rounding is done according to the imm8 parameter, which can be one of:
+///
+///     _MM_FROUND_TO_NEAREST_INT // round to nearest
+///     _MM_FROUND_TO_NEG_INF     // round down
+///     _MM_FROUND_TO_POS_INF     // round up
+///     _MM_FROUND_TO_ZERO        // truncate
+///     _MM_FROUND_CUR_DIRECTION  // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_roundscale_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vrndscalesh, IMM8 = 0))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_roundscale_sh<const IMM8: i32>(a: __m128h, b: __m128h) -> __m128h {
+    static_assert_uimm_bits!(IMM8, 8);
+    _mm_mask_roundscale_sh::<IMM8>(_mm_undefined_ph(), 0xff, a, b)
+}
+
+/// Round the lower half-precision (16-bit) floating-point element in b to the number of fraction bits
+/// specified by imm8, store the result in the lower element of dst using writemask k (the element is copied
+/// from src when mask bit 0 is not set), and copy the upper 7 packed elements from a to the upper elements of dst.
+///
+/// Rounding is done according to the imm8 parameter, which can be one of:
+///
+///     _MM_FROUND_TO_NEAREST_INT // round to nearest
+///     _MM_FROUND_TO_NEG_INF     // round down
+///     _MM_FROUND_TO_POS_INF     // round up
+///     _MM_FROUND_TO_ZERO        // truncate
+///     _MM_FROUND_CUR_DIRECTION  // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_roundscale_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vrndscalesh, IMM8 = 0))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_roundscale_sh<const IMM8: i32>(
+    src: __m128h,
+    k: __mmask8,
+    a: __m128h,
+    b: __m128h,
+) -> __m128h {
+    static_assert_uimm_bits!(IMM8, 8);
+    _mm_mask_roundscale_round_sh::<IMM8, _MM_FROUND_CUR_DIRECTION>(src, k, a, b)
+}
+
+/// Round the lower half-precision (16-bit) floating-point element in b to the number of fraction bits
+/// specified by imm8, store the result in the lower element of dst using zeromask k (the element is zeroed
+/// out when mask bit 0 is not set), and copy the upper 7 packed elements from a to the upper elements of dst.
+///
+/// Rounding is done according to the imm8 parameter, which can be one of:
+///
+///     _MM_FROUND_TO_NEAREST_INT // round to nearest
+///     _MM_FROUND_TO_NEG_INF     // round down
+///     _MM_FROUND_TO_POS_INF     // round up
+///     _MM_FROUND_TO_ZERO        // truncate
+///     _MM_FROUND_CUR_DIRECTION  // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_roundscale_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vrndscalesh, IMM8 = 0))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_roundscale_sh<const IMM8: i32>(
+    k: __mmask8,
+    a: __m128h,
+    b: __m128h,
+) -> __m128h {
+    static_assert_uimm_bits!(IMM8, 8);
+    _mm_mask_roundscale_sh::<IMM8>(_mm_setzero_ph(), k, a, b)
+}
+
+/// Round the lower half-precision (16-bit) floating-point element in b to the number of fraction bits
+/// specified by imm8, store the result in the lower element of dst, and copy the upper 7 packed elements
+/// from a to the upper elements of dst.
+///
+/// Rounding is done according to the imm8 parameter, which can be one of:
+///
+///     _MM_FROUND_TO_NEAREST_INT // round to nearest
+///     _MM_FROUND_TO_NEG_INF     // round down
+///     _MM_FROUND_TO_POS_INF     // round up
+///     _MM_FROUND_TO_ZERO        // truncate
+///     _MM_FROUND_CUR_DIRECTION  // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
+///
+/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_roundscale_round_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vrndscalesh, IMM8 = 0, SAE = 8))]
+#[rustc_legacy_const_generics(2, 3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_roundscale_round_sh<const IMM8: i32, const SAE: i32>(
+    a: __m128h,
+    b: __m128h,
+) -> __m128h {
+    static_assert_uimm_bits!(IMM8, 8);
+    static_assert_sae!(SAE);
+    _mm_mask_roundscale_round_sh::<IMM8, SAE>(_mm_undefined_ph(), 0xff, a, b)
+}
+
+/// Round the lower half-precision (16-bit) floating-point element in b to the number of fraction bits
+/// specified by imm8, store the result in the lower element of dst using writemask k (the element is copied
+/// from src when mask bit 0 is not set), and copy the upper 7 packed elements from a to the upper elements of dst.
+///
+/// Rounding is done according to the imm8 parameter, which can be one of:
+///
+///     _MM_FROUND_TO_NEAREST_INT // round to nearest
+///     _MM_FROUND_TO_NEG_INF     // round down
+///     _MM_FROUND_TO_POS_INF     // round up
+///     _MM_FROUND_TO_ZERO        // truncate
+///     _MM_FROUND_CUR_DIRECTION  // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
+///
+/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_roundscale_round_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vrndscalesh, IMM8 = 0, SAE = 8))]
+#[rustc_legacy_const_generics(4, 5)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_roundscale_round_sh<const IMM8: i32, const SAE: i32>(
+    src: __m128h,
+    k: __mmask8,
+    a: __m128h,
+    b: __m128h,
+) -> __m128h {
+    static_assert_uimm_bits!(IMM8, 8);
+    static_assert_sae!(SAE);
+    vrndscalesh(a, b, src, k, IMM8, SAE)
+}
+
+/// Round the lower half-precision (16-bit) floating-point element in b to the number of fraction bits
+/// specified by imm8, store the result in the lower element of dst using zeromask k (the element is zeroed
+/// out when mask bit 0 is not set), and copy the upper 7 packed elements from a to the upper elements of dst.
+///
+/// Rounding is done according to the imm8 parameter, which can be one of:
+///
+///     _MM_FROUND_TO_NEAREST_INT // round to nearest
+///     _MM_FROUND_TO_NEG_INF     // round down
+///     _MM_FROUND_TO_POS_INF     // round up
+///     _MM_FROUND_TO_ZERO        // truncate
+///     _MM_FROUND_CUR_DIRECTION  // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
+///
+/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_roundscale_round_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vrndscalesh, IMM8 = 0, SAE = 8))]
+#[rustc_legacy_const_generics(3, 4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_roundscale_round_sh<const IMM8: i32, const SAE: i32>(
+    k: __mmask8,
+    a: __m128h,
+    b: __m128h,
+) -> __m128h {
+    static_assert_uimm_bits!(IMM8, 8);
+    static_assert_sae!(SAE);
+    _mm_mask_roundscale_round_sh::<IMM8, SAE>(_mm_setzero_ph(), k, a, b)
+}
+
+/// Scale the packed half-precision (16-bit) floating-point elements in a using values from b, and store
+/// the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_scalef_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vscalefph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_scalef_ph(a: __m128h, b: __m128h) -> __m128h {
+    _mm_mask_scalef_ph(_mm_undefined_ph(), 0xff, a, b)
+}
+
+/// Scale the packed half-precision (16-bit) floating-point elements in a using values from b, and store
+/// the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_scalef_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vscalefph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_scalef_ph(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
+    vscalefph_128(a, b, src, k)
+}
+
+/// Scale the packed half-precision (16-bit) floating-point elements in a using values from b, and store
+/// the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_scalef_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vscalefph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_scalef_ph(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
+    _mm_mask_scalef_ph(_mm_setzero_ph(), k, a, b)
+}
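
A brief sketch of the scalef semantics (illustrative, with the same `_mm_set1_ph` assumption): each lane computes `a * 2^floor(b)`:

    // 3.0 * 2^floor(2.0) = 12.0 in every lane.
    let a = _mm_set1_ph(3.0);
    let b = _mm_set1_ph(2.0);
    let r = _mm_scalef_ph(a, b);
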
+/// Scale the packed half-precision (16-bit) floating-point elements in a using values from b, and store
+/// the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_scalef_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vscalefph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_scalef_ph(a: __m256h, b: __m256h) -> __m256h {
+    _mm256_mask_scalef_ph(_mm256_undefined_ph(), 0xffff, a, b)
+}
+
+/// Scale the packed half-precision (16-bit) floating-point elements in a using values from b, and store
+/// the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_scalef_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vscalefph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_mask_scalef_ph(src: __m256h, k: __mmask16, a: __m256h, b: __m256h) -> __m256h {
+    vscalefph_256(a, b, src, k)
+}
+
+/// Scale the packed half-precision (16-bit) floating-point elements in a using values from b, and store
+/// the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_scalef_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vscalefph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_maskz_scalef_ph(k: __mmask16, a: __m256h, b: __m256h) -> __m256h {
+    _mm256_mask_scalef_ph(_mm256_setzero_ph(), k, a, b)
+}
+
+/// Scale the packed half-precision (16-bit) floating-point elements in a using values from b, and store
+/// the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_scalef_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vscalefph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_scalef_ph(a: __m512h, b: __m512h) -> __m512h {
+    _mm512_mask_scalef_ph(_mm512_undefined_ph(), 0xffffffff, a, b)
+}
+
+/// Scale the packed half-precision (16-bit) floating-point elements in a using values from b, and store
+/// the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_scalef_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vscalefph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_scalef_ph(src: __m512h, k: __mmask32, a: __m512h, b: __m512h) -> __m512h {
+    _mm512_mask_scalef_round_ph::<_MM_FROUND_CUR_DIRECTION>(src, k, a, b)
+}
+
+/// Scale the packed half-precision (16-bit) floating-point elements in a using values from b, and store
+/// the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_scalef_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vscalefph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_scalef_ph(k: __mmask32, a: __m512h, b: __m512h) -> __m512h {
+    _mm512_mask_scalef_ph(_mm512_setzero_ph(), k, a, b)
+}
+
+/// Scale the packed half-precision (16-bit) floating-point elements in a using values from b, and store
+/// the results in dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+///     (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+///     (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC)     // round down, and suppress exceptions
+///     (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC)     // round up, and suppress exceptions
+///     (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC)        // truncate, and suppress exceptions
+///     _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_scalef_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vscalefph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_scalef_round_ph<const ROUNDING: i32>(a: __m512h, b: __m512h) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    _mm512_mask_scalef_round_ph::<ROUNDING>(_mm512_undefined_ph(), 0xffffffff, a, b)
+}
+
+/// Scale the packed half-precision (16-bit) floating-point elements in a using values from b, and store
+/// the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+///     (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+///     (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC)     // round down, and suppress exceptions
+///     (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC)     // round up, and suppress exceptions
+///     (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC)        // truncate, and suppress exceptions
+///     _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_scalef_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vscalefph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_scalef_round_ph<const ROUNDING: i32>(
+    src: __m512h,
+    k: __mmask32,
+    a: __m512h,
+    b: __m512h,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    vscalefph_512(a, b, src, k, ROUNDING)
+}
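
For the `_round` variants the `ROUNDING` constant is usually a rounding mode OR-ed with `_MM_FROUND_NO_EXC`, passed through the turbofish (illustrative sketch, assuming `_mm512_set1_ph` from earlier in this series):

    // Scale with truncation and suppressed exceptions (full 512-bit register).
    let a = _mm512_set1_ph(3.0);
    let b = _mm512_set1_ph(2.0);
    let r = _mm512_scalef_round_ph::<{ _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC }>(a, b);
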
+/// Scale the packed half-precision (16-bit) floating-point elements in a using values from b, and store
+/// the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+///     (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+///     (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC)     // round down, and suppress exceptions
+///     (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC)     // round up, and suppress exceptions
+///     (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC)        // truncate, and suppress exceptions
+///     _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_scalef_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vscalefph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_scalef_round_ph<const ROUNDING: i32>(
+    k: __mmask32,
+    a: __m512h,
+    b: __m512h,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    _mm512_mask_scalef_round_ph::<ROUNDING>(_mm512_setzero_ph(), k, a, b)
+}
+
+/// Scale the packed half-precision (16-bit) floating-point elements in a using values from b, store
+/// the result in the lower element of dst, and copy the upper 7 packed elements from a to the upper
+/// elements of dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_scalef_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vscalefsh))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_scalef_sh(a: __m128h, b: __m128h) -> __m128h {
+    _mm_mask_scalef_sh(_mm_undefined_ph(), 0xff, a, b)
+}
+
+/// Scale the packed half-precision (16-bit) floating-point elements in a using values from b, store
+/// the result in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set),
+/// and copy the upper 7 packed elements from a to the upper elements of dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_scalef_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vscalefsh))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_scalef_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
+    _mm_mask_scalef_round_sh::<_MM_FROUND_CUR_DIRECTION>(src, k, a, b)
+}
+
+/// Scale the packed half-precision (16-bit) floating-point elements in a using values from b, store
+/// the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set),
+/// and copy the upper 7 packed elements from a to the upper elements of dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_scalef_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vscalefsh))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_scalef_sh(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
+    _mm_mask_scalef_sh(_mm_setzero_ph(), k, a, b)
+}
+
+/// Scale the packed half-precision (16-bit) floating-point elements in a using values from b, store
+/// the result in the lower element of dst, and copy the upper 7 packed elements from a to the upper
+/// elements of dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+///     (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+///     (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC)     // round down, and suppress exceptions
+///     (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC)     // round up, and suppress exceptions
+///     (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC)        // truncate, and suppress exceptions
+///     _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_scalef_round_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vscalefsh, ROUNDING = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_scalef_round_sh<const ROUNDING: i32>(a: __m128h, b: __m128h) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    _mm_mask_scalef_round_sh::<ROUNDING>(_mm_undefined_ph(), 0xff, a, b)
+}
+
+/// Scale the packed half-precision (16-bit) floating-point elements in a using values from b, store
+/// the result in the lower element of dst using writemask k (the element is copied from src when mask bit 0 is not set),
+/// and copy the upper 7 packed elements from a to the upper elements of dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+///     (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+///     (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC)     // round down, and suppress exceptions
+///     (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC)     // round up, and suppress exceptions
+///     (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC)        // truncate, and suppress exceptions
+///     _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_scalef_round_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vscalefsh, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_scalef_round_sh<const ROUNDING: i32>(
+    src: __m128h,
+    k: __mmask8,
+    a: __m128h,
+    b: __m128h,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    vscalefsh(a, b, src, k, ROUNDING)
+}
+
+/// Scale the packed half-precision (16-bit) floating-point elements in a using values from b, store
+/// the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set),
+/// and copy the upper 7 packed elements from a to the upper elements of dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+///     (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+///     (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC)     // round down, and suppress exceptions
+///     (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC)     // round up, and suppress exceptions
+///     (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC)        // truncate, and suppress exceptions
+///     _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_scalef_round_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vscalefsh, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_scalef_round_sh<const ROUNDING: i32>(
+    k: __mmask8,
+    a: __m128h,
+    b: __m128h,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    _mm_mask_scalef_round_sh::<ROUNDING>(_mm_setzero_ph(), k, a, b)
+}
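
The mask parameters follow the usual AVX-512 writemask/zeromask convention; a zeromask sketch (illustrative, reusing the `_mm_set1_ph` assumption):

    // Only lane 0 is computed; the remaining 7 lanes of the result are zeroed.
    let a = _mm_set1_ph(3.0);
    let b = _mm_set1_ph(2.0);
    let r = _mm_maskz_scalef_ph(0b0000_0001, a, b);
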
+/// Extract the reduced argument of packed half-precision (16-bit) floating-point elements in a by the
+/// number of bits specified by imm8, and store the results in dst.
+///
+/// Rounding is done according to the imm8 parameter, which can be one of:
+///
+///     _MM_FROUND_TO_NEAREST_INT // round to nearest
+///     _MM_FROUND_TO_NEG_INF     // round down
+///     _MM_FROUND_TO_POS_INF     // round up
+///     _MM_FROUND_TO_ZERO        // truncate
+///     _MM_FROUND_CUR_DIRECTION  // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_reduce_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vreduceph, IMM8 = 0))]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_reduce_ph<const IMM8: i32>(a: __m128h) -> __m128h {
+    static_assert_uimm_bits!(IMM8, 8);
+    _mm_mask_reduce_ph::<IMM8>(_mm_undefined_ph(), 0xff, a)
+}
+
+/// Extract the reduced argument of packed half-precision (16-bit) floating-point elements in a by the
+/// number of bits specified by imm8, and store the results in dst using writemask k (elements are copied
+/// from src when the corresponding mask bit is not set).
+///
+/// Rounding is done according to the imm8 parameter, which can be one of:
+///
+///     _MM_FROUND_TO_NEAREST_INT // round to nearest
+///     _MM_FROUND_TO_NEG_INF     // round down
+///     _MM_FROUND_TO_POS_INF     // round up
+///     _MM_FROUND_TO_ZERO        // truncate
+///     _MM_FROUND_CUR_DIRECTION  // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_reduce_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vreduceph, IMM8 = 0))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_reduce_ph<const IMM8: i32>(
+    src: __m128h,
+    k: __mmask8,
+    a: __m128h,
+) -> __m128h {
+    static_assert_uimm_bits!(IMM8, 8);
+    vreduceph_128(a, IMM8, src, k)
+}
+
+/// Extract the reduced argument of packed half-precision (16-bit) floating-point elements in a by the
+/// number of bits specified by imm8, and store the results in dst using zeromask k (elements are zeroed
+/// out when the corresponding mask bit is not set).
+///
+/// Rounding is done according to the imm8 parameter, which can be one of:
+///
+///     _MM_FROUND_TO_NEAREST_INT // round to nearest
+///     _MM_FROUND_TO_NEG_INF     // round down
+///     _MM_FROUND_TO_POS_INF     // round up
+///     _MM_FROUND_TO_ZERO        // truncate
+///     _MM_FROUND_CUR_DIRECTION  // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_reduce_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vreduceph, IMM8 = 0))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_reduce_ph<const IMM8: i32>(k: __mmask8, a: __m128h) -> __m128h {
+    static_assert_uimm_bits!(IMM8, 8);
+    _mm_mask_reduce_ph::<IMM8>(_mm_setzero_ph(), k, a)
+}
+
+/// Extract the reduced argument of packed half-precision (16-bit) floating-point elements in a by the
+/// number of bits specified by imm8, and store the results in dst.
+///
+/// Rounding is done according to the imm8 parameter, which can be one of:
+///
+///     _MM_FROUND_TO_NEAREST_INT // round to nearest
+///     _MM_FROUND_TO_NEG_INF     // round down
+///     _MM_FROUND_TO_POS_INF     // round up
+///     _MM_FROUND_TO_ZERO        // truncate
+///     _MM_FROUND_CUR_DIRECTION  // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_reduce_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vreduceph, IMM8 = 0))]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_reduce_ph<const IMM8: i32>(a: __m256h) -> __m256h {
+    static_assert_uimm_bits!(IMM8, 8);
+    _mm256_mask_reduce_ph::<IMM8>(_mm256_undefined_ph(), 0xffff, a)
+}
+
+/// Extract the reduced argument of packed half-precision (16-bit) floating-point elements in a by the
+/// number of bits specified by imm8, and store the results in dst using writemask k (elements are copied
+/// from src when the corresponding mask bit is not set).
+///
+/// Rounding is done according to the imm8 parameter, which can be one of:
+///
+///     _MM_FROUND_TO_NEAREST_INT // round to nearest
+///     _MM_FROUND_TO_NEG_INF     // round down
+///     _MM_FROUND_TO_POS_INF     // round up
+///     _MM_FROUND_TO_ZERO        // truncate
+///     _MM_FROUND_CUR_DIRECTION  // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_reduce_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vreduceph, IMM8 = 0))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_mask_reduce_ph<const IMM8: i32>(
+    src: __m256h,
+    k: __mmask16,
+    a: __m256h,
+) -> __m256h {
+    static_assert_uimm_bits!(IMM8, 8);
+    vreduceph_256(a, IMM8, src, k)
+}
+
+/// Extract the reduced argument of packed half-precision (16-bit) floating-point elements in a by the
+/// number of bits specified by imm8, and store the results in dst using zeromask k (elements are zeroed
+/// out when the corresponding mask bit is not set).
+///
+/// Rounding is done according to the imm8 parameter, which can be one of:
+///
+///     _MM_FROUND_TO_NEAREST_INT // round to nearest
+///     _MM_FROUND_TO_NEG_INF     // round down
+///     _MM_FROUND_TO_POS_INF     // round up
+///     _MM_FROUND_TO_ZERO        // truncate
+///     _MM_FROUND_CUR_DIRECTION  // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_reduce_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vreduceph, IMM8 = 0))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_maskz_reduce_ph<const IMM8: i32>(k: __mmask16, a: __m256h) -> __m256h {
+    static_assert_uimm_bits!(IMM8, 8);
+    _mm256_mask_reduce_ph::<IMM8>(_mm256_setzero_ph(), k, a)
+}
+
+/// Extract the reduced argument of packed half-precision (16-bit) floating-point elements in a by the
+/// number of bits specified by imm8, and store the results in dst.
+///
+/// Rounding is done according to the imm8 parameter, which can be one of:
+///
+///     _MM_FROUND_TO_NEAREST_INT // round to nearest
+///     _MM_FROUND_TO_NEG_INF     // round down
+///     _MM_FROUND_TO_POS_INF     // round up
+///     _MM_FROUND_TO_ZERO        // truncate
+///     _MM_FROUND_CUR_DIRECTION  // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vreduceph, IMM8 = 0))]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_reduce_ph<const IMM8: i32>(a: __m512h) -> __m512h {
+    static_assert_uimm_bits!(IMM8, 8);
+    _mm512_mask_reduce_ph::<IMM8>(_mm512_undefined_ph(), 0xffffffff, a)
+}
+
+/// Extract the reduced argument of packed half-precision (16-bit) floating-point elements in a by the
+/// number of bits specified by imm8, and store the results in dst using writemask k (elements are copied
+/// from src when the corresponding mask bit is not set).
+///
+/// Rounding is done according to the imm8 parameter, which can be one of:
+///
+///     _MM_FROUND_TO_NEAREST_INT // round to nearest
+///     _MM_FROUND_TO_NEG_INF     // round down
+///     _MM_FROUND_TO_POS_INF     // round up
+///     _MM_FROUND_TO_ZERO        // truncate
+///     _MM_FROUND_CUR_DIRECTION  // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_reduce_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vreduceph, IMM8 = 0))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_reduce_ph<const IMM8: i32>(
+    src: __m512h,
+    k: __mmask32,
+    a: __m512h,
+) -> __m512h {
+    static_assert_uimm_bits!(IMM8, 8);
+    _mm512_mask_reduce_round_ph::<IMM8, _MM_FROUND_CUR_DIRECTION>(src, k, a)
+}
+
+/// Extract the reduced argument of packed half-precision (16-bit) floating-point elements in a by the
+/// number of bits specified by imm8, and store the results in dst using zeromask k (elements are zeroed
+/// out when the corresponding mask bit is not set).
+///
+/// Rounding is done according to the imm8 parameter, which can be one of:
+///
+///     _MM_FROUND_TO_NEAREST_INT // round to nearest
+///     _MM_FROUND_TO_NEG_INF     // round down
+///     _MM_FROUND_TO_POS_INF     // round up
+///     _MM_FROUND_TO_ZERO        // truncate
+///     _MM_FROUND_CUR_DIRECTION  // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_reduce_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vreduceph, IMM8 = 0))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_reduce_ph<const IMM8: i32>(k: __mmask32, a: __m512h) -> __m512h {
+    static_assert_uimm_bits!(IMM8, 8);
+    _mm512_mask_reduce_ph::<IMM8>(_mm512_setzero_ph(), k, a)
+}
+
+/// Extract the reduced argument of packed half-precision (16-bit) floating-point elements in a by the
+/// number of bits specified by imm8, and store the results in dst.
+///
+/// Rounding is done according to the imm8 parameter, which can be one of:
+///
+///     _MM_FROUND_TO_NEAREST_INT // round to nearest
+///     _MM_FROUND_TO_NEG_INF     // round down
+///     _MM_FROUND_TO_POS_INF     // round up
+///     _MM_FROUND_TO_ZERO        // truncate
+///     _MM_FROUND_CUR_DIRECTION  // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
+///
+/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vreduceph, IMM8 = 0, SAE = 8))]
+#[rustc_legacy_const_generics(1, 2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_reduce_round_ph<const IMM8: i32, const SAE: i32>(a: __m512h) -> __m512h {
+    static_assert_uimm_bits!(IMM8, 8);
+    static_assert_sae!(SAE);
+    _mm512_mask_reduce_round_ph::<IMM8, SAE>(_mm512_undefined_ph(), 0xffffffff, a)
+}
+
+/// Extract the reduced argument of packed half-precision (16-bit) floating-point elements in a by the
+/// number of bits specified by imm8, and store the results in dst using writemask k (elements are copied
+/// from src when the corresponding mask bit is not set).
+///
+/// Rounding is done according to the imm8 parameter, which can be one of:
+///
+///     _MM_FROUND_TO_NEAREST_INT // round to nearest
+///     _MM_FROUND_TO_NEG_INF     // round down
+///     _MM_FROUND_TO_POS_INF     // round up
+///     _MM_FROUND_TO_ZERO        // truncate
+///     _MM_FROUND_CUR_DIRECTION  // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
+///
+/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_reduce_round_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vreduceph, IMM8 = 0, SAE = 8))]
+#[rustc_legacy_const_generics(3, 4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_reduce_round_ph<const IMM8: i32, const SAE: i32>(
+    src: __m512h,
+    k: __mmask32,
+    a: __m512h,
+) -> __m512h {
+    static_assert_uimm_bits!(IMM8, 8);
+    static_assert_sae!(SAE);
+    vreduceph_512(a, IMM8, src, k, SAE)
+}
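
A sketch of the reduce semantics (illustrative, again assuming `_mm_set1_ph`): with 0 fraction bits kept and round-to-nearest, vreduce returns `a - round(a)` per lane:

    // round(1.25) = 1.0, so every lane reduces to 0.25.
    let a = _mm_set1_ph(1.25);
    let r = _mm_reduce_ph::<_MM_FROUND_TO_NEAREST_INT>(a);
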
+/// +/// Rounding is done according to the imm8 parameter, which can be one of: +/// +/// _MM_FROUND_TO_NEAREST_INT // round to nearest +/// _MM_FROUND_TO_NEG_INF // round down +/// _MM_FROUND_TO_POS_INF // round up +/// _MM_FROUND_TO_ZERO // truncate +/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE +/// +/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_reduce_round_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vreduceph, IMM8 = 0, SAE = 8))] +#[rustc_legacy_const_generics(2, 3)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_maskz_reduce_round_ph( + k: __mmask32, + a: __m512h, +) -> __m512h { + static_assert_uimm_bits!(IMM8, 8); + static_assert_sae!(SAE); + _mm512_mask_reduce_round_ph::(_mm512_setzero_ph(), k, a) +} + +/// Extract the reduced argument of the lower half-precision (16-bit) floating-point element in b by +/// the number of bits specified by imm8, store the result in the lower element of dst, and copy the +/// upper 7 packed elements from a to the upper elements of dst. +/// +/// Rounding is done according to the imm8 parameter, which can be one of: +/// +/// _MM_FROUND_TO_NEAREST_INT // round to nearest +/// _MM_FROUND_TO_NEG_INF // round down +/// _MM_FROUND_TO_POS_INF // round up +/// _MM_FROUND_TO_ZERO // truncate +/// _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_reduce_sh) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vreducesh, IMM8 = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_reduce_sh(a: __m128h, b: __m128h) -> __m128h { + static_assert_uimm_bits!(IMM8, 8); + _mm_mask_reduce_sh::(_mm_undefined_ph(), 0xff, a, b) +} + +/// Extract the reduced argument of the lower half-precision (16-bit) floating-point element in b by +/// the number of bits specified by imm8, store the result in the lower element of dst using writemask k +/// (the element is copied from src when mask bit 0 is not set), and copy the upper 7 packed elements from +/// a to the upper elements of dst. 
+
+/// Extract the reduced argument of the lower half-precision (16-bit) floating-point element in b by
+/// the number of bits specified by imm8, store the result in the lower element of dst using writemask k
+/// (the element is copied from src when mask bit 0 is not set), and copy the upper 7 packed elements from
+/// a to the upper elements of dst.
+///
+/// Rounding is done according to the imm8 parameter, which can be one of:
+///
+///     _MM_FROUND_TO_NEAREST_INT // round to nearest
+///     _MM_FROUND_TO_NEG_INF // round down
+///     _MM_FROUND_TO_POS_INF // round up
+///     _MM_FROUND_TO_ZERO // truncate
+///     _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_reduce_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vreducesh, IMM8 = 0))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_reduce_sh<const IMM8: i32>(
+    src: __m128h,
+    k: __mmask8,
+    a: __m128h,
+    b: __m128h,
+) -> __m128h {
+    static_assert_uimm_bits!(IMM8, 8);
+    _mm_mask_reduce_round_sh::<IMM8, _MM_FROUND_CUR_DIRECTION>(src, k, a, b)
+}
+
+/// Extract the reduced argument of the lower half-precision (16-bit) floating-point element in b by
+/// the number of bits specified by imm8, store the result in the lower element of dst using zeromask k
+/// (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from a
+/// to the upper elements of dst.
+///
+/// Rounding is done according to the imm8 parameter, which can be one of:
+///
+///     _MM_FROUND_TO_NEAREST_INT // round to nearest
+///     _MM_FROUND_TO_NEG_INF // round down
+///     _MM_FROUND_TO_POS_INF // round up
+///     _MM_FROUND_TO_ZERO // truncate
+///     _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_reduce_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vreducesh, IMM8 = 0))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_reduce_sh<const IMM8: i32>(k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
+    static_assert_uimm_bits!(IMM8, 8);
+    _mm_mask_reduce_sh::<IMM8>(_mm_setzero_ph(), k, a, b)
+}
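The difference between the write-masked and zero-masked scalar forms can be seen side by side in this sketch (hypothetical `masked_reduce_demo`; same assumptions as the earlier sketches):

#![feature(stdarch_x86_avx512_f16)]
use core::arch::x86_64::*;

#[target_feature(enable = "avx512fp16")]
unsafe fn masked_reduce_demo(src: __m128h, a: __m128h, b: __m128h) -> (__m128h, __m128h) {
    // With mask bit 0 clear, the merge form keeps `src`'s low lane while the
    // zeroing form writes 0.0 there; both copy lanes 1..=7 from `a`.
    let merged = _mm_mask_reduce_sh::<0>(src, 0, a, b);
    let zeroed = _mm_maskz_reduce_sh::<0>(0, a, b);
    (merged, zeroed)
}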
+
+/// Extract the reduced argument of the lower half-precision (16-bit) floating-point element in b by
+/// the number of bits specified by imm8, store the result in the lower element of dst, and copy the upper
+/// 7 packed elements from a to the upper elements of dst.
+///
+/// Rounding is done according to the imm8 parameter, which can be one of:
+///
+///     _MM_FROUND_TO_NEAREST_INT // round to nearest
+///     _MM_FROUND_TO_NEG_INF // round down
+///     _MM_FROUND_TO_POS_INF // round up
+///     _MM_FROUND_TO_ZERO // truncate
+///     _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
+///
+/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_reduce_round_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vreducesh, IMM8 = 0, SAE = 8))]
+#[rustc_legacy_const_generics(2, 3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_reduce_round_sh<const IMM8: i32, const SAE: i32>(
+    a: __m128h,
+    b: __m128h,
+) -> __m128h {
+    static_assert_uimm_bits!(IMM8, 8);
+    static_assert_sae!(SAE);
+    _mm_mask_reduce_round_sh::<IMM8, SAE>(_mm_undefined_ph(), 0xff, a, b)
+}
+
+/// Extract the reduced argument of the lower half-precision (16-bit) floating-point element in b by
+/// the number of bits specified by imm8, store the result in the lower element of dst using writemask k
+/// (the element is copied from src when mask bit 0 is not set), and copy the upper 7 packed elements from a
+/// to the upper elements of dst.
+///
+/// Rounding is done according to the imm8 parameter, which can be one of:
+///
+///     _MM_FROUND_TO_NEAREST_INT // round to nearest
+///     _MM_FROUND_TO_NEG_INF // round down
+///     _MM_FROUND_TO_POS_INF // round up
+///     _MM_FROUND_TO_ZERO // truncate
+///     _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
+///
+/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_reduce_round_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vreducesh, IMM8 = 0, SAE = 8))]
+#[rustc_legacy_const_generics(4, 5)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_reduce_round_sh<const IMM8: i32, const SAE: i32>(
+    src: __m128h,
+    k: __mmask8,
+    a: __m128h,
+    b: __m128h,
+) -> __m128h {
+    static_assert_uimm_bits!(IMM8, 8);
+    static_assert_sae!(SAE);
+    vreducesh(a, b, src, k, IMM8, SAE)
+}
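The explicitly rounded scalar form takes the SAE value as a second const generic; `static_assert_sae!` only accepts `_MM_FROUND_CUR_DIRECTION` (use MXCSR as-is) or `_MM_FROUND_NO_EXC` (suppress exceptions). A sketch under the same assumptions as above (hypothetical `reduce_round_sh_demo`):

#![feature(stdarch_x86_avx512_f16)]
use core::arch::x86_64::*;

#[target_feature(enable = "avx512fp16")]
unsafe fn reduce_round_sh_demo(a: __m128h, b: __m128h) -> __m128h {
    // Suppress floating-point exceptions while reducing the low lane of `b`.
    _mm_reduce_round_sh::<0, _MM_FROUND_NO_EXC>(a, b)
}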
+
+/// Extract the reduced argument of the lower half-precision (16-bit) floating-point element in b by
+/// the number of bits specified by imm8, store the result in the lower element of dst using zeromask k
+/// (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from a
+/// to the upper elements of dst.
+///
+/// Rounding is done according to the imm8 parameter, which can be one of:
+///
+///     _MM_FROUND_TO_NEAREST_INT // round to nearest
+///     _MM_FROUND_TO_NEG_INF // round down
+///     _MM_FROUND_TO_POS_INF // round up
+///     _MM_FROUND_TO_ZERO // truncate
+///     _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
+///
+/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_reduce_round_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vreducesh, IMM8 = 0, SAE = 8))]
+#[rustc_legacy_const_generics(3, 4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_reduce_round_sh<const IMM8: i32, const SAE: i32>(
+    k: __mmask8,
+    a: __m128h,
+    b: __m128h,
+) -> __m128h {
+    static_assert_uimm_bits!(IMM8, 8);
+    static_assert_sae!(SAE);
+    _mm_mask_reduce_round_sh::<IMM8, SAE>(_mm_setzero_ph(), k, a, b)
+}
+
+#[allow(improper_ctypes)]
+extern "C" {
+    #[link_name = "llvm.x86.avx512fp16.mask.cmp.sh"]
+    fn vcmpsh(a: __m128h, b: __m128h, imm8: i32, mask: __mmask8, sae: i32) -> __mmask8;
+    #[link_name = "llvm.x86.avx512fp16.vcomi.sh"]
+    fn vcomish(a: __m128h, b: __m128h, imm8: i32, sae: i32) -> i32;
+
+    #[link_name = "llvm.x86.avx512fp16.add.ph.512"]
+    fn vaddph(a: __m512h, b: __m512h, rounding: i32) -> __m512h;
+    #[link_name = "llvm.x86.avx512fp16.sub.ph.512"]
+    fn vsubph(a: __m512h, b: __m512h, rounding: i32) -> __m512h;
+    #[link_name = "llvm.x86.avx512fp16.mul.ph.512"]
+    fn vmulph(a: __m512h, b: __m512h, rounding: i32) -> __m512h;
+    #[link_name = "llvm.x86.avx512fp16.div.ph.512"]
+    fn vdivph(a: __m512h, b: __m512h, rounding: i32) -> __m512h;
+
+    #[link_name = "llvm.x86.avx512fp16.mask.add.sh.round"]
+    fn vaddsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h;
+    #[link_name = "llvm.x86.avx512fp16.mask.sub.sh.round"]
+    fn vsubsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h;
+    #[link_name = "llvm.x86.avx512fp16.mask.mul.sh.round"]
+    fn vmulsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h;
+    #[link_name = "llvm.x86.avx512fp16.mask.div.sh.round"]
+    fn vdivsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h;
+
+    #[link_name = "llvm.x86.avx512fp16.mask.vfmul.cph.128"]
+    fn vfmulcph_128(a: __m128, b: __m128, src: __m128, k: __mmask8) -> __m128;
+    #[link_name = "llvm.x86.avx512fp16.mask.vfmul.cph.256"]
+    fn vfmulcph_256(a: __m256, b: __m256, src: __m256, k: __mmask8) -> __m256;
+    #[link_name = "llvm.x86.avx512fp16.mask.vfmul.cph.512"]
+    fn vfmulcph_512(a: __m512, b: __m512, src: __m512, k: __mmask16, rounding: i32) -> __m512;
+    #[link_name = "llvm.x86.avx512fp16.mask.vfmul.csh"]
+    fn vfmulcsh(a: __m128, b: __m128, src: __m128, k: __mmask8, rounding: i32) -> __m128;
+
+    #[link_name = "llvm.x86.avx512fp16.mask.vfcmul.cph.128"]
+    fn vfcmulcph_128(a: __m128, b: __m128, src: __m128, k: __mmask8) -> __m128;
+    #[link_name = "llvm.x86.avx512fp16.mask.vfcmul.cph.256"]
+    fn vfcmulcph_256(a: __m256, b: __m256, src: __m256, k: __mmask8) -> __m256;
+    #[link_name = "llvm.x86.avx512fp16.mask.vfcmul.cph.512"]
+    fn vfcmulcph_512(a: __m512, b: __m512, src: __m512, k: __mmask16, rounding: i32) -> __m512;
+    #[link_name = "llvm.x86.avx512fp16.mask.vfcmul.csh"]
+    fn vfcmulcsh(a: __m128, b: __m128, src: __m128, k: __mmask8, rounding: i32) -> __m128;
+
+    #[link_name = "llvm.x86.avx512fp16.mask.vfmadd.cph.128"]
+    fn vfmaddcph_mask3_128(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128;
+    #[link_name = "llvm.x86.avx512fp16.maskz.vfmadd.cph.128"]
+    fn vfmaddcph_maskz_128(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128;
+    #[link_name = "llvm.x86.avx512fp16.mask.vfmadd.cph.256"]
+    fn vfmaddcph_mask3_256(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256;
+    #[link_name = "llvm.x86.avx512fp16.maskz.vfmadd.cph.256"]
+    fn
vfmaddcph_maskz_256(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256; + #[link_name = "llvm.x86.avx512fp16.mask.vfmadd.cph.512"] + fn vfmaddcph_mask3_512(a: __m512, b: __m512, c: __m512, k: __mmask16, rounding: i32) -> __m512; + #[link_name = "llvm.x86.avx512fp16.maskz.vfmadd.cph.512"] + fn vfmaddcph_maskz_512(a: __m512, b: __m512, c: __m512, k: __mmask16, rounding: i32) -> __m512; + #[link_name = "llvm.x86.avx512fp16.mask.vfmadd.csh"] + fn vfmaddcsh_mask(a: __m128, b: __m128, c: __m128, k: __mmask8, rounding: i32) -> __m128; + #[link_name = "llvm.x86.avx512fp16.maskz.vfmadd.csh"] + fn vfmaddcsh_maskz(a: __m128, b: __m128, c: __m128, k: __mmask8, rounding: i32) -> __m128; + + #[link_name = "llvm.x86.avx512fp16.mask.vfcmadd.cph.128"] + fn vfcmaddcph_mask3_128(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128; + #[link_name = "llvm.x86.avx512fp16.maskz.vfcmadd.cph.128"] + fn vfcmaddcph_maskz_128(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128; + #[link_name = "llvm.x86.avx512fp16.mask.vfcmadd.cph.256"] + fn vfcmaddcph_mask3_256(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256; + #[link_name = "llvm.x86.avx512fp16.maskz.vfcmadd.cph.256"] + fn vfcmaddcph_maskz_256(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256; + #[link_name = "llvm.x86.avx512fp16.mask.vfcmadd.cph.512"] + fn vfcmaddcph_mask3_512(a: __m512, b: __m512, c: __m512, k: __mmask16, rounding: i32) + -> __m512; + #[link_name = "llvm.x86.avx512fp16.maskz.vfcmadd.cph.512"] + fn vfcmaddcph_maskz_512(a: __m512, b: __m512, c: __m512, k: __mmask16, rounding: i32) + -> __m512; + #[link_name = "llvm.x86.avx512fp16.mask.vfcmadd.csh"] + fn vfcmaddcsh_mask(a: __m128, b: __m128, c: __m128, k: __mmask8, rounding: i32) -> __m128; + #[link_name = "llvm.x86.avx512fp16.maskz.vfcmadd.csh"] + fn vfcmaddcsh_maskz(a: __m128, b: __m128, c: __m128, k: __mmask8, rounding: i32) -> __m128; + + #[link_name = "llvm.x86.avx512fp16.vfmadd.ph.512"] + fn vfmaddph_512(a: __m512h, b: __m512h, c: __m512h, rounding: i32) -> __m512h; + #[link_name = "llvm.fma.f16"] + fn fmaf16(a: f16, b: f16, c: f16) -> f16; // TODO: use `crate::intrinsics::fmaf16` when it's available + #[link_name = "llvm.x86.avx512fp16.vfmadd.f16"] + fn vfmaddsh(a: f16, b: f16, c: f16, rounding: i32) -> f16; + + #[link_name = "llvm.x86.avx512fp16.vfmaddsub.ph.128"] + fn vfmaddsubph_128(a: __m128h, b: __m128h, c: __m128h) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.vfmaddsub.ph.256"] + fn vfmaddsubph_256(a: __m256h, b: __m256h, c: __m256h) -> __m256h; + #[link_name = "llvm.x86.avx512fp16.vfmaddsub.ph.512"] + fn vfmaddsubph_512(a: __m512h, b: __m512h, c: __m512h, rounding: i32) -> __m512h; + + #[link_name = "llvm.x86.avx512fp16.mask.rcp.ph.128"] + fn vrcpph_128(a: __m128h, src: __m128h, k: __mmask8) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.rcp.ph.256"] + fn vrcpph_256(a: __m256h, src: __m256h, k: __mmask16) -> __m256h; + #[link_name = "llvm.x86.avx512fp16.mask.rcp.ph.512"] + fn vrcpph_512(a: __m512h, src: __m512h, k: __mmask32) -> __m512h; + #[link_name = "llvm.x86.avx512fp16.mask.rcp.sh"] + fn vrcpsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8) -> __m128h; + + #[link_name = "llvm.x86.avx512fp16.mask.rsqrt.ph.128"] + fn vrsqrtph_128(a: __m128h, src: __m128h, k: __mmask8) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.rsqrt.ph.256"] + fn vrsqrtph_256(a: __m256h, src: __m256h, k: __mmask16) -> __m256h; + #[link_name = "llvm.x86.avx512fp16.mask.rsqrt.ph.512"] + fn vrsqrtph_512(a: __m512h, src: __m512h, k: __mmask32) -> __m512h; + #[link_name 
= "llvm.x86.avx512fp16.mask.rsqrt.sh"] + fn vrsqrtsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8) -> __m128h; + + #[link_name = "llvm.x86.avx512fp16.sqrt.ph.512"] + fn vsqrtph_512(a: __m512h, rounding: i32) -> __m512h; + #[link_name = "llvm.x86.avx512fp16.mask.sqrt.sh"] + fn vsqrtsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; + + #[link_name = "llvm.x86.avx512fp16.max.ph.128"] + fn vmaxph_128(a: __m128h, b: __m128h) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.max.ph.256"] + fn vmaxph_256(a: __m256h, b: __m256h) -> __m256h; + #[link_name = "llvm.x86.avx512fp16.max.ph.512"] + fn vmaxph_512(a: __m512h, b: __m512h, sae: i32) -> __m512h; + #[link_name = "llvm.x86.avx512fp16.mask.max.sh.round"] + fn vmaxsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, sae: i32) -> __m128h; + + #[link_name = "llvm.x86.avx512fp16.min.ph.128"] + fn vminph_128(a: __m128h, b: __m128h) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.min.ph.256"] + fn vminph_256(a: __m256h, b: __m256h) -> __m256h; + #[link_name = "llvm.x86.avx512fp16.min.ph.512"] + fn vminph_512(a: __m512h, b: __m512h, sae: i32) -> __m512h; + #[link_name = "llvm.x86.avx512fp16.mask.min.sh.round"] + fn vminsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, sae: i32) -> __m128h; + + #[link_name = "llvm.x86.avx512fp16.mask.getexp.ph.128"] + fn vgetexpph_128(a: __m128h, src: __m128h, k: __mmask8) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.getexp.ph.256"] + fn vgetexpph_256(a: __m256h, src: __m256h, k: __mmask16) -> __m256h; + #[link_name = "llvm.x86.avx512fp16.mask.getexp.ph.512"] + fn vgetexpph_512(a: __m512h, src: __m512h, k: __mmask32, sae: i32) -> __m512h; + #[link_name = "llvm.x86.avx512fp16.mask.getexp.sh"] + fn vgetexpsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, sae: i32) -> __m128h; + + #[link_name = "llvm.x86.avx512fp16.mask.getmant.ph.128"] + fn vgetmantph_128(a: __m128h, imm8: i32, src: __m128h, k: __mmask8) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.getmant.ph.256"] + fn vgetmantph_256(a: __m256h, imm8: i32, src: __m256h, k: __mmask16) -> __m256h; + #[link_name = "llvm.x86.avx512fp16.mask.getmant.ph.512"] + fn vgetmantph_512(a: __m512h, imm8: i32, src: __m512h, k: __mmask32, sae: i32) -> __m512h; + #[link_name = "llvm.x86.avx512fp16.mask.getmant.sh"] + fn vgetmantsh( + a: __m128h, + b: __m128h, + imm8: i32, + src: __m128h, + k: __mmask8, + sae: i32, + ) -> __m128h; + + #[link_name = "llvm.x86.avx512fp16.mask.rndscale.ph.128"] + fn vrndscaleph_128(a: __m128h, imm8: i32, src: __m128h, k: __mmask8) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.rndscale.ph.256"] + fn vrndscaleph_256(a: __m256h, imm8: i32, src: __m256h, k: __mmask16) -> __m256h; + #[link_name = "llvm.x86.avx512fp16.mask.rndscale.ph.512"] + fn vrndscaleph_512(a: __m512h, imm8: i32, src: __m512h, k: __mmask32, sae: i32) -> __m512h; + #[link_name = "llvm.x86.avx512fp16.mask.rndscale.sh"] + fn vrndscalesh( + a: __m128h, + b: __m128h, + src: __m128h, + k: __mmask8, + imm8: i32, + sae: i32, + ) -> __m128h; + + #[link_name = "llvm.x86.avx512fp16.mask.scalef.ph.128"] + fn vscalefph_128(a: __m128h, b: __m128h, src: __m128h, k: __mmask8) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.scalef.ph.256"] + fn vscalefph_256(a: __m256h, b: __m256h, src: __m256h, k: __mmask16) -> __m256h; + #[link_name = "llvm.x86.avx512fp16.mask.scalef.ph.512"] + fn vscalefph_512(a: __m512h, b: __m512h, src: __m512h, k: __mmask32, rounding: i32) -> __m512h; + #[link_name = "llvm.x86.avx512fp16.mask.scalef.sh"] + fn 
vscalefsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; + + #[link_name = "llvm.x86.avx512fp16.mask.reduce.ph.128"] + fn vreduceph_128(a: __m128h, imm8: i32, src: __m128h, k: __mmask8) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.reduce.ph.256"] + fn vreduceph_256(a: __m256h, imm8: i32, src: __m256h, k: __mmask16) -> __m256h; + #[link_name = "llvm.x86.avx512fp16.mask.reduce.ph.512"] + fn vreduceph_512(a: __m512h, imm8: i32, src: __m512h, k: __mmask32, sae: i32) -> __m512h; + #[link_name = "llvm.x86.avx512fp16.mask.reduce.sh"] + fn vreducesh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, imm8: i32, sae: i32) + -> __m128h; +} + +#[cfg(test)] +mod tests { + use crate::core_arch::x86::*; + use crate::mem::transmute; + use crate::ptr::{addr_of, addr_of_mut}; + use stdarch_test::simd_test; + + #[target_feature(enable = "avx512fp16")] + unsafe fn _mm_set1_pch(re: f16, im: f16) -> __m128h { + _mm_setr_ph(re, im, re, im, re, im, re, im) + } + + #[target_feature(enable = "avx512fp16")] + unsafe fn _mm256_set1_pch(re: f16, im: f16) -> __m256h { + _mm256_setr_ph( + re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, + ) + } + + #[target_feature(enable = "avx512fp16")] + unsafe fn _mm512_set1_pch(re: f16, im: f16) -> __m512h { + _mm512_setr_ph( + re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, + re, im, re, im, re, im, re, im, re, im, + ) + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_set_ph() { + let r = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let e = _mm_setr_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_set_ph() { + let r = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let e = _mm256_setr_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_set_ph() { + let r = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let e = _mm512_setr_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_set_sh() { + let r = _mm_set_sh(1.0); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_set1_ph() { + let r = _mm_set1_ph(1.0); + let e = _mm_set_ph(1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_set1_ph() { + let r = _mm256_set1_ph(1.0); + let e = _mm256_set_ph( + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_set1_ph() { + let r = _mm512_set1_ph(1.0); + let e = _mm512_set_ph( + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] 
+ unsafe fn test_mm_setr_ph() { + let r = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let e = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_setr_ph() { + let r = _mm256_setr_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let e = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_setr_ph() { + let r = _mm512_setr_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let e = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_setzero_ph() { + let r = _mm_setzero_ph(); + let e = _mm_set1_ph(0.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_setzero_ph() { + let r = _mm256_setzero_ph(); + let e = _mm256_set1_ph(0.0); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_setzero_ph() { + let r = _mm512_setzero_ph(); + let e = _mm512_set1_ph(0.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_castsi128_ph() { + let a = _mm_set1_epi16(0x3c00); + let r = _mm_castsi128_ph(a); + let e = _mm_set1_ph(1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castsi256_ph() { + let a = _mm256_set1_epi16(0x3c00); + let r = _mm256_castsi256_ph(a); + let e = _mm256_set1_ph(1.0); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castsi512_ph() { + let a = _mm512_set1_epi16(0x3c00); + let r = _mm512_castsi512_ph(a); + let e = _mm512_set1_ph(1.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_castph_si128() { + let a = _mm_set1_ph(1.0); + let r = _mm_castph_si128(a); + let e = _mm_set1_epi16(0x3c00); + assert_eq_m128i(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castph_si256() { + let a = _mm256_set1_ph(1.0); + let r = _mm256_castph_si256(a); + let e = _mm256_set1_epi16(0x3c00); + assert_eq_m256i(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph_si512() { + let a = _mm512_set1_ph(1.0); + let r = _mm512_castph_si512(a); + let e = _mm512_set1_epi16(0x3c00); + assert_eq_m512i(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_castps_ph() { + let a = _mm_castsi128_ps(_mm_set1_epi16(0x3c00)); + let r = _mm_castps_ph(a); + let e = _mm_set1_ph(1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castps_ph() { + let a = _mm256_castsi256_ps(_mm256_set1_epi16(0x3c00)); + let r = _mm256_castps_ph(a); + let e = _mm256_set1_ph(1.0); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castps_ph() { + let a = _mm512_castsi512_ps(_mm512_set1_epi16(0x3c00)); + let r = _mm512_castps_ph(a); + let e = _mm512_set1_ph(1.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn 
test_mm_castph_ps() { + let a = _mm_castsi128_ph(_mm_set1_epi32(0x3f800000)); + let r = _mm_castph_ps(a); + let e = _mm_set1_ps(1.0); + assert_eq_m128(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castph_ps() { + let a = _mm256_castsi256_ph(_mm256_set1_epi32(0x3f800000)); + let r = _mm256_castph_ps(a); + let e = _mm256_set1_ps(1.0); + assert_eq_m256(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph_ps() { + let a = _mm512_castsi512_ph(_mm512_set1_epi32(0x3f800000)); + let r = _mm512_castph_ps(a); + let e = _mm512_set1_ps(1.0); + assert_eq_m512(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_castpd_ph() { + let a = _mm_castsi128_pd(_mm_set1_epi16(0x3c00)); + let r = _mm_castpd_ph(a); + let e = _mm_set1_ph(1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castpd_ph() { + let a = _mm256_castsi256_pd(_mm256_set1_epi16(0x3c00)); + let r = _mm256_castpd_ph(a); + let e = _mm256_set1_ph(1.0); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castpd_ph() { + let a = _mm512_castsi512_pd(_mm512_set1_epi16(0x3c00)); + let r = _mm512_castpd_ph(a); + let e = _mm512_set1_ph(1.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_castph_pd() { + let a = _mm_castsi128_ph(_mm_set1_epi64x(0x3ff0000000000000)); + let r = _mm_castph_pd(a); + let e = _mm_set1_pd(1.0); + assert_eq_m128d(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castph_pd() { + let a = _mm256_castsi256_ph(_mm256_set1_epi64x(0x3ff0000000000000)); + let r = _mm256_castph_pd(a); + let e = _mm256_set1_pd(1.0); + assert_eq_m256d(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph_pd() { + let a = _mm512_castsi512_ph(_mm512_set1_epi64(0x3ff0000000000000)); + let r = _mm512_castph_pd(a); + let e = _mm512_set1_pd(1.0); + assert_eq_m512d(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castph256_ph128() { + let a = _mm256_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., + ); + let r = _mm256_castph256_ph128(a); + let e = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph512_ph128() { + let a = _mm512_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., + ); + let r = _mm512_castph512_ph128(a); + let e = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph512_ph256() { + let a = _mm512_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., + ); + let r = _mm512_castph512_ph256(a); + let e = _mm256_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castph128_ph256() { + let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + let r = _mm256_castph128_ph256(a); + assert_eq_m128h(_mm256_castph256_ph128(r), a); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph128_ph512() { + let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + let r = _mm512_castph128_ph512(a); + 
assert_eq_m128h(_mm512_castph512_ph128(r), a); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph256_ph512() { + let a = _mm256_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., + ); + let r = _mm512_castph256_ph512(a); + assert_eq_m256h(_mm512_castph512_ph256(r), a); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_zextph128_ph256() { + let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + let r = _mm256_zextph128_ph256(a); + let e = _mm256_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 0., 0., 0., 0., 0., 0., 0., 0., + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_zextph128_ph512() { + let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + let r = _mm512_zextph128_ph512(a); + let e = _mm512_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_zextph256_ph512() { + let a = _mm256_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., + ); + let r = _mm512_zextph256_ph512(a); + let e = _mm512_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_cmp_round_sh_mask() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_cmp_round_sh_mask::<_CMP_EQ_OQ, _MM_FROUND_NO_EXC>(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_cmp_round_sh_mask() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_mask_cmp_round_sh_mask::<_CMP_EQ_OQ, _MM_FROUND_NO_EXC>(0, a, b); + assert_eq!(r, 0); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_cmp_sh_mask() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_cmp_sh_mask::<_CMP_EQ_OQ>(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_cmp_sh_mask() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_mask_cmp_sh_mask::<_CMP_EQ_OQ>(0, a, b); + assert_eq!(r, 0); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comi_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_comi_round_sh::<_CMP_EQ_OQ, _MM_FROUND_NO_EXC>(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comi_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_comi_sh::<_CMP_EQ_OQ>(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comieq_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_comieq_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comige_sh() { + let a = _mm_set_sh(2.0); + let b = _mm_set_sh(1.0); + let r = _mm_comige_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comigt_sh() { + let a = _mm_set_sh(2.0); + let b = _mm_set_sh(1.0); + let r = _mm_comigt_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comile_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_comile_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comilt_sh() { + let a = 
_mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_comilt_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comineq_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_comineq_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_ucomieq_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_ucomieq_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_ucomige_sh() { + let a = _mm_set_sh(2.0); + let b = _mm_set_sh(1.0); + let r = _mm_ucomige_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_ucomigt_sh() { + let a = _mm_set_sh(2.0); + let b = _mm_set_sh(1.0); + let r = _mm_ucomigt_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_ucomile_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_ucomile_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_ucomilt_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_ucomilt_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_ucomineq_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_ucomineq_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_load_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_load_ph(addr_of!(a).cast()); + assert_eq_m128h(a, b); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_load_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_load_ph(addr_of!(a).cast()); + assert_eq_m256h(a, b); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_set_ph() { - let r = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let e = _mm_setr_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + unsafe fn test_mm512_load_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_load_ph(addr_of!(a).cast()); + assert_eq_m512h(a, b); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_load_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_load_sh(addr_of!(a).cast()); + assert_eq_m128h(a, b); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_load_sh() { + let a = _mm_set_sh(1.0); + let src = _mm_set_sh(2.); + let b = _mm_mask_load_sh(src, 1, addr_of!(a).cast()); + assert_eq_m128h(a, b); + let b = _mm_mask_load_sh(src, 0, addr_of!(a).cast()); + assert_eq_m128h(src, b); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_load_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_maskz_load_sh(1, addr_of!(a).cast()); + assert_eq_m128h(a, b); + let b = _mm_maskz_load_sh(0, addr_of!(a).cast()); + assert_eq_m128h(_mm_setzero_ph(), b); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_loadu_ph() { + let array = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]; + let r = _mm_loadu_ph(array.as_ptr()); + let e = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_loadu_ph() { + let array = [ + 1.0, 2.0, 3.0, 4.0, 5.0, 
6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ]; + let r = _mm256_loadu_ph(array.as_ptr()); + let e = _mm256_setr_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_loadu_ph() { + let array = [ + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ]; + let r = _mm512_loadu_ph(array.as_ptr()); + let e = _mm512_setr_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_move_sh() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_sh(9.0); + let r = _mm_move_sh(a, b); + let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 9.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_move_sh() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_sh(9.0); + let src = _mm_set_sh(10.0); + let r = _mm_mask_move_sh(src, 0, a, b); + let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 10.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_move_sh() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_sh(9.0); + let r = _mm_maskz_move_sh(0, a, b); + let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 0.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_store_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let mut b = _mm_setzero_ph(); + _mm_store_ph(addr_of_mut!(b).cast(), a); + assert_eq_m128h(a, b); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_store_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let mut b = _mm256_setzero_ph(); + _mm256_store_ph(addr_of_mut!(b).cast(), a); + assert_eq_m256h(a, b); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_store_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let mut b = _mm512_setzero_ph(); + _mm512_store_ph(addr_of_mut!(b).cast(), a); + assert_eq_m512h(a, b); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_store_sh() { + let a = _mm_set_sh(1.0); + let mut b = _mm_setzero_ph(); + _mm_store_sh(addr_of_mut!(b).cast(), a); + assert_eq_m128h(a, b); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_store_sh() { + let a = _mm_set_sh(1.0); + let mut b = _mm_setzero_ph(); + _mm_mask_store_sh(addr_of_mut!(b).cast(), 0, a); + assert_eq_m128h(_mm_setzero_ph(), b); + _mm_mask_store_sh(addr_of_mut!(b).cast(), 1, a); + assert_eq_m128h(a, b); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_storeu_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let mut array = [0.0; 8]; + _mm_storeu_ph(array.as_mut_ptr(), a); + assert_eq_m128h(a, _mm_loadu_ph(array.as_ptr())); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn 
test_mm256_storeu_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let mut array = [0.0; 16]; + _mm256_storeu_ph(array.as_mut_ptr(), a); + assert_eq_m256h(a, _mm256_loadu_ph(array.as_ptr())); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_storeu_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let mut array = [0.0; 32]; + _mm512_storeu_ph(array.as_mut_ptr(), a); + assert_eq_m512h(a, _mm512_loadu_ph(array.as_ptr())); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_add_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let r = _mm_add_ph(a, b); + let e = _mm_set1_ph(9.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_add_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm_mask_add_ph(src, 0b01010101, a, b); + let e = _mm_set_ph(10., 9., 12., 9., 14., 9., 16., 9.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_add_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let r = _mm_maskz_add_ph(0b01010101, a, b); + let e = _mm_set_ph(0., 9., 0., 9., 0., 9., 0., 9.); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_set_ph() { - let r = _mm256_set_ph( + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_add_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let r = _mm256_add_ph(a, b); + let e = _mm256_set1_ph(17.0); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_add_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let src = _mm256_set_ph( + 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., + ); + let r = _mm256_mask_add_ph(src, 0b0101010101010101, a, b); + let e = _mm256_set_ph( + 18., 17., 20., 17., 22., 17., 24., 17., 26., 17., 28., 17., 30., 17., 32., 17., + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_add_ph() { + let a = _mm256_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - let e = _mm256_setr_ph( + let b = _mm256_set_ph( 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, ); + let r = _mm256_maskz_add_ph(0b0101010101010101, a, b); + let e = _mm256_set_ph( + 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., + ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_set_ph() { - let r = _mm512_set_ph( + unsafe fn test_mm512_add_ph() { + let a 
= _mm512_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, ); - let e = _mm512_setr_ph( + let b = _mm512_set_ph( 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, ); + let r = _mm512_add_ph(a, b); + let e = _mm512_set1_ph(33.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_set_sh() { - let r = _mm_set_sh(1.0); - let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_set1_ph() { - let r = _mm_set1_ph(1.0); - let e = _mm_set_ph(1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_set1_ph() { - let r = _mm256_set1_ph(1.0); - let e = _mm256_set_ph( - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + unsafe fn test_mm512_mask_add_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, ); - assert_eq_m256h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_set1_ph() { - let r = _mm512_set1_ph(1.0); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let src = _mm512_set_ph( + 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., + 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + ); + let r = _mm512_mask_add_ph(src, 0b01010101010101010101010101010101, a, b); let e = _mm512_set_ph( - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + 34., 33., 36., 33., 38., 33., 40., 33., 42., 33., 44., 33., 46., 33., 48., 33., 50., + 33., 52., 33., 54., 33., 56., 33., 58., 33., 60., 33., 62., 33., 64., 33., ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_setr_ph() { - let r = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let e = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - assert_eq_m128h(r, e); + unsafe fn test_mm512_maskz_add_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_maskz_add_ph(0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., + 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_setr_ph() { - let r = _mm256_setr_ph( + unsafe fn test_mm512_add_round_ph() { + let a = _mm512_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 
9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, ); - let e = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, ); - assert_eq_m256h(r, e); + let r = _mm512_add_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set1_ph(33.0); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_setr_ph() { - let r = _mm512_setr_ph( + unsafe fn test_mm512_mask_add_round_ph() { + let a = _mm512_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, ); - let e = _mm512_set_ph( + let b = _mm512_set_ph( 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, ); + let src = _mm512_set_ph( + 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., + 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + ); + let r = _mm512_mask_add_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 34., 33., 36., 33., 38., 33., 40., 33., 42., 33., 44., 33., 46., 33., 48., 33., 50., + 33., 52., 33., 54., 33., 56., 33., 58., 33., 60., 33., 62., 33., 64., 33., + ); assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_setzero_ph() { - let r = _mm_setzero_ph(); - let e = _mm_set1_ph(0.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_setzero_ph() { - let r = _mm256_setzero_ph(); - let e = _mm256_set1_ph(0.0); - assert_eq_m256h(r, e); - } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_setzero_ph() { - let r = _mm512_setzero_ph(); - let e = _mm512_set1_ph(0.0); + unsafe fn test_mm512_maskz_add_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_maskz_add_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., + 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., + ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_castsi128_ph() { - let a = _mm_set1_epi16(0x3c00); - let r = _mm_castsi128_ph(a); - let e = _mm_set1_ph(1.0); + unsafe fn test_mm_add_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_set_sh(3.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - 
unsafe fn test_mm256_castsi256_ph() { - let a = _mm256_set1_epi16(0x3c00); - let r = _mm256_castsi256_ph(a); - let e = _mm256_set1_ph(1.0); - assert_eq_m256h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castsi512_ph() { - let a = _mm512_set1_epi16(0x3c00); - let r = _mm512_castsi512_ph(a); - let e = _mm512_set1_ph(1.0); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_castph_si128() { - let a = _mm_set1_ph(1.0); - let r = _mm_castph_si128(a); - let e = _mm_set1_epi16(0x3c00); - assert_eq_m128i(r, e); + unsafe fn test_mm_mask_add_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let src = _mm_set_sh(4.0); + let r = _mm_mask_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, + ); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 1, a, b, + ); + let e = _mm_set_sh(3.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castph_si256() { - let a = _mm256_set1_ph(1.0); - let r = _mm256_castph_si256(a); - let e = _mm256_set1_epi16(0x3c00); - assert_eq_m256i(r, e); + unsafe fn test_mm_maskz_add_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = + _mm_maskz_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = + _mm_maskz_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_set_sh(3.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castph_si512() { - let a = _mm512_set1_ph(1.0); - let r = _mm512_castph_si512(a); - let e = _mm512_set1_epi16(0x3c00); - assert_eq_m512i(r, e); + unsafe fn test_mm_add_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_add_sh(a, b); + let e = _mm_set_sh(3.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_castps_ph() { - let a = _mm_castsi128_ps(_mm_set1_epi16(0x3c00)); - let r = _mm_castps_ph(a); - let e = _mm_set1_ph(1.0); + unsafe fn test_mm_mask_add_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let src = _mm_set_sh(4.0); + let r = _mm_mask_add_sh(src, 0, a, b); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_add_sh(src, 1, a, b); + let e = _mm_set_sh(3.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castps_ph() { - let a = _mm256_castsi256_ps(_mm256_set1_epi16(0x3c00)); - let r = _mm256_castps_ph(a); - let e = _mm256_set1_ph(1.0); - assert_eq_m256h(r, e); + unsafe fn test_mm_maskz_add_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_maskz_add_sh(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = _mm_maskz_add_sh(1, a, b); + let e = _mm_set_sh(3.0); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castps_ph() { - let a = _mm512_castsi512_ps(_mm512_set1_epi16(0x3c00)); - let r = _mm512_castps_ph(a); - let e = _mm512_set1_ph(1.0); - assert_eq_m512h(r, e); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_sub_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let r = _mm_sub_ph(a, b); + let e = _mm_set_ph(-7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe 
fn test_mm_castph_ps() { - let a = _mm_castsi128_ph(_mm_set1_epi32(0x3f800000)); - let r = _mm_castph_ps(a); - let e = _mm_set1_ps(1.0); - assert_eq_m128(r, e); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_sub_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm_mask_sub_ph(src, 0b01010101, a, b); + let e = _mm_set_ph(10., -5., 12., -1., 14., 3., 16., 7.); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castph_ps() { - let a = _mm256_castsi256_ph(_mm256_set1_epi32(0x3f800000)); - let r = _mm256_castph_ps(a); - let e = _mm256_set1_ps(1.0); - assert_eq_m256(r, e); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_sub_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let r = _mm_maskz_sub_ph(0b01010101, a, b); + let e = _mm_set_ph(0., -5., 0., -1., 0., 3., 0., 7.); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castph_ps() { - let a = _mm512_castsi512_ph(_mm512_set1_epi32(0x3f800000)); - let r = _mm512_castph_ps(a); - let e = _mm512_set1_ps(1.0); - assert_eq_m512(r, e); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_sub_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let r = _mm256_sub_ph(a, b); + let e = _mm256_set_ph( + -15.0, -13.0, -11.0, -9.0, -7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, + 15.0, + ); + assert_eq_m256h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_castpd_ph() { - let a = _mm_castsi128_pd(_mm_set1_epi16(0x3c00)); - let r = _mm_castpd_ph(a); - let e = _mm_set1_ph(1.0); - assert_eq_m128h(r, e); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_sub_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let src = _mm256_set_ph( + 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., + ); + let r = _mm256_mask_sub_ph(src, 0b0101010101010101, a, b); + let e = _mm256_set_ph( + 18., -13., 20., -9., 22., -5., 24., -1., 26., 3., 28., 7., 30., 11., 32., 15., + ); + assert_eq_m256h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castpd_ph() { - let a = _mm256_castsi256_pd(_mm256_set1_epi16(0x3c00)); - let r = _mm256_castpd_ph(a); - let e = _mm256_set1_ph(1.0); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_sub_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let r = _mm256_maskz_sub_ph(0b0101010101010101, a, b); + let e = _mm256_set_ph( + 0., -13., 0., -9., 0., -5., 0., -1., 0., 3., 0., 7., 0., 11., 0., 15., + ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castpd_ph() { - let a = _mm512_castsi512_pd(_mm512_set1_epi16(0x3c00)); - let r = 
_mm512_castpd_ph(a); - let e = _mm512_set1_ph(1.0); + unsafe fn test_mm512_sub_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_sub_ph(a, b); + let e = _mm512_set_ph( + -31.0, -29.0, -27.0, -25.0, -23.0, -21.0, -19.0, -17.0, -15.0, -13.0, -11.0, -9.0, + -7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, + 23.0, 25.0, 27.0, 29.0, 31.0, + ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_castph_pd() { - let a = _mm_castsi128_ph(_mm_set1_epi64x(0x3ff0000000000000)); - let r = _mm_castph_pd(a); - let e = _mm_set1_pd(1.0); - assert_eq_m128d(r, e); + unsafe fn test_mm512_mask_sub_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let src = _mm512_set_ph( + 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., + 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + ); + let r = _mm512_mask_sub_ph(src, 0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 34., -29., 36., -25., 38., -21., 40., -17., 42., -13., 44., -9., 46., -5., 48., -1., + 50., 3., 52., 7., 54., 11., 56., 15., 58., 19., 60., 23., 62., 27., 64., 31., + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castph_pd() { - let a = _mm256_castsi256_ph(_mm256_set1_epi64x(0x3ff0000000000000)); - let r = _mm256_castph_pd(a); - let e = _mm256_set1_pd(1.0); - assert_eq_m256d(r, e); + unsafe fn test_mm512_maskz_sub_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_maskz_sub_ph(0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 0., -29., 0., -25., 0., -21., 0., -17., 0., -13., 0., -9., 0., -5., 0., -1., 0., 3., + 0., 7., 0., 11., 0., 15., 0., 19., 0., 23., 0., 27., 0., 31., + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castph_pd() { - let a = _mm512_castsi512_ph(_mm512_set1_epi64(0x3ff0000000000000)); - let r = _mm512_castph_pd(a); - let e = _mm512_set1_pd(1.0); - assert_eq_m512d(r, e); + unsafe fn test_mm512_sub_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 
25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_sub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set_ph( + -31.0, -29.0, -27.0, -25.0, -23.0, -21.0, -19.0, -17.0, -15.0, -13.0, -11.0, -9.0, + -7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, + 23.0, 25.0, 27.0, 29.0, 31.0, + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castph256_ph128() { - let a = _mm256_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., + unsafe fn test_mm512_mask_sub_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, ); - let r = _mm256_castph256_ph128(a); - let e = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); - assert_eq_m128h(r, e); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let src = _mm512_set_ph( + 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., + 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + ); + let r = _mm512_mask_sub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 34., -29., 36., -25., 38., -21., 40., -17., 42., -13., 44., -9., 46., -5., 48., -1., + 50., 3., 52., 7., 54., 11., 56., 15., 58., 19., 60., 23., 62., 27., 64., 31., + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castph512_ph128() { - let a = _mm512_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., - 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., + unsafe fn test_mm512_maskz_sub_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, ); - let r = _mm512_castph512_ph128(a); - let e = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_maskz_sub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 0., -29., 0., -25., 0., -21., 0., -17., 0., -13., 0., -9., 0., -5., 0., -1., 0., 3., + 0., 7., 0., 11., 0., 15., 0., 19., 0., 23., 0., 27., 0., 31., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_sub_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_set_sh(-1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castph512_ph256() { - let a = _mm512_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., - 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 
31., 32., + unsafe fn test_mm_mask_sub_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let src = _mm_set_sh(4.0); + let r = _mm_mask_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, ); - let r = _mm512_castph512_ph256(a); - let e = _mm256_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 1, a, b, ); - assert_eq_m256h(r, e); + let e = _mm_set_sh(-1.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castph128_ph256() { - let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); - let r = _mm256_castph128_ph256(a); - assert_eq_m128h(_mm256_castph256_ph128(r), a); + unsafe fn test_mm_maskz_sub_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = + _mm_maskz_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = + _mm_maskz_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_set_sh(-1.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castph128_ph512() { - let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); - let r = _mm512_castph128_ph512(a); - assert_eq_m128h(_mm512_castph512_ph128(r), a); + unsafe fn test_mm_sub_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_sub_sh(a, b); + let e = _mm_set_sh(-1.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castph256_ph512() { - let a = _mm256_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., - ); - let r = _mm512_castph256_ph512(a); - assert_eq_m256h(_mm512_castph512_ph256(r), a); + unsafe fn test_mm_mask_sub_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let src = _mm_set_sh(4.0); + let r = _mm_mask_sub_sh(src, 0, a, b); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_sub_sh(src, 1, a, b); + let e = _mm_set_sh(-1.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_zextph128_ph256() { - let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); - let r = _mm256_zextph128_ph256(a); - let e = _mm256_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 0., 0., 0., 0., 0., 0., 0., 0., - ); - assert_eq_m256h(r, e); + unsafe fn test_mm_maskz_sub_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_maskz_sub_sh(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = _mm_maskz_sub_sh(1, a, b); + let e = _mm_set_sh(-1.0); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_zextph128_ph512() { - let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); - let r = _mm512_zextph128_ph512(a); - let e = _mm512_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., - 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., - ); - assert_eq_m512h(r, e); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mul_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let r = _mm_mul_ph(a, b); + let e = _mm_set_ph(8.0, 14.0, 18.0, 20.0, 20.0, 18.0, 14.0, 8.0); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_zextph256_ph512() { - let a = 
_mm256_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., - ); - let r = _mm512_zextph256_ph512(a); - let e = _mm512_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 0., 0., 0., 0., - 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., - ); - assert_eq_m512h(r, e); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_mul_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm_mask_mul_ph(src, 0b01010101, a, b); + let e = _mm_set_ph(10., 14., 12., 20., 14., 18., 16., 8.); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_cmp_round_sh_mask() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = _mm_cmp_round_sh_mask::<_CMP_EQ_OQ, _MM_FROUND_NO_EXC>(a, b); - assert_eq!(r, 1); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_mul_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let r = _mm_maskz_mul_ph(0b01010101, a, b); + let e = _mm_set_ph(0., 14., 0., 20., 0., 18., 0., 8.); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_cmp_round_sh_mask() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = _mm_mask_cmp_round_sh_mask::<_CMP_EQ_OQ, _MM_FROUND_NO_EXC>(0, a, b); - assert_eq!(r, 0); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mul_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let r = _mm256_mul_ph(a, b); + let e = _mm256_set_ph( + 16.0, 30.0, 42.0, 52.0, 60.0, 66.0, 70.0, 72.0, 72.0, 70.0, 66.0, 60.0, 52.0, 42.0, + 30.0, 16.0, + ); + assert_eq_m256h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_cmp_sh_mask() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = _mm_cmp_sh_mask::<_CMP_EQ_OQ>(a, b); - assert_eq!(r, 1); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_mul_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let src = _mm256_set_ph( + 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., + ); + let r = _mm256_mask_mul_ph(src, 0b0101010101010101, a, b); + let e = _mm256_set_ph( + 18., 30., 20., 52., 22., 66., 24., 72., 26., 70., 28., 60., 30., 42., 32., 16., + ); + assert_eq_m256h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_cmp_sh_mask() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = _mm_mask_cmp_sh_mask::<_CMP_EQ_OQ>(0, a, b); - assert_eq!(r, 0); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_mul_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let r = _mm256_maskz_mul_ph(0b0101010101010101, a, b); + let e = _mm256_set_ph( + 0., 30., 0., 52., 0., 66., 0., 72., 0., 70., 0., 60., 0., 
42., 0., 16., + ); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comi_round_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = _mm_comi_round_sh::<_CMP_EQ_OQ, _MM_FROUND_NO_EXC>(a, b); - assert_eq!(r, 1); + unsafe fn test_mm512_mul_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_mul_ph(a, b); + let e = _mm512_set_ph( + 32.0, 62.0, 90.0, 116.0, 140.0, 162.0, 182.0, 200.0, 216.0, 230.0, 242.0, 252.0, 260.0, + 266.0, 270.0, 272.0, 272.0, 270.0, 266.0, 260.0, 252.0, 242.0, 230.0, 216.0, 200.0, + 182.0, 162.0, 140.0, 116.0, 90.0, 62.0, 32.0, + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comi_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = _mm_comi_sh::<_CMP_EQ_OQ>(a, b); - assert_eq!(r, 1); + unsafe fn test_mm512_mask_mul_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let src = _mm512_set_ph( + 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., + 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + ); + let r = _mm512_mask_mul_ph(src, 0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 34., 62., 36., 116., 38., 162., 40., 200., 42., 230., 44., 252., 46., 266., 48., 272., + 50., 270., 52., 260., 54., 242., 56., 216., 58., 182., 60., 140., 62., 90., 64., 32., + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comieq_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = _mm_comieq_sh(a, b); - assert_eq!(r, 1); + unsafe fn test_mm512_maskz_mul_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_maskz_mul_ph(0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 0., 62., 0., 116., 0., 162., 0., 200., 0., 230., 0., 252., 0., 266., 0., 272., 0., + 270., 0., 260., 0., 242., 0., 216., 0., 182., 0., 140., 0., 90., 0., 32., + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comige_sh() { - let a = _mm_set_sh(2.0); - let b = _mm_set_sh(1.0); - let r = _mm_comige_sh(a, b); - assert_eq!(r, 1); + unsafe fn test_mm512_mul_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 
27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_mul_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set_ph( + 32.0, 62.0, 90.0, 116.0, 140.0, 162.0, 182.0, 200.0, 216.0, 230.0, 242.0, 252.0, 260.0, + 266.0, 270.0, 272.0, 272.0, 270.0, 266.0, 260.0, 252.0, 242.0, 230.0, 216.0, 200.0, + 182.0, 162.0, 140.0, 116.0, 90.0, 62.0, 32.0, + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comigt_sh() { - let a = _mm_set_sh(2.0); - let b = _mm_set_sh(1.0); - let r = _mm_comigt_sh(a, b); - assert_eq!(r, 1); + unsafe fn test_mm512_mask_mul_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let src = _mm512_set_ph( + 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., + 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + ); + let r = _mm512_mask_mul_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 34., 62., 36., 116., 38., 162., 40., 200., 42., 230., 44., 252., 46., 266., 48., 272., + 50., 270., 52., 260., 54., 242., 56., 216., 58., 182., 60., 140., 62., 90., 64., 32., + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comile_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_comile_sh(a, b); - assert_eq!(r, 1); + unsafe fn test_mm512_maskz_mul_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_maskz_mul_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 0., 62., 0., 116., 0., 162., 0., 200., 0., 230., 0., 252., 0., 266., 0., 272., 0., + 270., 0., 260., 0., 242., 0., 216., 0., 182., 0., 140., 0., 90., 0., 32., + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comilt_sh() { + unsafe fn test_mm_mul_round_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); - let r = _mm_comilt_sh(a, b); - assert_eq!(r, 1); + let r = _mm_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_set_sh(2.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comineq_sh() { + unsafe fn test_mm_mask_mul_round_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); - let r = _mm_comineq_sh(a, b); - assert_eq!(r, 1); + let src = _mm_set_sh(4.0); + let r = _mm_mask_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | 
_MM_FROUND_NO_EXC }>( + src, 0, a, b, + ); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 1, a, b, + ); + let e = _mm_set_sh(2.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_ucomieq_sh() { + unsafe fn test_mm_maskz_mul_round_sh() { let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = _mm_ucomieq_sh(a, b); - assert_eq!(r, 1); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_ucomige_sh() { - let a = _mm_set_sh(2.0); - let b = _mm_set_sh(1.0); - let r = _mm_ucomige_sh(a, b); - assert_eq!(r, 1); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_ucomigt_sh() { - let a = _mm_set_sh(2.0); - let b = _mm_set_sh(1.0); - let r = _mm_ucomigt_sh(a, b); - assert_eq!(r, 1); + let b = _mm_set_sh(2.0); + let r = + _mm_maskz_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = + _mm_maskz_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_set_sh(2.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_ucomile_sh() { + unsafe fn test_mm_mul_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); - let r = _mm_ucomile_sh(a, b); - assert_eq!(r, 1); + let r = _mm_mul_sh(a, b); + let e = _mm_set_sh(2.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_ucomilt_sh() { + unsafe fn test_mm_mask_mul_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); - let r = _mm_ucomilt_sh(a, b); - assert_eq!(r, 1); + let src = _mm_set_sh(4.0); + let r = _mm_mask_mul_sh(src, 0, a, b); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_mul_sh(src, 1, a, b); + let e = _mm_set_sh(2.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_ucomineq_sh() { + unsafe fn test_mm_maskz_mul_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); - let r = _mm_ucomineq_sh(a, b); - assert_eq!(r, 1); + let r = _mm_maskz_mul_sh(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = _mm_maskz_mul_sh(1, a, b); + let e = _mm_set_sh(2.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_load_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_load_ph(addr_of!(a).cast()); - assert_eq_m128h(a, b); + unsafe fn test_mm_div_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let r = _mm_div_ph(a, b); + let e = _mm_set1_ph(0.5); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_load_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let b = _mm256_load_ph(addr_of!(a).cast()); - assert_eq_m256h(a, b); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_load_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_load_ph(addr_of!(a).cast()); - assert_eq_m512h(a, b); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_load_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_load_sh(addr_of!(a).cast()); - assert_eq_m128h(a, b); + unsafe fn test_mm_mask_div_ph() { + let a = _mm_set1_ph(1.0); + let b = 
_mm_set1_ph(2.0); + let src = _mm_set_ph(4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0); + let r = _mm_mask_div_ph(src, 0b01010101, a, b); + let e = _mm_set_ph(4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_load_sh() { - let a = _mm_set_sh(1.0); - let src = _mm_set_sh(2.); - let b = _mm_mask_load_sh(src, 1, addr_of!(a).cast()); - assert_eq_m128h(a, b); - let b = _mm_mask_load_sh(src, 0, addr_of!(a).cast()); - assert_eq_m128h(src, b); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_div_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let r = _mm_maskz_div_ph(0b01010101, a, b); + let e = _mm_set_ph(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_load_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_maskz_load_sh(1, addr_of!(a).cast()); - assert_eq_m128h(a, b); - let b = _mm_maskz_load_sh(0, addr_of!(a).cast()); - assert_eq_m128h(_mm_setzero_ph(), b); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_div_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let r = _mm256_div_ph(a, b); + let e = _mm256_set1_ph(0.5); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_loadu_ph() { - let array = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]; - let r = _mm_loadu_ph(array.as_ptr()); - let e = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - assert_eq_m128h(r, e); + unsafe fn test_mm256_mask_div_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let src = _mm256_set_ph( + 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, + 19.0, + ); + let r = _mm256_mask_div_ph(src, 0b0101010101010101, a, b); + let e = _mm256_set_ph( + 4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5, 12.0, 0.5, 14.0, 0.5, 16.0, 0.5, 18.0, 0.5, + ); + assert_eq_m256h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_loadu_ph() { - let array = [ - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ]; - let r = _mm256_loadu_ph(array.as_ptr()); - let e = _mm256_setr_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_div_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let r = _mm256_maskz_div_ph(0b0101010101010101, a, b); + let e = _mm256_set_ph( + 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_loadu_ph() { - let array = [ - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ]; - let r = _mm512_loadu_ph(array.as_ptr()); - let e = _mm512_setr_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); + unsafe fn test_mm512_div_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let r = _mm512_div_ph(a, b); + let e = _mm512_set1_ph(0.5); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_move_sh() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = 
_mm_set_sh(9.0); - let r = _mm_move_sh(a, b); - let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 9.0); - assert_eq_m128h(r, e); + unsafe fn test_mm512_mask_div_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let src = _mm512_set_ph( + 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, + 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, + 33.0, 34.0, 35.0, + ); + let r = _mm512_mask_div_ph(src, 0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5, 12.0, 0.5, 14.0, 0.5, 16.0, 0.5, 18.0, 0.5, + 20.0, 0.5, 22.0, 0.5, 24.0, 0.5, 26.0, 0.5, 28.0, 0.5, 30.0, 0.5, 32.0, 0.5, 34.0, 0.5, + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_move_sh() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_sh(9.0); - let src = _mm_set_sh(10.0); - let r = _mm_mask_move_sh(src, 0, a, b); - let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 10.0); - assert_eq_m128h(r, e); + unsafe fn test_mm512_maskz_div_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let r = _mm512_maskz_div_ph(0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, + 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_move_sh() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_sh(9.0); - let r = _mm_maskz_move_sh(0, a, b); - let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 0.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_store_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let mut b = _mm_setzero_ph(); - _mm_store_ph(addr_of_mut!(b).cast(), a); - assert_eq_m128h(a, b); + unsafe fn test_mm512_div_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let r = _mm512_div_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set1_ph(0.5); + assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_store_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_div_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let src = _mm512_set_ph( + 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, + 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, + 33.0, 34.0, 35.0, ); - let mut b = _mm256_setzero_ph(); - _mm256_store_ph(addr_of_mut!(b).cast(), a); - assert_eq_m256h(a, b); + let r = _mm512_mask_div_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5, 12.0, 0.5, 14.0, 0.5, 16.0, 0.5, 18.0, 0.5, + 20.0, 0.5, 22.0, 0.5, 24.0, 0.5, 26.0, 0.5, 28.0, 0.5, 30.0, 0.5, 32.0, 0.5, 34.0, 0.5, + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_store_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 
23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, + unsafe fn test_mm512_maskz_div_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let r = _mm512_maskz_div_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101010101010101010101010101, + a, + b, ); - let mut b = _mm512_setzero_ph(); - _mm512_store_ph(addr_of_mut!(b).cast(), a); - assert_eq_m512h(a, b); + let e = _mm512_set_ph( + 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, + 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_store_sh() { + unsafe fn test_mm_div_round_sh() { let a = _mm_set_sh(1.0); - let mut b = _mm_setzero_ph(); - _mm_store_sh(addr_of_mut!(b).cast(), a); - assert_eq_m128h(a, b); + let b = _mm_set_sh(2.0); + let r = _mm_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_set_sh(0.5); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_store_sh() { + unsafe fn test_mm_mask_div_round_sh() { let a = _mm_set_sh(1.0); - let mut b = _mm_setzero_ph(); - _mm_mask_store_sh(addr_of_mut!(b).cast(), 0, a); - assert_eq_m128h(_mm_setzero_ph(), b); - _mm_mask_store_sh(addr_of_mut!(b).cast(), 1, a); - assert_eq_m128h(a, b); + let b = _mm_set_sh(2.0); + let src = _mm_set_sh(4.0); + let r = _mm_mask_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, + ); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 1, a, b, + ); + let e = _mm_set_sh(0.5); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_storeu_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let mut array = [0.0; 8]; - _mm_storeu_ph(array.as_mut_ptr(), a); - assert_eq_m128h(a, _mm_loadu_ph(array.as_ptr())); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_div_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = + _mm_maskz_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = + _mm_maskz_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_set_sh(0.5); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_storeu_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let mut array = [0.0; 16]; - _mm256_storeu_ph(array.as_mut_ptr(), a); - assert_eq_m256h(a, _mm256_loadu_ph(array.as_ptr())); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_div_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_div_sh(a, b); + let e = _mm_set_sh(0.5); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_storeu_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let mut array = [0.0; 32]; - _mm512_storeu_ph(array.as_mut_ptr(), a); - assert_eq_m512h(a, _mm512_loadu_ph(array.as_ptr())); + unsafe fn test_mm_mask_div_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let src = _mm_set_sh(4.0); + let r = 
_mm_mask_div_sh(src, 0, a, b); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_div_sh(src, 1, a, b); + let e = _mm_set_sh(0.5); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_add_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - let r = _mm_add_ph(a, b); - let e = _mm_set1_ph(9.0); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_div_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_maskz_div_sh(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = _mm_maskz_div_sh(1, a, b); + let e = _mm_set_sh(0.5); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_add_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let r = _mm_mask_add_ph(src, 0b01010101, a, b); - let e = _mm_set_ph(10., 9., 12., 9., 14., 9., 16., 9.); + unsafe fn test_mm_mul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 1.0); + let r = _mm_mul_pch(a, b); + let e = _mm_set1_pch(-1.0, 0.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_add_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - let r = _mm_maskz_add_ph(0b01010101, a, b); - let e = _mm_set_ph(0., 9., 0., 9., 0., 9., 0., 9.); + unsafe fn test_mm_mask_mul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 1.0); + let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0); + let r = _mm_mask_mul_pch(src, 0b0101, a, b); + let e = _mm_setr_ph(-1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_add_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let b = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, - ); - let r = _mm256_add_ph(a, b); - let e = _mm256_set1_ph(17.0); + unsafe fn test_mm_maskz_mul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 1.0); + let r = _mm_maskz_mul_pch(0b0101, a, b); + let e = _mm_setr_ph(-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 1.0); + let r = _mm256_mul_pch(a, b); + let e = _mm256_set1_pch(-1.0, 0.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_add_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let b = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, - ); - let src = _mm256_set_ph( - 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., + unsafe fn test_mm256_mask_mul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 1.0); + let src = _mm256_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, ); - let r = _mm256_mask_add_ph(src, 0b0101010101010101, a, b); - let e = _mm256_set_ph( - 
18., 17., 20., 17., 22., 17., 24., 17., 26., 17., 28., 17., 30., 17., 32., 17., + let r = _mm256_mask_mul_pch(src, 0b01010101, a, b); + let e = _mm256_setr_ph( + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_add_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let b = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, - ); - let r = _mm256_maskz_add_ph(0b0101010101010101, a, b); - let e = _mm256_set_ph( - 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., + unsafe fn test_mm256_maskz_mul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 1.0); + let r = _mm256_maskz_mul_pch(0b01010101, a, b); + let e = _mm256_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_add_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let r = _mm512_add_ph(a, b); - let e = _mm512_set1_ph(33.0); + unsafe fn test_mm512_mul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let r = _mm512_mul_pch(a, b); + let e = _mm512_set1_pch(-1.0, 0.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_add_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let src = _mm512_set_ph( - 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., - 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + unsafe fn test_mm512_mask_mul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let src = _mm512_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, + 32.0, 33.0, ); - let r = _mm512_mask_add_ph(src, 0b01010101010101010101010101010101, a, b); - let e = _mm512_set_ph( - 34., 33., 36., 33., 38., 33., 40., 33., 42., 33., 44., 33., 46., 33., 48., 33., 50., - 33., 52., 33., 54., 33., 56., 33., 58., 33., 60., 33., 62., 33., 64., 33., + let r = _mm512_mask_mul_pch(src, 0b0101010101010101, a, b); + let e = _mm512_setr_ph( + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, + 33.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_add_ph() { - let a = _mm512_set_ph( - 1.0, 
2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let r = _mm512_maskz_add_ph(0b01010101010101010101010101010101, a, b); - let e = _mm512_set_ph( - 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., - 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., + unsafe fn test_mm512_maskz_mul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let r = _mm512_maskz_mul_pch(0b0101010101010101, a, b); + let e = _mm512_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_add_round_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let r = _mm512_add_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm512_set1_ph(33.0); + unsafe fn test_mm512_mul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let r = _mm512_mul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set1_pch(-1.0, 0.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_add_round_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let src = _mm512_set_ph( - 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., - 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + unsafe fn test_mm512_mask_mul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let src = _mm512_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, + 32.0, 33.0, ); - let r = _mm512_mask_add_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm512_mask_mul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( src, - 0b01010101010101010101010101010101, + 0b0101010101010101, a, b, ); - let e = _mm512_set_ph( - 34., 33., 36., 33., 38., 33., 40., 33., 42., 33., 44., 33., 46., 33., 48., 33., 50., - 33., 52., 33., 54., 33., 56., 33., 58., 33., 60., 33., 62., 33., 64., 33., + let e = _mm512_setr_ph( + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 
16.0, 17.0, + -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, + 33.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_add_round_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let r = _mm512_maskz_add_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b01010101010101010101010101010101, + unsafe fn test_mm512_maskz_mul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let r = _mm512_maskz_mul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b0101010101010101, a, b, ); - let e = _mm512_set_ph( - 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., - 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., + let e = _mm512_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_add_round_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm_set_sh(3.0); + unsafe fn test_mm_mul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let r = _mm_mul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_add_round_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let src = _mm_set_sh(4.0); - let r = _mm_mask_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm_mask_mul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); + let r = _mm_mask_mul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( src, 0, a, b, ); - let e = _mm_set_sh(4.0); - assert_eq_m128h(r, e); - let r = _mm_mask_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 1, a, b, - ); - let e = _mm_set_sh(3.0); + let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_add_round_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = - _mm_maskz_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); - let e = _mm_set_sh(0.0); - assert_eq_m128h(r, e); + unsafe fn test_mm_maskz_mul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); let r = - _mm_maskz_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); - let e = _mm_set_sh(3.0); + _mm_maskz_mul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); 
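+        // Mask bit 0 is clear, so the lower complex lane (elements 0 and 1) of the
+        // result is zeroed; the remaining six `f16` elements pass through from `a`,
+        // as the expected vector below shows.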
+ let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_add_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_add_sh(a, b); - let e = _mm_set_sh(3.0); + unsafe fn test_mm_mul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let r = _mm_mul_sch(a, b); + let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_add_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let src = _mm_set_sh(4.0); - let r = _mm_mask_add_sh(src, 0, a, b); - let e = _mm_set_sh(4.0); - assert_eq_m128h(r, e); - let r = _mm_mask_add_sh(src, 1, a, b); - let e = _mm_set_sh(3.0); + unsafe fn test_mm_mask_mul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); + let r = _mm_mask_mul_sch(src, 0, a, b); + let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_add_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_maskz_add_sh(0, a, b); - let e = _mm_set_sh(0.0); - assert_eq_m128h(r, e); - let r = _mm_maskz_add_sh(1, a, b); - let e = _mm_set_sh(3.0); + unsafe fn test_mm_maskz_mul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let r = _mm_maskz_mul_sch(0, a, b); + let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_sub_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - let r = _mm_sub_ph(a, b); - let e = _mm_set_ph(-7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0); + unsafe fn test_mm_fmul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 1.0); + let r = _mm_fmul_pch(a, b); + let e = _mm_set1_pch(-1.0, 0.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_sub_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let r = _mm_mask_sub_ph(src, 0b01010101, a, b); - let e = _mm_set_ph(10., -5., 12., -1., 14., 3., 16., 7.); + unsafe fn test_mm_mask_fmul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 1.0); + let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0); + let r = _mm_mask_fmul_pch(src, 0b0101, a, b); + let e = _mm_setr_ph(-1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_sub_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - let r = _mm_maskz_sub_ph(0b01010101, a, b); - let e = _mm_set_ph(0., -5., 0., -1., 0., 3., 0., 7.); + unsafe fn test_mm_maskz_fmul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 1.0); + let r = _mm_maskz_fmul_pch(0b0101, a, b); + let e = _mm_setr_ph(-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0); assert_eq_m128h(r, e); } #[simd_test(enable = 
"avx512fp16,avx512vl")] - unsafe fn test_mm256_sub_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let b = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, - ); - let r = _mm256_sub_ph(a, b); - let e = _mm256_set_ph( - -15.0, -13.0, -11.0, -9.0, -7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, - 15.0, - ); + unsafe fn test_mm256_fmul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 1.0); + let r = _mm256_fmul_pch(a, b); + let e = _mm256_set1_pch(-1.0, 0.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_sub_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let b = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, - ); - let src = _mm256_set_ph( - 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., + unsafe fn test_mm256_mask_fmul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 1.0); + let src = _mm256_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, ); - let r = _mm256_mask_sub_ph(src, 0b0101010101010101, a, b); - let e = _mm256_set_ph( - 18., -13., 20., -9., 22., -5., 24., -1., 26., 3., 28., 7., 30., 11., 32., 15., + let r = _mm256_mask_fmul_pch(src, 0b01010101, a, b); + let e = _mm256_setr_ph( + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_sub_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let b = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, - ); - let r = _mm256_maskz_sub_ph(0b0101010101010101, a, b); - let e = _mm256_set_ph( - 0., -13., 0., -9., 0., -5., 0., -1., 0., 3., 0., 7., 0., 11., 0., 15., + unsafe fn test_mm256_maskz_fmul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 1.0); + let r = _mm256_maskz_fmul_pch(0b01010101, a, b); + let e = _mm256_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_sub_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let r = _mm512_sub_ph(a, b); - let e = _mm512_set_ph( - -31.0, -29.0, -27.0, -25.0, -23.0, -21.0, -19.0, -17.0, -15.0, -13.0, -11.0, -9.0, - -7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, - 23.0, 25.0, 27.0, 29.0, 31.0, - ); + unsafe fn test_mm512_fmul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let r = _mm512_fmul_pch(a, b); + let e = _mm512_set1_pch(-1.0, 0.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_sub_ph() { - let a = 
_mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let src = _mm512_set_ph( - 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., - 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + unsafe fn test_mm512_mask_fmul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let src = _mm512_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, + 32.0, 33.0, ); - let r = _mm512_mask_sub_ph(src, 0b01010101010101010101010101010101, a, b); - let e = _mm512_set_ph( - 34., -29., 36., -25., 38., -21., 40., -17., 42., -13., 44., -9., 46., -5., 48., -1., - 50., 3., 52., 7., 54., 11., 56., 15., 58., 19., 60., 23., 62., 27., 64., 31., + let r = _mm512_mask_fmul_pch(src, 0b0101010101010101, a, b); + let e = _mm512_setr_ph( + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, + 33.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_sub_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let r = _mm512_maskz_sub_ph(0b01010101010101010101010101010101, a, b); - let e = _mm512_set_ph( - 0., -29., 0., -25., 0., -21., 0., -17., 0., -13., 0., -9., 0., -5., 0., -1., 0., 3., - 0., 7., 0., 11., 0., 15., 0., 19., 0., 23., 0., 27., 0., 31., + unsafe fn test_mm512_maskz_fmul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let r = _mm512_maskz_fmul_pch(0b0101010101010101, a, b); + let e = _mm512_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_sub_round_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let r = _mm512_sub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm512_set_ph( - -31.0, -29.0, -27.0, -25.0, -23.0, -21.0, -19.0, -17.0, -15.0, -13.0, -11.0, -9.0, - -7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, - 23.0, 25.0, 27.0, 29.0, 31.0, - ); + unsafe fn 
test_mm512_fmul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let r = _mm512_fmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set1_pch(-1.0, 0.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_sub_round_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let src = _mm512_set_ph( - 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., - 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + unsafe fn test_mm512_mask_fmul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let src = _mm512_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, + 32.0, 33.0, ); - let r = _mm512_mask_sub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm512_mask_fmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( src, - 0b01010101010101010101010101010101, + 0b0101010101010101, a, b, ); - let e = _mm512_set_ph( - 34., -29., 36., -25., 38., -21., 40., -17., 42., -13., 44., -9., 46., -5., 48., -1., - 50., 3., 52., 7., 54., 11., 56., 15., 58., 19., 60., 23., 62., 27., 64., 31., + let e = _mm512_setr_ph( + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, + 33.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_sub_round_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let r = _mm512_maskz_sub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b01010101010101010101010101010101, + unsafe fn test_mm512_maskz_fmul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let r = _mm512_maskz_fmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b0101010101010101, a, b, ); - let e = _mm512_set_ph( - 0., -29., 0., -25., 0., -21., 0., -17., 0., -13., 0., -9., 0., -5., 0., -1., 0., 3., - 0., 7., 0., 11., 0., 15., 0., 19., 0., 23., 0., 27., 0., 31., + let e = _mm512_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, ); assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_sub_round_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm_set_sh(-1.0); + 
#[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_fmul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let r = _mm_fmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_sub_round_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let src = _mm_set_sh(4.0); - let r = _mm_mask_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm_mask_fmul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); + let r = _mm_mask_fmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( src, 0, a, b, ); - let e = _mm_set_sh(4.0); - assert_eq_m128h(r, e); - let r = _mm_mask_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 1, a, b, - ); - let e = _mm_set_sh(-1.0); + let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_sub_round_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = - _mm_maskz_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); - let e = _mm_set_sh(0.0); - assert_eq_m128h(r, e); + unsafe fn test_mm_maskz_fmul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); let r = - _mm_maskz_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); - let e = _mm_set_sh(-1.0); + _mm_maskz_fmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_sub_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_sub_sh(a, b); - let e = _mm_set_sh(-1.0); + unsafe fn test_mm_fmul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let r = _mm_fmul_sch(a, b); + let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_sub_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let src = _mm_set_sh(4.0); - let r = _mm_mask_sub_sh(src, 0, a, b); - let e = _mm_set_sh(4.0); - assert_eq_m128h(r, e); - let r = _mm_mask_sub_sh(src, 1, a, b); - let e = _mm_set_sh(-1.0); + unsafe fn test_mm_mask_fmul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); + let r = _mm_mask_fmul_sch(src, 0, a, b); + let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_sub_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_maskz_sub_sh(0, a, b); - let e = _mm_set_sh(0.0); - assert_eq_m128h(r, e); - let r = _mm_maskz_sub_sh(1, a, b); - let e = _mm_set_sh(-1.0); + unsafe fn test_mm_maskz_fmul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + 
let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let r = _mm_maskz_fmul_sch(0, a, b); + let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mul_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - let r = _mm_mul_ph(a, b); - let e = _mm_set_ph(8.0, 14.0, 18.0, 20.0, 20.0, 18.0, 14.0, 8.0); + unsafe fn test_mm_cmul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, -1.0); + let r = _mm_cmul_pch(a, b); + let e = _mm_set1_pch(-1.0, 0.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_mul_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let r = _mm_mask_mul_ph(src, 0b01010101, a, b); - let e = _mm_set_ph(10., 14., 12., 20., 14., 18., 16., 8.); + unsafe fn test_mm_mask_cmul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, -1.0); + let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0); + let r = _mm_mask_cmul_pch(src, 0b0101, a, b); + let e = _mm_setr_ph(-1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_mul_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - let r = _mm_maskz_mul_ph(0b01010101, a, b); - let e = _mm_set_ph(0., 14., 0., 20., 0., 18., 0., 8.); + unsafe fn test_mm_maskz_cmul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, -1.0); + let r = _mm_maskz_cmul_pch(0b0101, a, b); + let e = _mm_setr_ph(-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mul_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let b = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, - ); - let r = _mm256_mul_ph(a, b); - let e = _mm256_set_ph( - 16.0, 30.0, 42.0, 52.0, 60.0, 66.0, 70.0, 72.0, 72.0, 70.0, 66.0, 60.0, 52.0, 42.0, - 30.0, 16.0, - ); + unsafe fn test_mm256_cmul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, -1.0); + let r = _mm256_cmul_pch(a, b); + let e = _mm256_set1_pch(-1.0, 0.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_mul_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let b = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, - ); - let src = _mm256_set_ph( - 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., + unsafe fn test_mm256_mask_cmul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, -1.0); + let src = _mm256_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, ); - let r = _mm256_mask_mul_ph(src, 0b0101010101010101, a, b); - let e = _mm256_set_ph( - 18., 30., 20., 52., 22., 66., 24., 72., 26., 70., 28., 60., 30., 42., 32., 16., + let r = _mm256_mask_cmul_pch(src, 0b01010101, a, b); + let e = _mm256_setr_ph( + -1.0, 0.0, 4.0, 5.0, -1.0, 
0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_mul_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let b = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, - ); - let r = _mm256_maskz_mul_ph(0b0101010101010101, a, b); - let e = _mm256_set_ph( - 0., 30., 0., 52., 0., 66., 0., 72., 0., 70., 0., 60., 0., 42., 0., 16., + unsafe fn test_mm256_maskz_cmul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, -1.0); + let r = _mm256_maskz_cmul_pch(0b01010101, a, b); + let e = _mm256_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mul_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let r = _mm512_mul_ph(a, b); - let e = _mm512_set_ph( - 32.0, 62.0, 90.0, 116.0, 140.0, 162.0, 182.0, 200.0, 216.0, 230.0, 242.0, 252.0, 260.0, - 266.0, 270.0, 272.0, 272.0, 270.0, 266.0, 260.0, 252.0, 242.0, 230.0, 216.0, 200.0, - 182.0, 162.0, 140.0, 116.0, 90.0, 62.0, 32.0, - ); + unsafe fn test_mm512_cmul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, -1.0); + let r = _mm512_cmul_pch(a, b); + let e = _mm512_set1_pch(-1.0, 0.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_mul_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let src = _mm512_set_ph( - 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., - 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + unsafe fn test_mm512_mask_cmul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, -1.0); + let src = _mm512_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, + 32.0, 33.0, ); - let r = _mm512_mask_mul_ph(src, 0b01010101010101010101010101010101, a, b); - let e = _mm512_set_ph( - 34., 62., 36., 116., 38., 162., 40., 200., 42., 230., 44., 252., 46., 266., 48., 272., - 50., 270., 52., 260., 54., 242., 56., 216., 58., 182., 60., 140., 62., 90., 64., 32., + let r = _mm512_mask_cmul_pch(src, 0b0101010101010101, a, b); + let e = _mm512_setr_ph( + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, + 33.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn 
test_mm512_maskz_mul_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let r = _mm512_maskz_mul_ph(0b01010101010101010101010101010101, a, b); - let e = _mm512_set_ph( - 0., 62., 0., 116., 0., 162., 0., 200., 0., 230., 0., 252., 0., 266., 0., 272., 0., - 270., 0., 260., 0., 242., 0., 216., 0., 182., 0., 140., 0., 90., 0., 32., + unsafe fn test_mm512_maskz_cmul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, -1.0); + let r = _mm512_maskz_cmul_pch(0b0101010101010101, a, b); + let e = _mm512_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mul_round_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let r = _mm512_mul_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm512_set_ph( - 32.0, 62.0, 90.0, 116.0, 140.0, 162.0, 182.0, 200.0, 216.0, 230.0, 242.0, 252.0, 260.0, - 266.0, 270.0, 272.0, 272.0, 270.0, 266.0, 260.0, 252.0, 242.0, 230.0, 216.0, 200.0, - 182.0, 162.0, 140.0, 116.0, 90.0, 62.0, 32.0, - ); + unsafe fn test_mm512_cmul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, -1.0); + let r = _mm512_cmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set1_pch(-1.0, 0.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_mul_round_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let src = _mm512_set_ph( - 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., - 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + unsafe fn test_mm512_mask_cmul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, -1.0); + let src = _mm512_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, + 32.0, 33.0, ); - let r = _mm512_mask_mul_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm512_mask_cmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( src, - 0b01010101010101010101010101010101, + 0b0101010101010101, 
a, b, ); - let e = _mm512_set_ph( - 34., 62., 36., 116., 38., 162., 40., 200., 42., 230., 44., 252., 46., 266., 48., 272., - 50., 270., 52., 260., 54., 242., 56., 216., 58., 182., 60., 140., 62., 90., 64., 32., + let e = _mm512_setr_ph( + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, + 33.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_mul_round_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let r = _mm512_maskz_mul_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b01010101010101010101010101010101, + unsafe fn test_mm512_maskz_cmul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, -1.0); + let r = _mm512_maskz_cmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b0101010101010101, a, b, ); - let e = _mm512_set_ph( - 0., 62., 0., 116., 0., 162., 0., 200., 0., 230., 0., 252., 0., 266., 0., 272., 0., - 270., 0., 260., 0., 242., 0., 216., 0., 182., 0., 140., 0., 90., 0., 32., + let e = _mm512_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mul_round_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm_set_sh(2.0); + unsafe fn test_mm_cmul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let r = _mm_cmul_sch(a, b); + let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_mul_round_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let src = _mm_set_sh(4.0); - let r = _mm_mask_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 0, a, b, - ); - let e = _mm_set_sh(4.0); - assert_eq_m128h(r, e); - let r = _mm_mask_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 1, a, b, - ); - let e = _mm_set_sh(2.0); + unsafe fn test_mm_mask_cmul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); + let r = _mm_mask_cmul_sch(src, 0, a, b); + let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_mul_round_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = - _mm_maskz_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); - let e = _mm_set_sh(0.0); - assert_eq_m128h(r, e); - let r = - _mm_maskz_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); - let e = _mm_set_sh(2.0); + unsafe fn 
test_mm_maskz_cmul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let r = _mm_maskz_cmul_sch(0, a, b); + let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mul_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_mul_sh(a, b); - let e = _mm_set_sh(2.0); + unsafe fn test_mm_cmul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let r = _mm_cmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_mul_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let src = _mm_set_sh(4.0); - let r = _mm_mask_mul_sh(src, 0, a, b); - let e = _mm_set_sh(4.0); - assert_eq_m128h(r, e); - let r = _mm_mask_mul_sh(src, 1, a, b); - let e = _mm_set_sh(2.0); + unsafe fn test_mm_mask_cmul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); + let r = _mm_mask_cmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, + ); + let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_mul_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_maskz_mul_sh(0, a, b); - let e = _mm_set_sh(0.0); - assert_eq_m128h(r, e); - let r = _mm_maskz_mul_sh(1, a, b); - let e = _mm_set_sh(2.0); + unsafe fn test_mm_maskz_cmul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let r = + _mm_maskz_cmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_div_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let r = _mm_div_ph(a, b); - let e = _mm_set1_ph(0.5); + unsafe fn test_mm_fcmul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, -1.0); + let r = _mm_fcmul_pch(a, b); + let e = _mm_set1_pch(-1.0, 0.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_div_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let src = _mm_set_ph(4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0); - let r = _mm_mask_div_ph(src, 0b01010101, a, b); - let e = _mm_set_ph(4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5); + unsafe fn test_mm_mask_fcmul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, -1.0); + let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0); + let r = _mm_mask_fcmul_pch(src, 0b0101, a, b); + let e = _mm_setr_ph(-1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_div_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let r = _mm_maskz_div_ph(0b01010101, a, b); - let e = _mm_set_ph(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5); + unsafe fn test_mm_maskz_fcmul_pch() { + let a = _mm_set1_pch(0.0, 
1.0); + let b = _mm_set1_pch(0.0, -1.0); + let r = _mm_maskz_fcmul_pch(0b0101, a, b); + let e = _mm_setr_ph(-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_div_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let r = _mm256_div_ph(a, b); - let e = _mm256_set1_ph(0.5); + unsafe fn test_mm256_fcmul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, -1.0); + let r = _mm256_fcmul_pch(a, b); + let e = _mm256_set1_pch(-1.0, 0.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_div_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let src = _mm256_set_ph( - 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, - 19.0, + unsafe fn test_mm256_mask_fcmul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, -1.0); + let src = _mm256_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, ); - let r = _mm256_mask_div_ph(src, 0b0101010101010101, a, b); - let e = _mm256_set_ph( - 4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5, 12.0, 0.5, 14.0, 0.5, 16.0, 0.5, 18.0, 0.5, + let r = _mm256_mask_fcmul_pch(src, 0b01010101, a, b); + let e = _mm256_setr_ph( + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_div_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let r = _mm256_maskz_div_ph(0b0101010101010101, a, b); - let e = _mm256_set_ph( - 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, + unsafe fn test_mm256_maskz_fcmul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, -1.0); + let r = _mm256_maskz_fcmul_pch(0b01010101, a, b); + let e = _mm256_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_div_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let r = _mm512_div_ph(a, b); - let e = _mm512_set1_ph(0.5); + unsafe fn test_mm512_fcmul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, -1.0); + let r = _mm512_fcmul_pch(a, b); + let e = _mm512_set1_pch(-1.0, 0.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_div_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let src = _mm512_set_ph( - 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, - 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, - 33.0, 34.0, 35.0, + unsafe fn test_mm512_mask_fcmul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, -1.0); + let src = _mm512_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, + 32.0, 33.0, ); - let r = _mm512_mask_div_ph(src, 0b01010101010101010101010101010101, a, b); - let e = _mm512_set_ph( - 4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5, 12.0, 0.5, 14.0, 0.5, 16.0, 0.5, 18.0, 0.5, - 20.0, 0.5, 22.0, 0.5, 24.0, 0.5, 26.0, 0.5, 28.0, 0.5, 30.0, 0.5, 32.0, 0.5, 34.0, 0.5, + let r = _mm512_mask_fcmul_pch(src, 0b0101010101010101, a, b); + let e = _mm512_setr_ph( + 
-1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, + 33.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_div_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let r = _mm512_maskz_div_ph(0b01010101010101010101010101010101, a, b); - let e = _mm512_set_ph( - 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, - 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, + unsafe fn test_mm512_maskz_fcmul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, -1.0); + let r = _mm512_maskz_fcmul_pch(0b0101010101010101, a, b); + let e = _mm512_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_div_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let r = _mm512_div_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm512_set1_ph(0.5); + unsafe fn test_mm512_fcmul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, -1.0); + let r = _mm512_fcmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set1_pch(-1.0, 0.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_div_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let src = _mm512_set_ph( - 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, - 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, - 33.0, 34.0, 35.0, + unsafe fn test_mm512_mask_fcmul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, -1.0); + let src = _mm512_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, + 32.0, 33.0, ); - let r = _mm512_mask_div_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm512_mask_fcmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( src, - 0b01010101010101010101010101010101, + 0b0101010101010101, a, b, ); - let e = _mm512_set_ph( - 4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5, 12.0, 0.5, 14.0, 0.5, 16.0, 0.5, 18.0, 0.5, - 20.0, 0.5, 22.0, 0.5, 24.0, 0.5, 26.0, 0.5, 28.0, 0.5, 30.0, 0.5, 32.0, 0.5, 34.0, 0.5, + let e = _mm512_setr_ph( + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, + 33.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_div_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let r = _mm512_maskz_div_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b01010101010101010101010101010101, + unsafe fn test_mm512_maskz_fcmul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, -1.0); + let r = _mm512_maskz_fcmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b0101010101010101, a, b, ); - let e = _mm512_set_ph( - 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 
0.5, 0.0, 0.5, 0.0, 0.5, 0.0, - 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, + let e = _mm512_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_div_round_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm_set_sh(0.5); + unsafe fn test_mm_fcmul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let r = _mm_fcmul_sch(a, b); + let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_div_round_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let src = _mm_set_sh(4.0); - let r = _mm_mask_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 0, a, b, - ); - let e = _mm_set_sh(4.0); - assert_eq_m128h(r, e); - let r = _mm_mask_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 1, a, b, - ); - let e = _mm_set_sh(0.5); + unsafe fn test_mm_mask_fcmul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); + let r = _mm_mask_fcmul_sch(src, 0, a, b); + let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_div_round_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = - _mm_maskz_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); - let e = _mm_set_sh(0.0); - assert_eq_m128h(r, e); - let r = - _mm_maskz_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); - let e = _mm_set_sh(0.5); + unsafe fn test_mm_maskz_fcmul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let r = _mm_maskz_fcmul_sch(0, a, b); + let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_div_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_div_sh(a, b); - let e = _mm_set_sh(0.5); + unsafe fn test_mm_fcmul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let r = _mm_fcmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_div_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let src = _mm_set_sh(4.0); - let r = _mm_mask_div_sh(src, 0, a, b); - let e = _mm_set_sh(4.0); - assert_eq_m128h(r, e); - let r = _mm_mask_div_sh(src, 1, a, b); - let e = _mm_set_sh(0.5); + unsafe fn test_mm_mask_fcmul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); + let r = _mm_mask_fcmul_round_sch::<{ 
_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, + ); + let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_div_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_maskz_div_sh(0, a, b); - let e = _mm_set_sh(0.0); + unsafe fn test_mm_maskz_fcmul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let r = + _mm_maskz_fcmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); - let r = _mm_maskz_div_sh(1, a, b); - let e = _mm_set_sh(0.5); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_abs_ph() { + let a = _mm_set_ph(-1.0, 0.0, 1.0, -2.0, 3.0, -4.0, 5.0, -6.0); + let r = _mm_abs_ph(a); + let e = _mm_set_ph(1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mul_pch() { + unsafe fn test_mm256_abs_ph() { + let a = _mm256_set_ph( + -1.0, 0.0, 1.0, -2.0, 3.0, -4.0, 5.0, -6.0, 7.0, -8.0, 9.0, -10.0, 11.0, -12.0, 13.0, + -14.0, + ); + let r = _mm256_abs_ph(a); + let e = _mm256_set_ph( + 1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_abs_ph() { + let a = _mm512_set_ph( + -1.0, 0.0, 1.0, -2.0, 3.0, -4.0, 5.0, -6.0, 7.0, -8.0, 9.0, -10.0, 11.0, -12.0, 13.0, + -14.0, 15.0, -16.0, 17.0, -18.0, 19.0, -20.0, 21.0, -22.0, 23.0, -24.0, 25.0, -26.0, + 27.0, -28.0, 29.0, -30.0, + ); + let r = _mm512_abs_ph(a); + let e = _mm512_set_ph( + 1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, + 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, + 29.0, 30.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_conj_pch() { let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, 1.0); - let r = _mm_mul_pch(a, b); - let e = _mm_set1_pch(-1.0, 0.0); + let r = _mm_conj_pch(a); + let e = _mm_set1_pch(0.0, -1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_mul_pch() { + unsafe fn test_mm_mask_conj_pch() { let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, 1.0); let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0); - let r = _mm_mask_mul_pch(src, 0b0101, a, b); - let e = _mm_setr_ph(-1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0); + let r = _mm_mask_conj_pch(src, 0b0101, a); + let e = _mm_setr_ph(0.0, -1.0, 4.0, 5.0, 0.0, -1.0, 8.0, 9.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_mul_pch() { + unsafe fn test_mm_maskz_conj_pch() { let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, 1.0); - let r = _mm_maskz_mul_pch(0b0101, a, b); - let e = _mm_setr_ph(-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0); + let r = _mm_maskz_conj_pch(0b0101, a); + let e = _mm_setr_ph(0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mul_pch() { + unsafe fn test_mm256_conj_pch() { let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 1.0); - let r = _mm256_mul_pch(a, b); - let e = _mm256_set1_pch(-1.0, 0.0); + let r = _mm256_conj_pch(a); + let 
e = _mm256_set1_pch(0.0, -1.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_mul_pch() { + unsafe fn test_mm256_mask_conj_pch() { let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 1.0); let src = _mm256_setr_ph( 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, ); - let r = _mm256_mask_mul_pch(src, 0b01010101, a, b); - let e = _mm256_setr_ph( - -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, - ); - assert_eq_m256h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_mul_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 1.0); - let r = _mm256_maskz_mul_pch(0b01010101, a, b); + let r = _mm256_mask_conj_pch(src, 0b01010101, a); let e = _mm256_setr_ph( - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + 0.0, -1.0, 4.0, 5.0, 0.0, -1.0, 8.0, 9.0, 0.0, -1.0, 12.0, 13.0, 0.0, -1.0, 16.0, 17.0, ); assert_eq_m256h(r, e); } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mul_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let r = _mm512_mul_pch(a, b); - let e = _mm512_set1_pch(-1.0, 0.0); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_mul_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let src = _mm512_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, - 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, - 32.0, 33.0, - ); - let r = _mm512_mask_mul_pch(src, 0b0101010101010101, a, b); - let e = _mm512_setr_ph( - -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, - -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, - 33.0, - ); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_mul_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let r = _mm512_maskz_mul_pch(0b0101010101010101, a, b); - let e = _mm512_setr_ph( - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_conj_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let r = _mm256_maskz_conj_pch(0b01010101, a); + let e = _mm256_setr_ph( + 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, ); - assert_eq_m512h(r, e); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mul_round_pch() { + unsafe fn test_mm512_conj_pch() { let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let r = _mm512_mul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm512_set1_pch(-1.0, 0.0); + let r = _mm512_conj_pch(a); + let e = _mm512_set1_pch(0.0, -1.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_mul_round_pch() { + unsafe fn test_mm512_mask_conj_pch() { let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); let src = _mm512_setr_ph( 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 
31.0, 32.0, 33.0, ); - let r = _mm512_mask_mul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, - 0b0101010101010101, - a, - b, - ); + let r = _mm512_mask_conj_pch(src, 0b0101010101010101, a); let e = _mm512_setr_ph( - -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, - -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, + 0.0, -1.0, 4.0, 5.0, 0.0, -1.0, 8.0, 9.0, 0.0, -1.0, 12.0, 13.0, 0.0, -1.0, 16.0, 17.0, + 0.0, -1.0, 20.0, 21.0, 0.0, -1.0, 24.0, 25.0, 0.0, -1.0, 28.0, 29.0, 0.0, -1.0, 32.0, 33.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_mul_round_pch() { + unsafe fn test_mm512_maskz_conj_pch() { let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let r = _mm512_maskz_mul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b0101010101010101, - a, - b, - ); + let r = _mm512_maskz_conj_pch(0b0101010101010101, a); let e = _mm512_setr_ph( - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, + 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, ); assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mul_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let r = _mm_mul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_mul_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); - let r = _mm_mask_mul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 0, a, b, - ); - let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_mul_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let r = - _mm_maskz_mul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); - let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mul_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let r = _mm_mul_sch(a, b); - let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_mul_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); - let r = _mm_mask_mul_sch(src, 0, a, b); - let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_mul_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 
5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let r = _mm_maskz_mul_sch(0, a, b); - let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_fmadd_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 2.0); + let c = _mm_set1_pch(0.0, 3.0); + let r = _mm_fmadd_pch(a, b, c); + let e = _mm_set1_pch(-2.0, 3.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_fmul_pch() { + unsafe fn test_mm_mask_fmadd_pch() { let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, 1.0); - let r = _mm_fmul_pch(a, b); - let e = _mm_set1_pch(-1.0, 0.0); + let b = _mm_set1_pch(0.0, 2.0); + let c = _mm_set1_pch(0.0, 3.0); + let r = _mm_mask_fmadd_pch(a, 0b0101, b, c); + let e = _mm_setr_ph(-2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fmul_pch() { + unsafe fn test_mm_mask3_fmadd_pch() { let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, 1.0); - let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0); - let r = _mm_mask_fmul_pch(src, 0b0101, a, b); - let e = _mm_setr_ph(-1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0); + let b = _mm_set1_pch(0.0, 2.0); + let c = _mm_set1_pch(0.0, 3.0); + let r = _mm_mask3_fmadd_pch(a, b, c, 0b0101); + let e = _mm_setr_ph(-2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_fmul_pch() { + unsafe fn test_mm_maskz_fmadd_pch() { let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, 1.0); - let r = _mm_maskz_fmul_pch(0b0101, a, b); - let e = _mm_setr_ph(-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0); + let b = _mm_set1_pch(0.0, 2.0); + let c = _mm_set1_pch(0.0, 3.0); + let r = _mm_maskz_fmadd_pch(0b0101, a, b, c); + let e = _mm_setr_ph(-2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_fmul_pch() { + unsafe fn test_mm256_fmadd_pch() { let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 1.0); - let r = _mm256_fmul_pch(a, b); - let e = _mm256_set1_pch(-1.0, 0.0); + let b = _mm256_set1_pch(0.0, 2.0); + let c = _mm256_set1_pch(0.0, 3.0); + let r = _mm256_fmadd_pch(a, b, c); + let e = _mm256_set1_pch(-2.0, 3.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_fmul_pch() { + unsafe fn test_mm256_mask_fmadd_pch() { let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 1.0); - let src = _mm256_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + let b = _mm256_set1_pch(0.0, 2.0); + let c = _mm256_set1_pch(0.0, 3.0); + let r = _mm256_mask_fmadd_pch(a, 0b01010101, b, c); + let e = _mm256_setr_ph( + -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, ); - let r = _mm256_mask_fmul_pch(src, 0b01010101, a, b); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask3_fmadd_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 2.0); + let c = _mm256_set1_pch(0.0, 3.0); + let r = _mm256_mask3_fmadd_pch(a, b, c, 0b01010101); let e = _mm256_setr_ph( - -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, ); assert_eq_m256h(r, 
e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_fmul_pch() { + unsafe fn test_mm256_maskz_fmadd_pch() { let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 1.0); - let r = _mm256_maskz_fmul_pch(0b01010101, a, b); + let b = _mm256_set1_pch(0.0, 2.0); + let c = _mm256_set1_pch(0.0, 3.0); + let r = _mm256_maskz_fmadd_pch(0b01010101, a, b, c); let e = _mm256_setr_ph( - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmul_pch() { + unsafe fn test_mm512_fmadd_pch() { let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let r = _mm512_fmul_pch(a, b); - let e = _mm512_set1_pch(-1.0, 0.0); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_fmadd_pch(a, b, c); + let e = _mm512_set1_pch(-2.0, 3.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fmul_pch() { + unsafe fn test_mm512_mask_fmadd_pch() { let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let src = _mm512_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, - 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, - 32.0, 33.0, + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_mask_fmadd_pch(a, 0b0101010101010101, b, c); + let e = _mm512_setr_ph( + -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, + -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, ); - let r = _mm512_mask_fmul_pch(src, 0b0101010101010101, a, b); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask3_fmadd_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_mask3_fmadd_pch(a, b, c, 0b0101010101010101); let e = _mm512_setr_ph( - -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, - -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, - 33.0, + -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, + -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmul_pch() { + unsafe fn test_mm512_maskz_fmadd_pch() { let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let r = _mm512_maskz_fmul_pch(0b0101010101010101, a, b); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_maskz_fmadd_pch(0b0101010101010101, a, b, c); let e = _mm512_setr_ph( - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, + -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmul_round_pch() { + unsafe fn test_mm512_fmadd_round_pch() { let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let r = 
_mm512_fmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm512_set1_pch(-1.0, 0.0); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = + _mm512_fmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm512_set1_pch(-2.0, 3.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fmul_round_pch() { + unsafe fn test_mm512_mask_fmadd_round_pch() { let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let src = _mm512_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, - 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, - 32.0, 33.0, - ); - let r = _mm512_mask_fmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_mask_fmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, 0b0101010101010101, + b, + c, + ); + let e = _mm512_setr_ph( + -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, + -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask3_fmadd_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_mask3_fmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, b, + c, + 0b0101010101010101, ); let e = _mm512_setr_ph( - -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, - -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, - 33.0, + -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, + -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmul_round_pch() { + unsafe fn test_mm512_maskz_fmadd_round_pch() { let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let r = _mm512_maskz_fmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_maskz_fmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( 0b0101010101010101, a, b, + c, ); let e = _mm512_setr_ph( - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, + -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fmul_round_sch() { + unsafe fn test_mm_fmadd_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let r = _mm_fmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_fmadd_sch(a, b, c); + let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 
4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fmul_round_sch() { + unsafe fn test_mm_mask_fmadd_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); - let r = _mm_mask_fmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 0, a, b, - ); - let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_mask_fmadd_sch(a, 0, b, c); + let e = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); + let r = _mm_mask_fmadd_sch(a, 1, b, c); + let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fmul_round_sch() { + unsafe fn test_mm_mask3_fmadd_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let r = - _mm_maskz_fmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_mask3_fmadd_sch(a, b, c, 0); + let e = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + assert_eq_m128h(r, e); + let r = _mm_mask3_fmadd_sch(a, b, c, 1); + let e = _mm_setr_ph(-2.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_fmadd_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_maskz_fmadd_sch(0, a, b, c); let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); + let r = _mm_maskz_fmadd_sch(1, a, b, c); + let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fmul_sch() { + unsafe fn test_mm_fmadd_round_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let r = _mm_fmul_sch(a, b); - let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fmul_sch() { + unsafe fn test_mm_mask_fmadd_round_sch() { let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); - let r = _mm_mask_fmul_sch(src, 0, a, b); - let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_mask_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, 0, b, 
c,
+        );
+        let e = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
         let r = _mm_mask_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             a, 1, b, c,
         );
         let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_maskz_fmul_sch() {
+    unsafe fn test_mm_mask3_fmadd_round_sch() {
         let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
-        let r = _mm_maskz_fmul_sch(0, a, b);
+        let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
+        let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        let r = _mm_mask3_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            a, b, c, 0,
+        );
+        let e = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        assert_eq_m128h(r, e);
+        let r = _mm_mask3_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            a, b, c, 1,
+        );
+        let e = _mm_setr_ph(-2.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        assert_eq_m128h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm_maskz_fmadd_round_sch() {
+        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
+        let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        let r = _mm_maskz_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            0, a, b, c,
+        );
         let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
+        let r = _mm_maskz_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            1, a, b, c,
+        );
+        let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_cmul_pch() {
+    unsafe fn test_mm_fcmadd_pch() {
         let a = _mm_set1_pch(0.0, 1.0);
-        let b = _mm_set1_pch(0.0, -1.0);
-        let r = _mm_cmul_pch(a, b);
-        let e = _mm_set1_pch(-1.0, 0.0);
+        let b = _mm_set1_pch(0.0, 2.0);
+        let c = _mm_set1_pch(0.0, 3.0);
+        let r = _mm_fcmadd_pch(a, b, c);
+        let e = _mm_set1_pch(2.0, 3.0);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_mask_cmul_pch() {
+    unsafe fn test_mm_mask_fcmadd_pch() {
         let a = _mm_set1_pch(0.0, 1.0);
-        let b = _mm_set1_pch(0.0, -1.0);
-        let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0);
-        let r = _mm_mask_cmul_pch(src, 0b0101, a, b);
-        let e = _mm_setr_ph(-1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0);
+        let b = _mm_set1_pch(0.0, 2.0);
+        let c = _mm_set1_pch(0.0, 3.0);
+        let r = _mm_mask_fcmadd_pch(a, 0b0101, b, c);
+        let e = _mm_setr_ph(2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_maskz_cmul_pch() {
+    unsafe fn test_mm_mask3_fcmadd_pch() {
         let a = _mm_set1_pch(0.0, 1.0);
-        let b = _mm_set1_pch(0.0, -1.0);
-        let r = _mm_maskz_cmul_pch(0b0101, a, b);
-        let e = _mm_setr_ph(-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0);
+        let b = _mm_set1_pch(0.0, 2.0);
+        let c = _mm_set1_pch(0.0, 3.0);
+        let r = _mm_mask3_fcmadd_pch(a, b, c, 0b0101);
+        let e = _mm_setr_ph(2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_cmul_pch() {
+    unsafe fn test_mm_maskz_fcmadd_pch() {
+        let a = _mm_set1_pch(0.0, 1.0);
+        let b = _mm_set1_pch(0.0, 2.0);
+        let c = _mm_set1_pch(0.0, 3.0);
+        let r = _mm_maskz_fcmadd_pch(0b0101, a, b, c);
+        let e = _mm_setr_ph(2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0);
+        assert_eq_m128h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm256_fcmadd_pch() {
         let a = _mm256_set1_pch(0.0, 1.0);
-        let b = _mm256_set1_pch(0.0, -1.0);
-        let r = _mm256_cmul_pch(a, b);
-        let e = _mm256_set1_pch(-1.0, 0.0);
+        let b = _mm256_set1_pch(0.0, 2.0);
+        let c = _mm256_set1_pch(0.0, 3.0);
+        let r = _mm256_fcmadd_pch(a, b, c);
+        let e = _mm256_set1_pch(2.0, 3.0);
         assert_eq_m256h(r, e);
     }

     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_mask_cmul_pch() {
+    unsafe fn test_mm256_mask_fcmadd_pch() {
         let a = _mm256_set1_pch(0.0, 1.0);
-        let b = _mm256_set1_pch(0.0, -1.0);
-        let src = _mm256_setr_ph(
-            2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0,
+        let b = _mm256_set1_pch(0.0, 2.0);
+        let c = _mm256_set1_pch(0.0, 3.0);
+        let r = _mm256_mask_fcmadd_pch(a, 0b01010101, b, c);
+        let e = _mm256_setr_ph(
+            2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0,
         );
-        let r = _mm256_mask_cmul_pch(src, 0b01010101, a, b);
+        assert_eq_m256h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm256_mask3_fcmadd_pch() {
+        let a = _mm256_set1_pch(0.0, 1.0);
+        let b = _mm256_set1_pch(0.0, 2.0);
+        let c = _mm256_set1_pch(0.0, 3.0);
+        let r = _mm256_mask3_fcmadd_pch(a, b, c, 0b01010101);
         let e = _mm256_setr_ph(
-            -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0,
+            2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0,
         );
         assert_eq_m256h(r, e);
     }

     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_maskz_cmul_pch() {
+    unsafe fn test_mm256_maskz_fcmadd_pch() {
         let a = _mm256_set1_pch(0.0, 1.0);
-        let b = _mm256_set1_pch(0.0, -1.0);
-        let r = _mm256_maskz_cmul_pch(0b01010101, a, b);
+        let b = _mm256_set1_pch(0.0, 2.0);
+        let c = _mm256_set1_pch(0.0, 3.0);
+        let r = _mm256_maskz_fcmadd_pch(0b01010101, a, b, c);
         let e = _mm256_setr_ph(
-            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
+            2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0,
         );
         assert_eq_m256h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_cmul_pch() {
+    unsafe fn test_mm512_fcmadd_pch() {
         let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, -1.0);
-        let r = _mm512_cmul_pch(a, b);
-        let e = _mm512_set1_pch(-1.0, 0.0);
+        let b = _mm512_set1_pch(0.0, 2.0);
+        let c = _mm512_set1_pch(0.0, 3.0);
+        let r = _mm512_fcmadd_pch(a, b, c);
+        let e = _mm512_set1_pch(2.0, 3.0);
         assert_eq_m512h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask_cmul_pch() {
+    unsafe fn test_mm512_mask_fcmadd_pch() {
         let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, -1.0);
-        let src = _mm512_setr_ph(
-            2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0,
-            18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0,
-            32.0, 33.0,
+        let b = _mm512_set1_pch(0.0, 2.0);
+        let c = _mm512_set1_pch(0.0, 3.0);
+        let r = _mm512_mask_fcmadd_pch(a, 0b0101010101010101, b, c);
+        let e = _mm512_setr_ph(
+            2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0,
+            3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0,
         );
-        let r = _mm512_mask_cmul_pch(src, 0b0101010101010101, a, b);
+        assert_eq_m512h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm512_mask3_fcmadd_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, 2.0);
+        let c = _mm512_set1_pch(0.0, 3.0);
+        let r = _mm512_mask3_fcmadd_pch(a, b, c, 0b0101010101010101);
         let e = _mm512_setr_ph(
-            -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0,
-            -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0,
-            33.0,
+            2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0,
+            3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0,
         );
         assert_eq_m512h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_maskz_cmul_pch() {
+    unsafe fn test_mm512_maskz_fcmadd_pch() {
         let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, -1.0);
-        let r = _mm512_maskz_cmul_pch(0b0101010101010101, a, b);
+        let b = _mm512_set1_pch(0.0, 2.0);
+        let c = _mm512_set1_pch(0.0, 3.0);
+        let r = _mm512_maskz_fcmadd_pch(0b0101010101010101, a, b, c);
         let e = _mm512_setr_ph(
-            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
-            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
+            2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0,
+            3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0,
         );
         assert_eq_m512h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_cmul_round_pch() {
+    unsafe fn test_mm512_fcmadd_round_pch() {
         let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, -1.0);
-        let r = _mm512_cmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
-        let e = _mm512_set1_pch(-1.0, 0.0);
+        let b = _mm512_set1_pch(0.0, 2.0);
+        let c = _mm512_set1_pch(0.0, 3.0);
+        let r =
+            _mm512_fcmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c);
+        let e = _mm512_set1_pch(2.0, 3.0);
         assert_eq_m512h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask_cmul_round_pch() {
+    unsafe fn test_mm512_mask_fcmadd_round_pch() {
         let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, -1.0);
-        let src = _mm512_setr_ph(
-            2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0,
-            18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0,
-            32.0, 33.0,
-        );
-        let r = _mm512_mask_cmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            src,
+        let b = _mm512_set1_pch(0.0, 2.0);
+        let c = _mm512_set1_pch(0.0, 3.0);
+        let r = _mm512_mask_fcmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            a,
             0b0101010101010101,
+            b,
+            c,
+        );
+        let e = _mm512_setr_ph(
+            2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0,
+            3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0,
+        );
+        assert_eq_m512h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm512_mask3_fcmadd_round_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, 2.0);
+        let c = _mm512_set1_pch(0.0, 3.0);
+        let r = _mm512_mask3_fcmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             a,
             b,
+            c,
+            0b0101010101010101,
         );
         let e = _mm512_setr_ph(
-            -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0,
-            -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0,
-            33.0,
+            2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0,
+            3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0,
         );
         assert_eq_m512h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_maskz_cmul_round_pch() {
+    unsafe fn test_mm512_maskz_fcmadd_round_pch() {
         let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, -1.0);
-        let r = _mm512_maskz_cmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+        let b = _mm512_set1_pch(0.0, 2.0);
+        let c = _mm512_set1_pch(0.0, 3.0);
+        let r = _mm512_maskz_fcmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             0b0101010101010101,
             a,
             b,
+            c,
         );
         let e = _mm512_setr_ph(
-            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
-            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
+            2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0,
+            3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0,
        );
         assert_eq_m512h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_cmul_sch() {
+    unsafe fn test_mm_fcmadd_sch() {
         let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0);
-        let r = _mm_cmul_sch(a, b);
-        let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
+        let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        let r = _mm_fcmadd_sch(a, b, c);
+        let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask_cmul_sch() {
+    unsafe fn test_mm_mask_fcmadd_sch() {
         let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0);
-        let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0);
-        let r = _mm_mask_cmul_sch(src, 0, a, b);
-        let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
+        let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        let r = _mm_mask_fcmadd_sch(a, 0, b, c);
+        let e = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        assert_eq_m128h(r, e);
+        let r = _mm_mask_fcmadd_sch(a, 1, b, c);
+        let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_maskz_cmul_sch() {
+    unsafe fn test_mm_mask3_fcmadd_sch() {
         let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0);
-        let r = _mm_maskz_cmul_sch(0, a, b);
+        let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
+        let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        let r = _mm_mask3_fcmadd_sch(a, b, c, 0);
+        let e = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        assert_eq_m128h(r, e);
+        let r = _mm_mask3_fcmadd_sch(a, b, c, 1);
+        let e = _mm_setr_ph(2.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        assert_eq_m128h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm_maskz_fcmadd_sch() {
+        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
+        let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        let r = _mm_maskz_fcmadd_sch(0, a, b, c);
         let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
+        let r = _mm_maskz_fcmadd_sch(1, a, b, c);
+        let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_cmul_round_sch() {
+    unsafe fn test_mm_fcmadd_round_sch() {
         let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0);
-        let r = _mm_cmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
-        let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
+        let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        let r = _mm_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c);
+        let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask_cmul_round_sch() {
+    unsafe fn test_mm_mask_fcmadd_round_sch() {
         let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0);
-        let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0);
-        let r = _mm_mask_cmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            src, 0, a, b,
+        let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
+        let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        let r = _mm_mask_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            a, 0, b, c,
         );
-        let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let e = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        assert_eq_m128h(r, e);
+        let r = _mm_mask_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            a, 1, b, c,
+        );
+        let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_maskz_cmul_round_sch() {
+    unsafe fn test_mm_mask3_fcmadd_round_sch() {
         let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0);
-        let r =
-            _mm_maskz_cmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b);
+        let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
+        let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        let r = _mm_mask3_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            a, b, c, 0,
+        );
+        let e = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        assert_eq_m128h(r, e);
+        let r = _mm_mask3_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            a, b, c, 1,
+        );
+        let e = _mm_setr_ph(2.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        assert_eq_m128h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm_maskz_fcmadd_round_sch() {
+        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
+        let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        let r = _mm_maskz_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            0, a, b, c,
+        );
         let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
+        let r = _mm_maskz_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            1, a, b, c,
+        );
+        let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_fcmul_pch() {
-        let a = _mm_set1_pch(0.0, 1.0);
-        let b = _mm_set1_pch(0.0, -1.0);
-        let r = _mm_fcmul_pch(a, b);
-        let e = _mm_set1_pch(-1.0, 0.0);
+    unsafe fn test_mm_fmadd_ph() {
+        let a = _mm_set1_ph(1.0);
+        let b = _mm_set1_ph(2.0);
+        let c = _mm_set1_ph(3.0);
+        let r = _mm_fmadd_ph(a, b, c);
+        let e = _mm_set1_ph(5.0);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_mask_fcmul_pch() {
-        let a = _mm_set1_pch(0.0, 1.0);
-        let b = _mm_set1_pch(0.0, -1.0);
-        let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0);
-        let r = _mm_mask_fcmul_pch(src, 0b0101, a, b);
-        let e = _mm_setr_ph(-1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0);
+    unsafe fn test_mm_mask_fmadd_ph() {
+        let a = _mm_set1_ph(1.0);
+        let b = _mm_set1_ph(2.0);
+        let c = _mm_set1_ph(3.0);
+        let r = _mm_mask_fmadd_ph(a, 0b01010101, b, c);
+        let e = _mm_set_ph(1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_maskz_fcmul_pch() {
-        let a = _mm_set1_pch(0.0, 1.0);
-        let b = _mm_set1_pch(0.0, -1.0);
-        let r = _mm_maskz_fcmul_pch(0b0101, a, b);
-        let e = _mm_setr_ph(-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0);
+    unsafe fn test_mm_mask3_fmadd_ph() {
+        let a = _mm_set1_ph(1.0);
+        let b = _mm_set1_ph(2.0);
+        let c = _mm_set1_ph(3.0);
+        let r = _mm_mask3_fmadd_ph(a, b, c, 0b01010101);
+        let e = _mm_set_ph(3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_fcmul_pch() {
-        let a = _mm256_set1_pch(0.0, 1.0);
-        let b = _mm256_set1_pch(0.0, -1.0);
-        let r = _mm256_fcmul_pch(a, b);
-        let e = _mm256_set1_pch(-1.0, 0.0);
+    unsafe fn test_mm_maskz_fmadd_ph() {
+        let a = _mm_set1_ph(1.0);
+        let b = _mm_set1_ph(2.0);
+        let c = _mm_set1_ph(3.0);
+        let r = _mm_maskz_fmadd_ph(0b01010101, a, b, c);
+        let e = _mm_set_ph(0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0);
+        assert_eq_m128h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm256_fmadd_ph() {
+        let a = _mm256_set1_ph(1.0);
+        let b = _mm256_set1_ph(2.0);
+        let c = _mm256_set1_ph(3.0);
+        let r = _mm256_fmadd_ph(a, b, c);
+        let e = _mm256_set1_ph(5.0);
         assert_eq_m256h(r, e);
     }

     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_mask_fcmul_pch() {
-        let a = _mm256_set1_pch(0.0, 1.0);
-        let b = _mm256_set1_pch(0.0, -1.0);
-        let src = _mm256_setr_ph(
-            2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0,
+    unsafe fn test_mm256_mask_fmadd_ph() {
+        let a = _mm256_set1_ph(1.0);
+        let b = _mm256_set1_ph(2.0);
+        let c = _mm256_set1_ph(3.0);
+        let r = _mm256_mask_fmadd_ph(a, 0b0101010101010101, b, c);
+        let e = _mm256_set_ph(
+            1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0,
         );
-        let r = _mm256_mask_fcmul_pch(src, 0b01010101, a, b);
-        let e = _mm256_setr_ph(
-            -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0,
+        assert_eq_m256h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm256_mask3_fmadd_ph() {
+        let a = _mm256_set1_ph(1.0);
+        let b = _mm256_set1_ph(2.0);
+        let c = _mm256_set1_ph(3.0);
+        let r = _mm256_mask3_fmadd_ph(a, b, c, 0b0101010101010101);
+        let e = _mm256_set_ph(
+            3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0,
         );
         assert_eq_m256h(r, e);
     }

     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_maskz_fcmul_pch() {
-        let a = _mm256_set1_pch(0.0, 1.0);
-        let b = _mm256_set1_pch(0.0, -1.0);
-        let r = _mm256_maskz_fcmul_pch(0b01010101, a, b);
-        let e = _mm256_setr_ph(
-            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
+    unsafe fn test_mm256_maskz_fmadd_ph() {
+        let a = _mm256_set1_ph(1.0);
+        let b = _mm256_set1_ph(2.0);
+        let c = _mm256_set1_ph(3.0);
+        let r = _mm256_maskz_fmadd_ph(0b0101010101010101, a, b, c);
+        let e = _mm256_set_ph(
+            0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0,
         );
         assert_eq_m256h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_fcmul_pch() {
-        let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, -1.0);
-        let r = _mm512_fcmul_pch(a, b);
-        let e = _mm512_set1_pch(-1.0, 0.0);
+    unsafe fn test_mm512_fmadd_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_fmadd_ph(a, b, c);
+        let e = _mm512_set1_ph(5.0);
         assert_eq_m512h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask_fcmul_pch() {
-        let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, -1.0);
-        let src = _mm512_setr_ph(
-            2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0,
-            18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0,
-            32.0, 33.0,
+    unsafe fn test_mm512_mask_fmadd_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_mask_fmadd_ph(a, 0b01010101010101010101010101010101, b, c);
+        let e = _mm512_set_ph(
+            1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0,
+            5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0,
         );
-        let r = _mm512_mask_fcmul_pch(src, 0b0101010101010101, a, b);
-        let e = _mm512_setr_ph(
-            -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0,
-            -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0,
-            33.0,
+        assert_eq_m512h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm512_mask3_fmadd_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_mask3_fmadd_ph(a, b, c, 0b01010101010101010101010101010101);
+        let e = _mm512_set_ph(
+            3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0,
+            5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0,
         );
         assert_eq_m512h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_maskz_fcmul_pch() {
-        let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, -1.0);
-        let r = _mm512_maskz_fcmul_pch(0b0101010101010101, a, b);
-        let e = _mm512_setr_ph(
-            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
-            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
+    unsafe fn test_mm512_maskz_fmadd_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_maskz_fmadd_ph(0b01010101010101010101010101010101, a, b, c);
+        let e = _mm512_set_ph(
+            0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0,
+            5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0,
         );
         assert_eq_m512h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_fcmul_round_pch() {
-        let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, -1.0);
-        let r = _mm512_fcmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
-        let e = _mm512_set1_pch(-1.0, 0.0);
+    unsafe fn test_mm512_fmadd_round_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_fmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c);
+        let e = _mm512_set1_ph(5.0);
         assert_eq_m512h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask_fcmul_round_pch() {
-        let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, -1.0);
-        let src = _mm512_setr_ph(
-            2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0,
-            18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0,
-            32.0, 33.0,
-        );
-        let r = _mm512_mask_fcmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            src,
-            0b0101010101010101,
+    unsafe fn test_mm512_mask_fmadd_round_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_mask_fmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             a,
+            0b01010101010101010101010101010101,
             b,
+            c,
         );
-        let e = _mm512_setr_ph(
-            -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0,
-            -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0,
-            33.0,
+        let e = _mm512_set_ph(
+            1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0,
+            5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0,
         );
         assert_eq_m512h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_maskz_fcmul_round_pch() {
-        let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, -1.0);
-        let r = _mm512_maskz_fcmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            0b0101010101010101,
+    unsafe fn test_mm512_mask3_fmadd_round_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_mask3_fmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             a,
             b,
+            c,
+            0b01010101010101010101010101010101,
         );
-        let e = _mm512_setr_ph(
-            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
-            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
+        let e = _mm512_set_ph(
+            3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0,
+            5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0,
         );
         assert_eq_m512h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_fcmul_sch() {
-        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0);
-        let r = _mm_fcmul_sch(a, b);
-        let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        assert_eq_m128h(r, e);
-    }
-
-    #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask_fcmul_sch() {
-        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0);
-        let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0);
-        let r = _mm_mask_fcmul_sch(src, 0, a, b);
-        let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        assert_eq_m128h(r, e);
+    unsafe fn test_mm512_maskz_fmadd_round_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_maskz_fmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            0b01010101010101010101010101010101,
+            a,
+            b,
+            c,
+        );
+        let e = _mm512_set_ph(
+            0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0,
+            5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0,
+        );
+        assert_eq_m512h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_maskz_fcmul_sch() {
-        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0);
-        let r = _mm_maskz_fcmul_sch(0, a, b);
-        let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+    unsafe fn test_mm_fmadd_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_fmadd_sh(a, b, c);
+        let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_fcmul_round_sch() {
-        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0);
-        let r = _mm_fcmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
-        let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+    unsafe fn test_mm_mask_fmadd_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_mask_fmadd_sh(a, 0, b, c);
+        let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
-    }
-
-    #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask_fcmul_round_sch() {
-        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0);
-        let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0);
-        let r = _mm_mask_fcmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            src, 0, a, b,
-        );
-        let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let r = _mm_mask_fmadd_sh(a, 1, b, c);
+        let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_maskz_fcmul_round_sch() {
-        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0);
-        let r =
-            _mm_maskz_fcmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b);
-        let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+    unsafe fn test_mm_mask3_fmadd_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_mask3_fmadd_sh(a, b, c, 0);
+        let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
         assert_eq_m128h(r, e);
-    }
-
-    #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_abs_ph() {
-        let a = _mm_set_ph(-1.0, 0.0, 1.0, -2.0, 3.0, -4.0, 5.0, -6.0);
-        let r = _mm_abs_ph(a);
-        let e = _mm_set_ph(1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0);
+        let r = _mm_mask3_fmadd_sh(a, b, c, 1);
+        let e = _mm_setr_ph(5.0, 30., 31., 32., 33., 34., 35., 36.);
         assert_eq_m128h(r, e);
     }

-    #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_abs_ph() {
-        let a = _mm256_set_ph(
-            -1.0, 0.0, 1.0, -2.0, 3.0, -4.0, 5.0, -6.0, 7.0, -8.0, 9.0, -10.0, 11.0, -12.0, 13.0,
-            -14.0,
-        );
-        let r = _mm256_abs_ph(a);
-        let e = _mm256_set_ph(
-            1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0,
-        );
-        assert_eq_m256h(r, e);
-    }
-
-    #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_abs_ph() {
-        let a = _mm512_set_ph(
-            -1.0, 0.0, 1.0, -2.0, 3.0, -4.0, 5.0, -6.0, 7.0, -8.0, 9.0, -10.0, 11.0, -12.0, 13.0,
-            -14.0, 15.0, -16.0, 17.0, -18.0, 19.0, -20.0, 21.0, -22.0, 23.0, -24.0, 25.0, -26.0,
-            27.0, -28.0, 29.0, -30.0,
-        );
-        let r = _mm512_abs_ph(a);
-        let e = _mm512_set_ph(
-            1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0,
-            15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0,
-            29.0, 30.0,
-        );
-        assert_eq_m512h(r, e);
-    }
-
-    #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_conj_pch() {
-        let a = _mm_set1_pch(0.0, 1.0);
-        let r = _mm_conj_pch(a);
-        let e = _mm_set1_pch(0.0, -1.0);
-        assert_eq_m128h(r, e);
-    }
-
-    #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_mask_conj_pch() {
-        let a = _mm_set1_pch(0.0, 1.0);
-        let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0);
-        let r = _mm_mask_conj_pch(src, 0b0101, a);
-        let e = _mm_setr_ph(0.0, -1.0, 4.0, 5.0, 0.0, -1.0, 8.0, 9.0);
+    unsafe fn test_mm_maskz_fmadd_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_maskz_fmadd_sh(0, a, b, c);
+        let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
-    }
-
-    #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_maskz_conj_pch() {
-        let a = _mm_set1_pch(0.0, 1.0);
-        let r = _mm_maskz_conj_pch(0b0101, a);
-        let e = _mm_setr_ph(0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0);
+        let r = _mm_maskz_fmadd_sh(1, a, b, c);
+        let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
     }

-    #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_conj_pch() {
-        let a = _mm256_set1_pch(0.0, 1.0);
-        let r = _mm256_conj_pch(a);
-        let e = _mm256_set1_pch(0.0, -1.0);
-        assert_eq_m256h(r, e);
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm_fmadd_round_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c);
+        let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.);
+        assert_eq_m128h(r, e);
     }

-    #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_mask_conj_pch() {
-        let a = _mm256_set1_pch(0.0, 1.0);
-        let src = _mm256_setr_ph(
-            2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0,
-        );
-        let r = _mm256_mask_conj_pch(src, 0b01010101, a);
-        let e = _mm256_setr_ph(
-            0.0, -1.0, 4.0, 5.0, 0.0, -1.0, 8.0, 9.0, 0.0, -1.0, 12.0, 13.0, 0.0, -1.0, 16.0, 17.0,
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm_mask_fmadd_round_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_mask_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            a, 0, b, c,
         );
-        assert_eq_m256h(r, e);
-    }
-
-    #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_maskz_conj_pch() {
-        let a = _mm256_set1_pch(0.0, 1.0);
-        let r = _mm256_maskz_conj_pch(0b01010101, a);
-        let e = _mm256_setr_ph(
-            0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0,
+        let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        assert_eq_m128h(r, e);
+        let r = _mm_mask_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            a, 1, b, c,
         );
-        assert_eq_m256h(r, e);
-    }
-
-    #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_conj_pch() {
-        let a = _mm512_set1_pch(0.0, 1.0);
-        let r = _mm512_conj_pch(a);
-        let e = _mm512_set1_pch(0.0, -1.0);
-        assert_eq_m512h(r, e);
+        let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.);
+        assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask_conj_pch() {
-        let a = _mm512_set1_pch(0.0, 1.0);
-        let src = _mm512_setr_ph(
-            2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0,
-            18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0,
-            32.0, 33.0,
+    unsafe fn test_mm_mask3_fmadd_round_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_mask3_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            a, b, c, 0,
         );
-        let r = _mm512_mask_conj_pch(src, 0b0101010101010101, a);
-        let e = _mm512_setr_ph(
-            0.0, -1.0, 4.0, 5.0, 0.0, -1.0, 8.0, 9.0, 0.0, -1.0, 12.0, 13.0, 0.0, -1.0, 16.0, 17.0,
-            0.0, -1.0, 20.0, 21.0, 0.0, -1.0, 24.0, 25.0, 0.0, -1.0, 28.0, 29.0, 0.0, -1.0, 32.0,
-            33.0,
+        let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        assert_eq_m128h(r, e);
+        let r = _mm_mask3_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            a, b, c, 1,
         );
-        assert_eq_m512h(r, e);
+        let e = _mm_setr_ph(5.0, 30., 31., 32., 33., 34., 35., 36.);
+        assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_maskz_conj_pch() {
-        let a = _mm512_set1_pch(0.0, 1.0);
-        let r = _mm512_maskz_conj_pch(0b0101010101010101, a);
-        let e = _mm512_setr_ph(
-            0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0,
-            0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0,
+    unsafe fn test_mm_maskz_fmadd_round_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_maskz_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            0, a, b, c,
         );
-        assert_eq_m512h(r, e);
+        let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.);
+        assert_eq_m128h(r, e);
+        let r = _mm_maskz_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            1, a, b, c,
+        );
+        let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.);
+        assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_fmadd_pch() {
-        let a = _mm_set1_pch(0.0, 1.0);
-        let b = _mm_set1_pch(0.0, 2.0);
-        let c = _mm_set1_pch(0.0, 3.0);
-        let r = _mm_fmadd_pch(a, b, c);
-        let e = _mm_set1_pch(-2.0, 3.0);
+    unsafe fn test_mm_fmsub_ph() {
+        let a = _mm_set1_ph(1.0);
+        let b = _mm_set1_ph(2.0);
+        let c = _mm_set1_ph(3.0);
+        let r = _mm_fmsub_ph(a, b, c);
+        let e = _mm_set1_ph(-1.0);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_mask_fmadd_pch() {
-        let a = _mm_set1_pch(0.0, 1.0);
-        let b = _mm_set1_pch(0.0, 2.0);
-        let c = _mm_set1_pch(0.0, 3.0);
-        let r = _mm_mask_fmadd_pch(a, 0b0101, b, c);
-        let e = _mm_setr_ph(-2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0);
+    unsafe fn test_mm_mask_fmsub_ph() {
+        let a = _mm_set1_ph(1.0);
+        let b = _mm_set1_ph(2.0);
+        let c = _mm_set1_ph(3.0);
+        let r = _mm_mask_fmsub_ph(a, 0b01010101, b, c);
+        let e = _mm_set_ph(1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_mask3_fmadd_pch() {
-        let a = _mm_set1_pch(0.0, 1.0);
-        let b = _mm_set1_pch(0.0, 2.0);
-        let c = _mm_set1_pch(0.0, 3.0);
-        let r = _mm_mask3_fmadd_pch(a, b, c, 0b0101);
-        let e = _mm_setr_ph(-2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0);
+    unsafe fn test_mm_mask3_fmsub_ph() {
+        let a = _mm_set1_ph(1.0);
+        let b = _mm_set1_ph(2.0);
+        let c = _mm_set1_ph(3.0);
+        let r = _mm_mask3_fmsub_ph(a, b, c, 0b01010101);
+        let e = _mm_set_ph(3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_maskz_fmadd_pch() {
-        let a = _mm_set1_pch(0.0, 1.0);
-        let b = _mm_set1_pch(0.0, 2.0);
-        let c = _mm_set1_pch(0.0, 3.0);
-        let r = _mm_maskz_fmadd_pch(0b0101, a, b, c);
-        let e = _mm_setr_ph(-2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0);
+    unsafe fn test_mm_maskz_fmsub_ph() {
+        let a = _mm_set1_ph(1.0);
+        let b = _mm_set1_ph(2.0);
+        let c = _mm_set1_ph(3.0);
+        let r = _mm_maskz_fmsub_ph(0b01010101, a, b, c);
+        let e = _mm_set_ph(0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_fmadd_pch() {
-        let a = _mm256_set1_pch(0.0, 1.0);
-        let b = _mm256_set1_pch(0.0, 2.0);
-        let c = _mm256_set1_pch(0.0, 3.0);
-        let r = _mm256_fmadd_pch(a, b, c);
-        let e = _mm256_set1_pch(-2.0, 3.0);
+    unsafe fn test_mm256_fmsub_ph() {
+        let a = _mm256_set1_ph(1.0);
+        let b = _mm256_set1_ph(2.0);
+        let c = _mm256_set1_ph(3.0);
+        let r = _mm256_fmsub_ph(a, b, c);
+        let e = _mm256_set1_ph(-1.0);
         assert_eq_m256h(r, e);
     }

     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_mask_fmadd_pch() {
-        let a = _mm256_set1_pch(0.0, 1.0);
-        let b = _mm256_set1_pch(0.0, 2.0);
-        let c = _mm256_set1_pch(0.0, 3.0);
-        let r = _mm256_mask_fmadd_pch(a, 0b01010101, b, c);
-        let e = _mm256_setr_ph(
-            -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0,
+    unsafe fn test_mm256_mask_fmsub_ph() {
+        let a = _mm256_set1_ph(1.0);
+        let b = _mm256_set1_ph(2.0);
+        let c = _mm256_set1_ph(3.0);
+        let r = _mm256_mask_fmsub_ph(a, 0b0101010101010101, b, c);
+        let e = _mm256_set_ph(
+            1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0,
         );
         assert_eq_m256h(r, e);
     }

     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_mask3_fmadd_pch() {
-        let a = _mm256_set1_pch(0.0, 1.0);
-        let b = _mm256_set1_pch(0.0, 2.0);
-        let c = _mm256_set1_pch(0.0, 3.0);
-        let r = _mm256_mask3_fmadd_pch(a, b, c, 0b01010101);
-        let e = _mm256_setr_ph(
-            -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0,
+    unsafe fn test_mm256_mask3_fmsub_ph() {
+        let a = _mm256_set1_ph(1.0);
+        let b = _mm256_set1_ph(2.0);
+        let c = _mm256_set1_ph(3.0);
+        let r = _mm256_mask3_fmsub_ph(a, b, c, 0b0101010101010101);
+        let e = _mm256_set_ph(
+            3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0,
         );
         assert_eq_m256h(r, e);
     }

     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_maskz_fmadd_pch() {
-        let a = _mm256_set1_pch(0.0, 1.0);
-        let b = _mm256_set1_pch(0.0, 2.0);
-        let c = _mm256_set1_pch(0.0, 3.0);
-        let r = _mm256_maskz_fmadd_pch(0b01010101, a, b, c);
-        let e = _mm256_setr_ph(
-            -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0,
+    unsafe fn test_mm256_maskz_fmsub_ph() {
+        let a = _mm256_set1_ph(1.0);
+        let b = _mm256_set1_ph(2.0);
+        let c = _mm256_set1_ph(3.0);
+        let r = _mm256_maskz_fmsub_ph(0b0101010101010101, a, b, c);
+        let e = _mm256_set_ph(
+            0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0,
         );
         assert_eq_m256h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_fmadd_pch() {
-        let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, 2.0);
-        let c = _mm512_set1_pch(0.0, 3.0);
-        let r = _mm512_fmadd_pch(a, b, c);
-        let e = _mm512_set1_pch(-2.0, 3.0);
-        assert_eq_m512h(r, e);
-    }
-
-    #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask_fmadd_pch() {
-        let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, 2.0);
-        let c = _mm512_set1_pch(0.0, 3.0);
-        let r = _mm512_mask_fmadd_pch(a, 0b0101010101010101, b, c);
-        let e = _mm512_setr_ph(
-            -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0,
-            -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0,
-        );
+    unsafe fn test_mm512_fmsub_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_fmsub_ph(a, b, c);
+        let e = _mm512_set1_ph(-1.0);
         assert_eq_m512h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask3_fmadd_pch() {
-        let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, 2.0);
-        let c = _mm512_set1_pch(0.0, 3.0);
-        let r = _mm512_mask3_fmadd_pch(a, b, c, 0b0101010101010101);
-        let e = _mm512_setr_ph(
-            -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0,
-            -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0,
+    unsafe fn test_mm512_mask_fmsub_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_mask_fmsub_ph(a, 0b01010101010101010101010101010101, b, c);
+        let e = _mm512_set_ph(
+            1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0,
+            1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0,
         );
         assert_eq_m512h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_maskz_fmadd_pch() {
-        let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, 2.0);
-        let c = _mm512_set1_pch(0.0, 3.0);
-        let r = _mm512_maskz_fmadd_pch(0b0101010101010101, a, b, c);
-        let e = _mm512_setr_ph(
-            -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0,
-            -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0,
+    unsafe fn test_mm512_mask3_fmsub_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_mask3_fmsub_ph(a, b, c, 0b01010101010101010101010101010101);
+        let e = _mm512_set_ph(
+            3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0,
+            3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0,
        );
         assert_eq_m512h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_fmadd_round_pch() {
-        let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, 2.0);
-        let c = _mm512_set1_pch(0.0, 3.0);
-        let r =
-            _mm512_fmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c);
-        let e = _mm512_set1_pch(-2.0, 3.0);
+    unsafe fn test_mm512_maskz_fmsub_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_maskz_fmsub_ph(0b01010101010101010101010101010101, a, b, c);
+        let e = _mm512_set_ph(
+            0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0,
+            0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0,
+        );
         assert_eq_m512h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask_fmadd_round_pch() {
-        let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, 2.0);
-        let c = _mm512_set1_pch(0.0, 3.0);
-        let r = _mm512_mask_fmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+    unsafe fn test_mm512_fmsub_round_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_fmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c);
+        let e = _mm512_set1_ph(-1.0);
+        assert_eq_m512h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm512_mask_fmsub_round_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_mask_fmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             a,
-            0b0101010101010101,
+            0b01010101010101010101010101010101,
             b,
             c,
         );
-        let e = _mm512_setr_ph(
-            -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0,
-            -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0,
+        let e = _mm512_set_ph(
+            1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0,
+            1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0,
         );
         assert_eq_m512h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask3_fmadd_round_pch() {
-        let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, 2.0);
-        let c = _mm512_set1_pch(0.0, 3.0);
-        let r = _mm512_mask3_fmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+    unsafe fn test_mm512_mask3_fmsub_round_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_mask3_fmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             a,
             b,
             c,
-            0b0101010101010101,
+            0b01010101010101010101010101010101,
         );
-        let e = _mm512_setr_ph(
-            -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0,
-            -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0,
+        let e = _mm512_set_ph(
+            3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0,
+            3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0,
         );
         assert_eq_m512h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_maskz_fmadd_round_pch() {
-        let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, 2.0);
-        let c = _mm512_set1_pch(0.0, 3.0);
-        let r = _mm512_maskz_fmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            0b0101010101010101,
+    unsafe fn test_mm512_maskz_fmsub_round_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_maskz_fmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            0b01010101010101010101010101010101,
             a,
             b,
             c,
         );
-        let e = _mm512_setr_ph(
-            -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0,
-            -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0,
+        let e = _mm512_set_ph(
+            0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0,
+            0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0,
        );
         assert_eq_m512h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_fmadd_sch() {
-        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
-        let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
-        let r = _mm_fmadd_sch(a, b, c);
-        let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+    unsafe fn test_mm_fmsub_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_fmsub_sh(a, b, c);
+        let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask_fmadd_sch() {
-        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
-        let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
-        let r = _mm_mask_fmadd_sch(a, 0, b, c);
-        let e = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+    unsafe fn test_mm_mask_fmsub_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_mask_fmsub_sh(a, 0, b, c);
+        let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
-        let r = _mm_mask_fmadd_sch(a, 1, b, c);
-        let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let r = _mm_mask_fmsub_sh(a, 1, b, c);
+        let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask3_fmadd_sch() {
-        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
-        let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
-        let r = _mm_mask3_fmadd_sch(a, b, c, 0);
-        let e = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+    unsafe fn test_mm_mask3_fmsub_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_mask3_fmsub_sh(a, b, c, 0);
+        let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
         assert_eq_m128h(r, e);
-        let r = _mm_mask3_fmadd_sch(a, b, c, 1);
-        let e = _mm_setr_ph(-2.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        let r = _mm_mask3_fmsub_sh(a, b, c, 1);
+        let e = _mm_setr_ph(-1.0, 30., 31., 32., 33., 34., 35., 36.);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_maskz_fmadd_sch() {
-        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
-        let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
-        let r = _mm_maskz_fmadd_sch(0, a, b, c);
-        let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+    unsafe fn test_mm_maskz_fmsub_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_maskz_fmsub_sh(0, a, b, c);
+        let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
-        let r = _mm_maskz_fmadd_sch(1, a, b, c);
-        let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let r = _mm_maskz_fmsub_sh(1, a, b, c);
+        let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_fmadd_round_sch() {
-        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
-        let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
-        let r = _mm_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c);
-        let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+    unsafe fn test_mm_fmsub_round_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c);
+        let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask_fmadd_round_sch() {
-        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
-        let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
-        let r = _mm_mask_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+    unsafe fn test_mm_mask_fmsub_round_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_mask_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             a, 0, b, c,
         );
-        let e = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
-        let r = _mm_mask_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+        let r = _mm_mask_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             a, 1, b, c,
         );
-        let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask3_fmadd_round_sch() {
-        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
-        let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
-        let r = _mm_mask3_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+    unsafe fn test_mm_mask3_fmsub_round_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_mask3_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             a, b, c, 0,
         );
-        let e = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
         assert_eq_m128h(r, e);
-        let r = _mm_mask3_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+        let r = _mm_mask3_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             a, b, c, 1,
         );
-        let e = _mm_setr_ph(-2.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        let e = _mm_setr_ph(-1.0, 30., 31., 32., 33., 34., 35., 36.);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_maskz_fmadd_round_sch() {
-        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
-        let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
-        let r = _mm_maskz_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+    unsafe fn test_mm_maskz_fmsub_round_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_maskz_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             0, a, b, c,
         );
-        let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
-        let r = _mm_maskz_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+        let r = _mm_maskz_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             1, a, b, c,
         );
-        let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_fcmadd_pch() {
-        let a = _mm_set1_pch(0.0, 1.0);
-        let b = _mm_set1_pch(0.0, 2.0);
-        let c = _mm_set1_pch(0.0, 3.0);
-        let r = _mm_fcmadd_pch(a, b, c);
-        let e = _mm_set1_pch(2.0, 3.0);
+    unsafe fn test_mm_fnmadd_ph() {
+        let a = _mm_set1_ph(1.0);
+        let b = _mm_set1_ph(2.0);
+        let c = _mm_set1_ph(3.0);
+        let r = _mm_fnmadd_ph(a, b, c);
+        let e = _mm_set1_ph(1.0);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_mask_fcmadd_pch() {
-        let a = _mm_set1_pch(0.0, 1.0);
-        let b = _mm_set1_pch(0.0, 2.0);
-        let c = _mm_set1_pch(0.0, 3.0);
-        let r = _mm_mask_fcmadd_pch(a, 0b0101, b, c);
-        let e = _mm_setr_ph(2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0);
+    unsafe fn test_mm_mask_fnmadd_ph() {
+        let a = _mm_set1_ph(1.0);
+        let b = _mm_set1_ph(2.0);
+        let c = _mm_set1_ph(3.0);
+        let r = _mm_mask_fnmadd_ph(a, 0b01010101, b, c);
+        let e = _mm_set_ph(1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_mask3_fcmadd_pch() {
-        let a = _mm_set1_pch(0.0, 1.0);
-        let b = _mm_set1_pch(0.0, 2.0);
-        let c = _mm_set1_pch(0.0, 3.0);
-        let r = _mm_mask3_fcmadd_pch(a, b, c, 0b0101);
-        let e = _mm_setr_ph(2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0);
+    unsafe fn test_mm_mask3_fnmadd_ph() {
+        let a = _mm_set1_ph(1.0);
+        let b = _mm_set1_ph(2.0);
+        let c = _mm_set1_ph(3.0);
+        let r = _mm_mask3_fnmadd_ph(a, b, c, 0b01010101);
+        let e = _mm_set_ph(3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_maskz_fcmadd_pch() {
-        let a = _mm_set1_pch(0.0, 1.0);
-        let b = _mm_set1_pch(0.0, 2.0);
-        let c = _mm_set1_pch(0.0, 3.0);
-        let r = _mm_maskz_fcmadd_pch(0b0101, a, b, c);
-        let e = _mm_setr_ph(2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0);
+    unsafe fn test_mm_maskz_fnmadd_ph() {
+        let a = _mm_set1_ph(1.0);
+        let b = _mm_set1_ph(2.0);
+        let c = _mm_set1_ph(3.0);
+        let r = _mm_maskz_fnmadd_ph(0b01010101, a, b, c);
+        let e = _mm_set_ph(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0);
         assert_eq_m128h(r, e);
     }
 
-    #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_fcmadd_pch() {
-        let a = _mm256_set1_pch(0.0, 1.0);
-        let b = _mm256_set1_pch(0.0, 2.0);
-        let c = _mm256_set1_pch(0.0, 3.0);
-        let r = _mm256_fcmadd_pch(a, b, c);
-        let e = _mm256_set1_pch(2.0, 3.0);
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm256_fnmadd_ph() {
+        let a = _mm256_set1_ph(1.0);
+        let b = _mm256_set1_ph(2.0);
+        let c = _mm256_set1_ph(3.0);
+        let r = _mm256_fnmadd_ph(a, b, c);
+        let e = _mm256_set1_ph(1.0);
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_mask_fcmadd_pch() {
-        let a = _mm256_set1_pch(0.0, 1.0);
-        let b = _mm256_set1_pch(0.0, 2.0);
-        let c = _mm256_set1_pch(0.0, 3.0);
-        let r = _mm256_mask_fcmadd_pch(a, 0b01010101, b, c);
-        let e = _mm256_setr_ph(
-            2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0,
+    unsafe fn test_mm256_mask_fnmadd_ph() {
+        let a = _mm256_set1_ph(1.0);
+        let b = _mm256_set1_ph(2.0);
+        let c = _mm256_set1_ph(3.0);
+        let r = _mm256_mask_fnmadd_ph(a, 0b0101010101010101, b, c);
+        let e = _mm256_set_ph(
+            1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
         );
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_mask3_fcmadd_pch() {
-        let a = _mm256_set1_pch(0.0, 1.0);
-        let b = _mm256_set1_pch(0.0, 2.0);
-        let c = _mm256_set1_pch(0.0, 3.0);
-        let r = _mm256_mask3_fcmadd_pch(a, b, c, 0b01010101);
-        let e = _mm256_setr_ph(
-            2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0,
+    unsafe fn test_mm256_mask3_fnmadd_ph() {
+        let a = _mm256_set1_ph(1.0);
+        let b = _mm256_set1_ph(2.0);
+        let c = _mm256_set1_ph(3.0);
+        let r = _mm256_mask3_fnmadd_ph(a, b, c, 0b0101010101010101);
+        let e = _mm256_set_ph(
+            3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0,
         );
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_maskz_fcmadd_pch() {
-        let a = _mm256_set1_pch(0.0, 1.0);
-        let b = _mm256_set1_pch(0.0, 2.0);
-        let c = _mm256_set1_pch(0.0, 3.0);
-        let r = _mm256_maskz_fcmadd_pch(0b01010101, a, b, c);
-        let e = _mm256_setr_ph(
-            2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0,
+    unsafe fn test_mm256_maskz_fnmadd_ph() {
+        let a = _mm256_set1_ph(1.0);
+        let b = _mm256_set1_ph(2.0);
+        let c = _mm256_set1_ph(3.0);
+        let r = _mm256_maskz_fnmadd_ph(0b0101010101010101, a, b, c);
+        let e = _mm256_set_ph(
+            0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0,
         );
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_fcmadd_pch() {
-        let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, 2.0);
-        let c = _mm512_set1_pch(0.0, 3.0);
-        let r = _mm512_fcmadd_pch(a, b, c);
-        let e = _mm512_set1_pch(2.0, 3.0);
+    unsafe fn test_mm512_fnmadd_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_fnmadd_ph(a, b, c);
+        let e = _mm512_set1_ph(1.0);
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask_fcmadd_pch() {
-        let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, 2.0);
-        let c = _mm512_set1_pch(0.0, 3.0);
-        let r = _mm512_mask_fcmadd_pch(a, 0b0101010101010101, b, c);
-        let e = _mm512_setr_ph(
-            2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0,
-            3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0,
+    unsafe fn test_mm512_mask_fnmadd_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_mask_fnmadd_ph(a, 0b01010101010101010101010101010101, b, c);
+        let e = _mm512_set_ph(
+            1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
+            1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask3_fcmadd_pch() {
-        let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, 2.0);
-        let c = _mm512_set1_pch(0.0, 3.0);
-        let r = _mm512_mask3_fcmadd_pch(a, b, c, 0b0101010101010101);
-        let e = _mm512_setr_ph(
-            2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0,
-            3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0,
+    unsafe fn test_mm512_mask3_fnmadd_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_mask3_fnmadd_ph(a, b, c, 0b01010101010101010101010101010101);
+        let e = _mm512_set_ph(
+            3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0,
+            1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_maskz_fcmadd_pch() {
-        let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, 2.0);
-        let c = _mm512_set1_pch(0.0, 3.0);
-        let r = _mm512_maskz_fcmadd_pch(0b0101010101010101, a, b, c);
-        let e = _mm512_setr_ph(
-            2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0,
-            3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0,
+    unsafe fn test_mm512_maskz_fnmadd_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_maskz_fnmadd_ph(0b01010101010101010101010101010101, a, b, c);
+        let e = _mm512_set_ph(
+            0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0,
+            1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0,
        );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_fcmadd_round_pch() {
-        let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, 2.0);
-        let c = _mm512_set1_pch(0.0, 3.0);
+    unsafe fn test_mm512_fnmadd_round_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
         let r =
-            _mm512_fcmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c);
-        let e = _mm512_set1_pch(2.0, 3.0);
+            _mm512_fnmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c);
+        let e = _mm512_set1_ph(1.0);
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask_fcmadd_round_pch() {
-        let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, 2.0);
-        let c = _mm512_set1_pch(0.0, 3.0);
-        let r = _mm512_mask_fcmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+    unsafe fn test_mm512_mask_fnmadd_round_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_mask_fnmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             a,
-            0b0101010101010101,
+            0b01010101010101010101010101010101,
             b,
             c,
         );
-        let e = _mm512_setr_ph(
-            2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0,
-            3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0,
+        let e = _mm512_set_ph(
+            1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
+            1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask3_fcmadd_round_pch() {
-        let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, 2.0);
-        let c = _mm512_set1_pch(0.0, 3.0);
-        let r = _mm512_mask3_fcmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+    unsafe fn test_mm512_mask3_fnmadd_round_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_mask3_fnmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             a,
             b,
             c,
-            0b0101010101010101,
+            0b01010101010101010101010101010101,
         );
-        let e = _mm512_setr_ph(
-            2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0,
-            3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0,
+        let e = _mm512_set_ph(
+            3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0,
+            1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0,
        );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_maskz_fcmadd_round_pch() {
-        let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, 2.0);
-        let c = _mm512_set1_pch(0.0, 3.0);
-        let r = _mm512_maskz_fcmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            0b0101010101010101,
+    unsafe fn test_mm512_maskz_fnmadd_round_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_maskz_fnmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            0b01010101010101010101010101010101,
             a,
             b,
             c,
         );
-        let e = _mm512_setr_ph(
-            2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0,
-            3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0,
+        let e = _mm512_set_ph(
+            0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0,
+            1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0,
        );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_fcmadd_sch() {
-        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
-        let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
-        let r = _mm_fcmadd_sch(a, b, c);
-        let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+    unsafe fn test_mm_fnmadd_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_fnmadd_sh(a, b, c);
+        let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask_fcmadd_sch() {
-        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
-        let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
-        let r = _mm_mask_fcmadd_sch(a, 0, b, c);
-        let e = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+    unsafe fn test_mm_mask_fnmadd_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_mask_fnmadd_sh(a, 0, b, c);
+        let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
-        let r = _mm_mask_fcmadd_sch(a, 1, b, c);
-        let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let r = _mm_mask_fnmadd_sh(a, 1, b, c);
+        let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask3_fcmadd_sch() {
-        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
-        let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
-        let r = _mm_mask3_fcmadd_sch(a, b, c, 0);
-        let e = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+    unsafe fn test_mm_mask3_fnmadd_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_mask3_fnmadd_sh(a, b, c, 0);
+        let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
         assert_eq_m128h(r, e);
-        let r = _mm_mask3_fcmadd_sch(a, b, c, 1);
-        let e = _mm_setr_ph(2.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        let r = _mm_mask3_fnmadd_sh(a, b, c, 1);
+        let e = _mm_setr_ph(1.0, 30., 31., 32., 33., 34., 35., 36.);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_maskz_fcmadd_sch() {
-        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
-        let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
-        let r = _mm_maskz_fcmadd_sch(0, a, b, c);
-        let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+    unsafe fn test_mm_maskz_fnmadd_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_maskz_fnmadd_sh(0, a, b, c);
+        let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
-        let r = _mm_maskz_fcmadd_sch(1, a, b, c);
-        let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let r = _mm_maskz_fnmadd_sh(1, a, b, c);
+        let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_fcmadd_round_sch() {
-        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
-        let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
-        let r = _mm_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c);
-        let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+    unsafe fn test_mm_fnmadd_round_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c);
+        let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask_fcmadd_round_sch() {
-        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
-        let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
-        let r = _mm_mask_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+    unsafe fn test_mm_mask_fnmadd_round_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_mask_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             a, 0, b, c,
         );
-        let e = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
-        let r = _mm_mask_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+        let r = _mm_mask_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             a, 1, b, c,
         );
-        let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask3_fcmadd_round_sch() {
-        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
-        let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
-        let r = _mm_mask3_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+    unsafe fn test_mm_mask3_fnmadd_round_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_mask3_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             a, b, c, 0,
         );
-        let e = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
         assert_eq_m128h(r, e);
-        let r = _mm_mask3_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+        let r = _mm_mask3_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             a, b, c, 1,
         );
-        let e = _mm_setr_ph(2.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        let e = _mm_setr_ph(1.0, 30., 31., 32., 33., 34., 35., 36.);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_maskz_fcmadd_round_sch() {
-        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
-        let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
-        let r = _mm_maskz_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+    unsafe fn test_mm_maskz_fnmadd_round_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_maskz_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             0, a, b, c,
         );
-        let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
-        let r = _mm_maskz_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+        let r = _mm_maskz_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             1, a, b, c,
         );
-        let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_fmadd_ph() {
+    unsafe fn test_mm_fnmsub_ph() {
         let a = _mm_set1_ph(1.0);
         let b = _mm_set1_ph(2.0);
         let c = _mm_set1_ph(3.0);
-        let r = _mm_fmadd_ph(a, b, c);
-        let e = _mm_set1_ph(5.0);
+        let r = _mm_fnmsub_ph(a, b, c);
+        let e = _mm_set1_ph(-5.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_mask_fmadd_ph() {
+    unsafe fn test_mm_mask_fnmsub_ph() {
         let a = _mm_set1_ph(1.0);
         let b = _mm_set1_ph(2.0);
         let c = _mm_set1_ph(3.0);
-        let r = _mm_mask_fmadd_ph(a, 0b01010101, b, c);
-        let e = _mm_set_ph(1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0);
+        let r = _mm_mask_fnmsub_ph(a, 0b01010101, b, c);
+        let e = _mm_set_ph(1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_mask3_fmadd_ph() {
+    unsafe fn test_mm_mask3_fnmsub_ph() {
         let a = _mm_set1_ph(1.0);
         let b = _mm_set1_ph(2.0);
         let c = _mm_set1_ph(3.0);
-        let r = _mm_mask3_fmadd_ph(a, b, c, 0b01010101);
-        let e = _mm_set_ph(3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0);
+        let r = _mm_mask3_fnmsub_ph(a, b, c, 0b01010101);
+        let e = _mm_set_ph(3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_maskz_fmadd_ph() {
+    unsafe fn test_mm_maskz_fnmsub_ph() {
         let a = _mm_set1_ph(1.0);
         let b = _mm_set1_ph(2.0);
         let c = _mm_set1_ph(3.0);
-        let r = _mm_maskz_fmadd_ph(0b01010101, a, b, c);
-        let e = _mm_set_ph(0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0);
+        let r = _mm_maskz_fnmsub_ph(0b01010101, a, b, c);
+        let e = _mm_set_ph(0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_fmadd_ph() {
+    unsafe fn test_mm256_fnmsub_ph() {
         let a = _mm256_set1_ph(1.0);
         let b = _mm256_set1_ph(2.0);
         let c = _mm256_set1_ph(3.0);
-        let r = _mm256_fmadd_ph(a, b, c);
-        let e = _mm256_set1_ph(5.0);
+        let r = _mm256_fnmsub_ph(a, b, c);
+        let e = _mm256_set1_ph(-5.0);
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_mask_fmadd_ph() {
+    unsafe fn test_mm256_mask_fnmsub_ph() {
         let a = _mm256_set1_ph(1.0);
         let b = _mm256_set1_ph(2.0);
         let c = _mm256_set1_ph(3.0);
-        let r = _mm256_mask_fmadd_ph(a, 0b0101010101010101, b, c);
+        let r = _mm256_mask_fnmsub_ph(a, 0b0101010101010101, b, c);
         let e = _mm256_set_ph(
-            1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0,
+            1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0,
         );
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_mask3_fmadd_ph() {
+    unsafe fn test_mm256_mask3_fnmsub_ph() {
         let a = _mm256_set1_ph(1.0);
         let b = _mm256_set1_ph(2.0);
         let c = _mm256_set1_ph(3.0);
-        let r = _mm256_mask3_fmadd_ph(a, b, c, 0b0101010101010101);
+        let r = _mm256_mask3_fnmsub_ph(a, b, c, 0b0101010101010101);
         let e = _mm256_set_ph(
-            3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0,
+            3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0,
         );
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_maskz_fmadd_ph() {
+    unsafe fn test_mm256_maskz_fnmsub_ph() {
         let a = _mm256_set1_ph(1.0);
         let b = _mm256_set1_ph(2.0);
         let c = _mm256_set1_ph(3.0);
-        let r = _mm256_maskz_fmadd_ph(0b0101010101010101, a, b, c);
+        let r = _mm256_maskz_fnmsub_ph(0b0101010101010101, a, b, c);
         let e = _mm256_set_ph(
-            0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0,
+            0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0,
         );
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_fmadd_ph() {
+    unsafe fn test_mm512_fnmsub_ph() {
         let a = _mm512_set1_ph(1.0);
         let b = _mm512_set1_ph(2.0);
         let c = _mm512_set1_ph(3.0);
-        let r = _mm512_fmadd_ph(a, b, c);
-        let e = _mm512_set1_ph(5.0);
+        let r = _mm512_fnmsub_ph(a, b, c);
+        let e = _mm512_set1_ph(-5.0);
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask_fmadd_ph() {
+    unsafe fn test_mm512_mask_fnmsub_ph() {
         let a = _mm512_set1_ph(1.0);
         let b = _mm512_set1_ph(2.0);
         let c = _mm512_set1_ph(3.0);
-        let r = _mm512_mask_fmadd_ph(a, 0b01010101010101010101010101010101, b, c);
+        let r = _mm512_mask_fnmsub_ph(a, 0b01010101010101010101010101010101, b, c);
         let e = _mm512_set_ph(
-            1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0,
-            5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0,
+            1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0,
+            1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask3_fmadd_ph() {
        let a = _mm512_set1_ph(1.0);
+    unsafe fn test_mm512_mask3_fnmsub_ph() {
+        let a = _mm512_set1_ph(1.0);
         let b = _mm512_set1_ph(2.0);
         let c = _mm512_set1_ph(3.0);
-        let r = _mm512_mask3_fmadd_ph(a, b, c, 0b01010101010101010101010101010101);
+        let r = _mm512_mask3_fnmsub_ph(a, b, c, 0b01010101010101010101010101010101);
         let e = _mm512_set_ph(
-            3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0,
-            5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0,
+            3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0,
+            3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_maskz_fmadd_ph() {
+    unsafe fn test_mm512_maskz_fnmsub_ph() {
         let a = _mm512_set1_ph(1.0);
         let b = _mm512_set1_ph(2.0);
         let c = _mm512_set1_ph(3.0);
-        let r = _mm512_maskz_fmadd_ph(0b01010101010101010101010101010101, a, b, c);
+        let r = _mm512_maskz_fnmsub_ph(0b01010101010101010101010101010101, a, b, c);
         let e = _mm512_set_ph(
-            0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0,
-            5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0,
+            0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0,
+            0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_fmadd_round_ph() {
+    unsafe fn test_mm512_fnmsub_round_ph() {
         let a = _mm512_set1_ph(1.0);
         let b = _mm512_set1_ph(2.0);
         let c = _mm512_set1_ph(3.0);
-        let r = _mm512_fmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c);
-        let e = _mm512_set1_ph(5.0);
+        let r =
+            _mm512_fnmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c);
+        let e = _mm512_set1_ph(-5.0);
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask_fmadd_round_ph() {
+    unsafe fn test_mm512_mask_fnmsub_round_ph() {
         let a = _mm512_set1_ph(1.0);
         let b = _mm512_set1_ph(2.0);
         let c = _mm512_set1_ph(3.0);
-        let r = _mm512_mask_fmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+        let r = _mm512_mask_fnmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             a,
             0b01010101010101010101010101010101,
             b,
             c,
         );
         let e = _mm512_set_ph(
-            1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0,
-            5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0,
+            1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0,
+            1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask3_fmadd_round_ph() {
+    unsafe fn test_mm512_mask3_fnmsub_round_ph() {
         let a = _mm512_set1_ph(1.0);
         let b = _mm512_set1_ph(2.0);
         let c = _mm512_set1_ph(3.0);
-        let r = _mm512_mask3_fmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+        let r = _mm512_mask3_fnmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             a,
             b,
             c,
             0b01010101010101010101010101010101,
         );
         let e = _mm512_set_ph(
-            3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0,
-            5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0,
+            3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0,
+            3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_maskz_fmadd_round_ph() {
+    unsafe fn test_mm512_maskz_fnmsub_round_ph() {
         let a = _mm512_set1_ph(1.0);
         let b = _mm512_set1_ph(2.0);
         let c = _mm512_set1_ph(3.0);
-        let r = _mm512_maskz_fmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+        let r = _mm512_maskz_fnmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             0b01010101010101010101010101010101,
             a,
             b,
             c,
         );
         let e = _mm512_set_ph(
-            0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0,
-            5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0,
+            0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0,
+            0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_fmadd_sh() {
+    unsafe fn test_mm_fnmsub_sh() {
         let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
         let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
         let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
-        let r = _mm_fmadd_sh(a, b, c);
-        let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.);
+        let r = _mm_fnmsub_sh(a, b, c);
+        let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask_fmadd_sh() {
+    unsafe fn test_mm_mask_fnmsub_sh() {
         let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
         let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
         let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
-        let r = _mm_mask_fmadd_sh(a, 0, b, c);
+        let r = _mm_mask_fnmsub_sh(a, 0, b, c);
         let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
-        let r = _mm_mask_fmadd_sh(a, 1, b, c);
-        let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.);
+        let r = _mm_mask_fnmsub_sh(a, 1, b, c);
+        let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask3_fmadd_sh() {
+    unsafe fn test_mm_mask3_fnmsub_sh() {
         let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
         let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
         let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
-        let r = _mm_mask3_fmadd_sh(a, b, c, 0);
+        let r = _mm_mask3_fnmsub_sh(a, b, c, 0);
         let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
         assert_eq_m128h(r, e);
-        let r = _mm_mask3_fmadd_sh(a, b, c, 1);
-        let e = _mm_setr_ph(5.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_mask3_fnmsub_sh(a, b, c, 1);
+        let e = _mm_setr_ph(-5.0, 30., 31., 32., 33., 34., 35., 36.);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_maskz_fmadd_sh() {
+    unsafe fn test_mm_maskz_fnmsub_sh() {
         let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
         let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
         let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
-        let r = _mm_maskz_fmadd_sh(0, a, b, c);
+        let r = _mm_maskz_fnmsub_sh(0, a, b, c);
         let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
-        let r = _mm_maskz_fmadd_sh(1, a, b, c);
-        let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.);
+        let r = _mm_maskz_fnmsub_sh(1, a, b, c);
+        let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_fmadd_round_sh() {
+    unsafe fn test_mm_fnmsub_round_sh() {
         let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
         let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
         let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
-        let r = _mm_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c);
-        let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.);
+        let r = _mm_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c);
+        let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask_fmadd_round_sh() {
+    unsafe fn test_mm_mask_fnmsub_round_sh() {
         let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
         let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
         let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
-        let r = _mm_mask_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+        let r = _mm_mask_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             a, 0, b, c,
         );
         let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
-        let r = _mm_mask_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+        let r = _mm_mask_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             a, 1, b, c,
         );
-        let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.);
+        let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask3_fmadd_round_sh() {
+    unsafe fn test_mm_mask3_fnmsub_round_sh() {
         let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
         let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
         let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
-        let r = _mm_mask3_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+        let r = _mm_mask3_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             a, b, c, 0,
         );
         let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
         assert_eq_m128h(r, e);
-        let r = _mm_mask3_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+        let r = _mm_mask3_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             a, b, c, 1,
         );
-        let e = _mm_setr_ph(5.0, 30., 31., 32., 33., 34., 35., 36.);
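+        // with the mask bit set, lane 0 is -(1 * 2) - 3 = -5; mask3 keeps lanes 1..7 from c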
+        let e = _mm_setr_ph(-5.0, 30., 31., 32., 33., 34., 35., 36.);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_maskz_fmadd_round_sh() {
+    unsafe fn test_mm_maskz_fnmsub_round_sh() {
         let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
         let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
         let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
-        let r = _mm_maskz_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+        let r = _mm_maskz_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             0, a, b, c,
         );
         let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
-        let r = _mm_maskz_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+        let r = _mm_maskz_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             1, a, b, c,
         );
-        let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.);
+        let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_fmsub_ph() {
+    unsafe fn test_mm_fmaddsub_ph() {
         let a = _mm_set1_ph(1.0);
         let b = _mm_set1_ph(2.0);
         let c = _mm_set1_ph(3.0);
-        let r = _mm_fmsub_ph(a, b, c);
-        let e = _mm_set1_ph(-1.0);
+        let r = _mm_fmaddsub_ph(a, b, c);
+        let e = _mm_set_ph(5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_mask_fmsub_ph() {
+    unsafe fn test_mm_mask_fmaddsub_ph() {
         let a = _mm_set1_ph(1.0);
         let b = _mm_set1_ph(2.0);
         let c = _mm_set1_ph(3.0);
-        let r = _mm_mask_fmsub_ph(a, 0b01010101, b, c);
-        let e = _mm_set_ph(1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0);
+        let r = _mm_mask_fmaddsub_ph(a, 0b00110011, b, c);
+        let e = _mm_set_ph(1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_mask3_fmsub_ph() {
+    unsafe fn test_mm_mask3_fmaddsub_ph() {
         let a = _mm_set1_ph(1.0);
         let b = _mm_set1_ph(2.0);
         let c = _mm_set1_ph(3.0);
-        let r = _mm_mask3_fmsub_ph(a, b, c, 0b01010101);
-        let e = _mm_set_ph(3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0);
+        let r = _mm_mask3_fmaddsub_ph(a, b, c, 0b00110011);
+        let e = _mm_set_ph(3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_maskz_fmsub_ph() {
+    unsafe fn test_mm_maskz_fmaddsub_ph() {
         let a = _mm_set1_ph(1.0);
         let b = _mm_set1_ph(2.0);
         let c = _mm_set1_ph(3.0);
-        let r = _mm_maskz_fmsub_ph(0b01010101, a, b, c);
-        let e = _mm_set_ph(0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0);
+        let r = _mm_maskz_fmaddsub_ph(0b00110011, a, b, c);
+        let e = _mm_set_ph(0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_fmsub_ph() {
+    unsafe fn test_mm256_fmaddsub_ph() {
         let a = _mm256_set1_ph(1.0);
         let b = _mm256_set1_ph(2.0);
         let c = _mm256_set1_ph(3.0);
-        let r = _mm256_fmsub_ph(a, b, c);
-        let e = _mm256_set1_ph(-1.0);
+        let r = _mm256_fmaddsub_ph(a, b, c);
+        let e = _mm256_set_ph(
+            5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0,
+        );
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_mask_fmsub_ph() {
+    unsafe fn test_mm256_mask_fmaddsub_ph() {
         let a = _mm256_set1_ph(1.0);
         let b = _mm256_set1_ph(2.0);
         let c = _mm256_set1_ph(3.0);
-        let r = _mm256_mask_fmsub_ph(a, 0b0101010101010101, b, c);
+        let r = _mm256_mask_fmaddsub_ph(a, 0b0011001100110011, b, c);
         let e = _mm256_set_ph(
-            1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0,
+            1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0,
         );
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_mask3_fmsub_ph() {
+    unsafe fn test_mm256_mask3_fmaddsub_ph() {
         let a = _mm256_set1_ph(1.0);
         let b = _mm256_set1_ph(2.0);
         let c = _mm256_set1_ph(3.0);
-        let r = _mm256_mask3_fmsub_ph(a, b, c, 0b0101010101010101);
+        let r = _mm256_mask3_fmaddsub_ph(a, b, c, 0b0011001100110011);
         let e = _mm256_set_ph(
-            3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0,
+            3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0,
         );
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_maskz_fmsub_ph() {
+    unsafe fn test_mm256_maskz_fmaddsub_ph() {
         let a = _mm256_set1_ph(1.0);
         let b = _mm256_set1_ph(2.0);
         let c = _mm256_set1_ph(3.0);
-        let r = _mm256_maskz_fmsub_ph(0b0101010101010101, a, b, c);
+        let r = _mm256_maskz_fmaddsub_ph(0b0011001100110011, a, b, c);
         let e = _mm256_set_ph(
-            0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0,
+            0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0,
        );
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_fmsub_ph() {
+    unsafe fn test_mm512_fmaddsub_ph() {
         let a = _mm512_set1_ph(1.0);
         let b = _mm512_set1_ph(2.0);
         let c = _mm512_set1_ph(3.0);
-        let r = _mm512_fmsub_ph(a, b, c);
-        let e = _mm512_set1_ph(-1.0);
+        let r = _mm512_fmaddsub_ph(a, b, c);
+        let e = _mm512_set_ph(
+            5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0,
+            5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0,
+        );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask_fmsub_ph() {
+    unsafe fn test_mm512_mask_fmaddsub_ph() {
         let a = _mm512_set1_ph(1.0);
         let b = _mm512_set1_ph(2.0);
         let c = _mm512_set1_ph(3.0);
-        let r = _mm512_mask_fmsub_ph(a, 0b01010101010101010101010101010101, b, c);
+        let r = _mm512_mask_fmaddsub_ph(a, 0b00110011001100110011001100110011, b, c);
         let e = _mm512_set_ph(
-            1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0,
-            1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0,
+            1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0,
+            1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0,
        );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask3_fmsub_ph() {
+    unsafe fn test_mm512_mask3_fmaddsub_ph() {
         let a = _mm512_set1_ph(1.0);
         let b = _mm512_set1_ph(2.0);
         let c = _mm512_set1_ph(3.0);
-        let r = _mm512_mask3_fmsub_ph(a, b, c, 0b01010101010101010101010101010101);
+        let r = _mm512_mask3_fmaddsub_ph(a, b, c, 0b00110011001100110011001100110011);
         let e = _mm512_set_ph(
-            3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0,
-            3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0,
+            3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0,
+            3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0,
        );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_maskz_fmsub_ph() {
+    unsafe fn test_mm512_maskz_fmaddsub_ph() {
         let a = _mm512_set1_ph(1.0);
         let b = _mm512_set1_ph(2.0);
         let c = _mm512_set1_ph(3.0);
-        let r = _mm512_maskz_fmsub_ph(0b01010101010101010101010101010101, a, b, c);
+        let r = _mm512_maskz_fmaddsub_ph(0b00110011001100110011001100110011, a, b, c);
         let e = _mm512_set_ph(
-            0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0,
-            0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0,
+            0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0,
+            0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0,
        );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_fmsub_round_ph() {
+    unsafe fn test_mm512_fmaddsub_round_ph() {
         let a = _mm512_set1_ph(1.0);
         let b = _mm512_set1_ph(2.0);
         let c = _mm512_set1_ph(3.0);
-        let r = _mm512_fmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c);
-        let e = _mm512_set1_ph(-1.0);
+        let r =
+            _mm512_fmaddsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c);
+        let e = _mm512_set_ph(
+            5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0,
+            5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0,
+        );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask_fmsub_round_ph() {
+    unsafe fn test_mm512_mask_fmaddsub_round_ph() {
         let a = _mm512_set1_ph(1.0);
         let b = _mm512_set1_ph(2.0);
         let c = _mm512_set1_ph(3.0);
-        let r = _mm512_mask_fmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+        let r = _mm512_mask_fmaddsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             a,
-            0b01010101010101010101010101010101,
+            0b00110011001100110011001100110011,
             b,
             c,
         );
         let e = _mm512_set_ph(
-            1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0,
-            1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0,
+            1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0,
+            1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0,
        );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask3_fmsub_round_ph() {
+    unsafe fn test_mm512_mask3_fmaddsub_round_ph() {
         let a = _mm512_set1_ph(1.0);
         let b = _mm512_set1_ph(2.0);
         let c = _mm512_set1_ph(3.0);
-        let r = _mm512_mask3_fmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+        let r = _mm512_mask3_fmaddsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             a,
             b,
             c,
-            0b01010101010101010101010101010101,
+            0b00110011001100110011001100110011,
         );
         let e = _mm512_set_ph(
-            3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0,
-            3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0,
+            3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0,
+            3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0,
        );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_maskz_fmsub_round_ph() {
+    unsafe fn test_mm512_maskz_fmaddsub_round_ph() {
         let a = _mm512_set1_ph(1.0);
         let b = _mm512_set1_ph(2.0);
         let c = _mm512_set1_ph(3.0);
-        let r = _mm512_maskz_fmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            0b01010101010101010101010101010101,
+        let r =
+            _mm512_maskz_fmaddsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+                0b00110011001100110011001100110011,
                a,
                b,
                c,
            );
         let e = _mm512_set_ph(
-            0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0,
-            0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0,
+            0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0,
+            0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0,
        );
         assert_eq_m512h(r, e);
     }
 
-    #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_fmsub_sh() {
-        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
-        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
-        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
-        let r = _mm_fmsub_sh(a, b, c);
-        let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.);
-        assert_eq_m128h(r, e);
-    }
-
-    #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask_fmsub_sh() {
-        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
-        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
-        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
-        let r = _mm_mask_fmsub_sh(a, 0, b, c);
-        let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
-        assert_eq_m128h(r, e);
-        let r = _mm_mask_fmsub_sh(a, 1, b, c);
-        let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.);
-        assert_eq_m128h(r, e);
-    }
-
-    #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask3_fmsub_sh() {
-        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
-        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
-        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
-        let r = _mm_mask3_fmsub_sh(a, b, c, 0);
-        let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
-        assert_eq_m128h(r, e);
-        let r = _mm_mask3_fmsub_sh(a, b, c, 1);
-        let e = _mm_setr_ph(-1.0, 30., 31., 32., 33., 34., 35., 36.);
-        assert_eq_m128h(r, e);
-    }
-
-    #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_maskz_fmsub_sh() {
-        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
-        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
-        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
-        let r = _mm_maskz_fmsub_sh(0, a, b, c);
-        let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.);
-        assert_eq_m128h(r, e);
-        let r = _mm_maskz_fmsub_sh(1, a, b, c);
-        let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.);
-        assert_eq_m128h(r, e);
-    }
-
-    #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_fmsub_round_sh() {
-        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
-        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
-        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
-        let r = _mm_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c);
-        let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.);
-        assert_eq_m128h(r, e);
-    }
-
-    #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask_fmsub_round_sh() {
-        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
-        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
-        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
-        let r = _mm_mask_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            a, 0, b, c,
-        );
-        let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
-        assert_eq_m128h(r, e);
-        let r = _mm_mask_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            a, 1, b, c,
-        );
-        let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.);
-        assert_eq_m128h(r, e);
-    }
-
-    #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask3_fmsub_round_sh() {
-        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
-        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
-        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
-        let r = _mm_mask3_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            a, b, c, 0,
-        );
-        let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
-        assert_eq_m128h(r, e);
-        let r = _mm_mask3_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            a, b, c, 1,
-        );
-        let e = _mm_setr_ph(-1.0, 30., 31., 32., 33., 34., 35., 36.);
-        assert_eq_m128h(r, e);
-    }
-
-    #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_maskz_fmsub_round_sh() {
-        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
-        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
-        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
-        let r = _mm_maskz_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            0, a, b, c,
-        );
-        let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.);
-        assert_eq_m128h(r, e);
-        let r = _mm_maskz_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            1, a, b, c,
-        );
-        let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.);
-        assert_eq_m128h(r, e);
-    }
-
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_fnmadd_ph() {
+    unsafe fn test_mm_fmsubadd_ph() {
         let a = _mm_set1_ph(1.0);
         let b = _mm_set1_ph(2.0);
         let c = _mm_set1_ph(3.0);
-        let r = _mm_fnmadd_ph(a, b, c);
-        let e = _mm_set1_ph(1.0);
+        let r = _mm_fmsubadd_ph(a, b, c);
+        let e = _mm_set_ph(-1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_mask_fnmadd_ph() {
+    unsafe fn test_mm_mask_fmsubadd_ph() {
         let a = _mm_set1_ph(1.0);
         let b = _mm_set1_ph(2.0);
         let c = _mm_set1_ph(3.0);
-        let r = _mm_mask_fnmadd_ph(a, 0b01010101, b, c);
-        let e = _mm_set_ph(1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0);
+        let r = _mm_mask_fmsubadd_ph(a, 0b00110011, b, c);
+        let e = _mm_set_ph(1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_mask3_fnmadd_ph() {
+    unsafe fn test_mm_mask3_fmsubadd_ph() {
         let a = _mm_set1_ph(1.0);
         let b = _mm_set1_ph(2.0);
         let c = _mm_set1_ph(3.0);
-        let r = _mm_mask3_fnmadd_ph(a, b, c, 0b01010101);
-        let e = _mm_set_ph(3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0);
+        let r = _mm_mask3_fmsubadd_ph(a, b, c, 0b00110011);
+        let e = _mm_set_ph(3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_maskz_fnmadd_ph() {
+    unsafe fn test_mm_maskz_fmsubadd_ph() {
         let a = _mm_set1_ph(1.0);
         let b = _mm_set1_ph(2.0);
         let c = _mm_set1_ph(3.0);
-        let r = _mm_maskz_fnmadd_ph(0b01010101, a, b, c);
-        let e = _mm_set_ph(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0);
+        let r = _mm_maskz_fmsubadd_ph(0b00110011, a, b, c);
+        let e = _mm_set_ph(0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_fnmadd_ph() {
+    unsafe fn test_mm256_fmsubadd_ph() {
         let a = _mm256_set1_ph(1.0);
         let b = _mm256_set1_ph(2.0);
         let c = _mm256_set1_ph(3.0);
-        let r = _mm256_fnmadd_ph(a, b, c);
-        let e = _mm256_set1_ph(1.0);
+        let r = _mm256_fmsubadd_ph(a, b, c);
+        let e = _mm256_set_ph(
+            -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0,
+        );
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_mask_fnmadd_ph() {
+    unsafe fn test_mm256_mask_fmsubadd_ph() {
         let a = _mm256_set1_ph(1.0);
         let b = _mm256_set1_ph(2.0);
         let c = _mm256_set1_ph(3.0);
-        let r = _mm256_mask_fnmadd_ph(a, 0b0101010101010101, b, c);
+        let r = _mm256_mask_fmsubadd_ph(a, 0b0011001100110011, b, c);
         let e = _mm256_set_ph(
-            1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
+            1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0,
         );
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_mask3_fnmadd_ph() {
+    unsafe fn test_mm256_mask3_fmsubadd_ph() {
         let a = _mm256_set1_ph(1.0);
         let b = _mm256_set1_ph(2.0);
         let c = _mm256_set1_ph(3.0);
-        let r = _mm256_mask3_fnmadd_ph(a, b, c, 0b0101010101010101);
+        let r = _mm256_mask3_fmsubadd_ph(a, b, c, 0b0011001100110011);
         let e = _mm256_set_ph(
-            3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0,
+            3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0,
         );
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_maskz_fnmadd_ph() {
+    unsafe fn test_mm256_maskz_fmsubadd_ph() {
         let a = _mm256_set1_ph(1.0);
         let b = _mm256_set1_ph(2.0);
         let c = _mm256_set1_ph(3.0);
-        let r = _mm256_maskz_fnmadd_ph(0b0101010101010101, a, b, c);
+        let r = _mm256_maskz_fmsubadd_ph(0b0011001100110011, a, b, c);
         let e = _mm256_set_ph(
-            0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0,
+            0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0,
        );
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_fnmadd_ph() {
+    unsafe fn test_mm512_fmsubadd_ph() {
         let a = _mm512_set1_ph(1.0);
         let b = _mm512_set1_ph(2.0);
         let c = _mm512_set1_ph(3.0);
-        let r = _mm512_fnmadd_ph(a, b, c);
-        let e = _mm512_set1_ph(1.0);
+        let r = _mm512_fmsubadd_ph(a, b, c);
+        let e = _mm512_set_ph(
+            -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0,
+            -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0,
+        );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask_fnmadd_ph() {
+    unsafe fn test_mm512_mask_fmsubadd_ph() {
         let a = _mm512_set1_ph(1.0);
         let b = _mm512_set1_ph(2.0);
         let c = _mm512_set1_ph(3.0);
-        let r = _mm512_mask_fnmadd_ph(a, 0b01010101010101010101010101010101, b, c);
+        let r = _mm512_mask_fmsubadd_ph(a, 0b00110011001100110011001100110011, b, c);
         let e = _mm512_set_ph(
-            1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
-            1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
+            1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0,
+            1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0,
        );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask3_fnmadd_ph() {
+    unsafe fn test_mm512_mask3_fmsubadd_ph() {
         let a = _mm512_set1_ph(1.0);
         let b = _mm512_set1_ph(2.0);
         let c = _mm512_set1_ph(3.0);
-        let r = _mm512_mask3_fnmadd_ph(a, b, c, 0b01010101010101010101010101010101);
+        let r = _mm512_mask3_fmsubadd_ph(a, b, c, 0b00110011001100110011001100110011);
         let e = _mm512_set_ph(
-            3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0,
-            1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0,
+            3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0,
+            3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0,
        );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_maskz_fnmadd_ph() {
+    unsafe fn test_mm512_maskz_fmsubadd_ph() {
         let a = _mm512_set1_ph(1.0);
         let b = _mm512_set1_ph(2.0);
         let c = _mm512_set1_ph(3.0);
-        let r = _mm512_maskz_fnmadd_ph(0b01010101010101010101010101010101, a, b, c);
+        let r = _mm512_maskz_fmsubadd_ph(0b00110011001100110011001100110011, a, b, c);
         let e = _mm512_set_ph(
-            0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0,
-            1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0,
+            0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0,
+            0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0,
        );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_fnmadd_round_ph() {
+    unsafe fn test_mm512_fmsubadd_round_ph() {
         let a = _mm512_set1_ph(1.0);
         let b = _mm512_set1_ph(2.0);
         let c = _mm512_set1_ph(3.0);
         let r =
-            _mm512_fnmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c);
-        let e = _mm512_set1_ph(1.0);
+            _mm512_fmsubadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c);
+        let e = _mm512_set_ph(
+            -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0,
+            -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0,
+        );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask_fnmadd_round_ph() {
+    unsafe fn test_mm512_mask_fmsubadd_round_ph() {
         let a = _mm512_set1_ph(1.0);
         let b = _mm512_set1_ph(2.0);
         let c = _mm512_set1_ph(3.0);
-        let r = _mm512_mask_fnmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+        let r = _mm512_mask_fmsubadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             a,
-            0b01010101010101010101010101010101,
+            0b00110011001100110011001100110011,
             b,
             c,
         );
         let e = _mm512_set_ph(
-            1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
-            1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
+            1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0,
+            1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0,
        );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask3_fnmadd_round_ph() {
+    unsafe fn test_mm512_mask3_fmsubadd_round_ph() {
         let a = _mm512_set1_ph(1.0);
         let b = _mm512_set1_ph(2.0);
         let c = _mm512_set1_ph(3.0);
-        let r = _mm512_mask3_fnmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+        let r = _mm512_mask3_fmsubadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             a,
             b,
             c,
-            0b01010101010101010101010101010101,
+            0b00110011001100110011001100110011,
         );
         let e = _mm512_set_ph(
-            3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0,
-            1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0,
+            3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0,
+            3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0,
        );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_maskz_fnmadd_round_ph() {
+    unsafe fn test_mm512_maskz_fmsubadd_round_ph() {
         let a = _mm512_set1_ph(1.0);
         let b = _mm512_set1_ph(2.0);
         let c = _mm512_set1_ph(3.0);
-        let r = _mm512_maskz_fnmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            0b01010101010101010101010101010101,
+        let r = _mm512_maskz_fmsubadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            0b00110011001100110011001100110011,
             a,
             b,
             c,
         );
         let e = _mm512_set_ph(
-            0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0,
-            1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0,
+            0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0,
+            0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0,
+        );
+        assert_eq_m512h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm_rcp_ph() {
+        let a = _mm_set1_ph(2.0);
+        let r = _mm_rcp_ph(a);
+        let e = _mm_set1_ph(0.5);
+        assert_eq_m128h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm_mask_rcp_ph() {
+        let a = _mm_set1_ph(2.0);
+        let src = _mm_set1_ph(1.0);
+        let r = _mm_mask_rcp_ph(src, 0b01010101, a);
+        let e = _mm_set_ph(1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5);
+        assert_eq_m128h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm_maskz_rcp_ph() {
+        let a = _mm_set1_ph(2.0);
+        let r = _mm_maskz_rcp_ph(0b01010101, a);
+        let e = _mm_set_ph(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5);
+        assert_eq_m128h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm256_rcp_ph() {
+        let a = _mm256_set1_ph(2.0);
+        let r = _mm256_rcp_ph(a);
+        let e = _mm256_set1_ph(0.5);
+        assert_eq_m256h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm256_mask_rcp_ph() {
+        let a = _mm256_set1_ph(2.0);
+        let src = _mm256_set1_ph(1.0);
+        let r = _mm256_mask_rcp_ph(src, 0b0101010101010101, a);
+        let e = _mm256_set_ph(
+            1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5,
+        );
+        assert_eq_m256h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm256_maskz_rcp_ph() {
+        let a = _mm256_set1_ph(2.0);
+        let r = _mm256_maskz_rcp_ph(0b0101010101010101, a);
+        let e = _mm256_set_ph(
+            0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5,
         );
+        assert_eq_m256h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm512_rcp_ph() {
+        let a = _mm512_set1_ph(2.0);
+        let r = _mm512_rcp_ph(a);
+        let e = _mm512_set1_ph(0.5);
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_fnmadd_sh() {
-        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
-        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
-        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
-        let r = _mm_fnmadd_sh(a, b, c);
-        let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
-        assert_eq_m128h(r, e);
+    unsafe fn test_mm512_mask_rcp_ph() {
+        let a = _mm512_set1_ph(2.0);
+        let src = _mm512_set1_ph(1.0);
+        let r = _mm512_mask_rcp_ph(src, 0b01010101010101010101010101010101, a);
+        let e = _mm512_set_ph(
+            1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0,
+            0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5,
+        );
+        assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm512_maskz_rcp_ph() {
+        let a = _mm512_set1_ph(2.0);
+        let r = _mm512_maskz_rcp_ph(0b01010101010101010101010101010101, a);
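+        // rcp is only an approximate reciprocal, but 1/2.0 is exactly representable
+        // as 0.5 in f16, so the bitwise comparison below should hold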
+        let e = _mm512_set_ph(
+            0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0,
+            0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5,
+        );
+        assert_eq_m512h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm_rcp_sh() {
+        let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0);
+        let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0);
+        let r = _mm_rcp_sh(a, b);
+        let e = _mm_setr_ph(0.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask_fnmadd_sh() {
-        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
-        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
-        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
-        let r = _mm_mask_fnmadd_sh(a, 0, b, c);
-        let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+    unsafe fn test_mm_mask_rcp_sh() {
+        let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0);
+        let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0);
+        let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0);
+        let r = _mm_mask_rcp_sh(src, 0, a, b);
+        let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0);
         assert_eq_m128h(r, e);
-        let r = _mm_mask_fnmadd_sh(a, 1, b, c);
-        let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let r = _mm_mask_rcp_sh(src, 1, a, b);
+        let e = _mm_setr_ph(0.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask3_fnmadd_sh() {
-        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
-        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
-        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
-        let r = _mm_mask3_fnmadd_sh(a, b, c, 0);
-        let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+    unsafe fn test_mm_maskz_rcp_sh() {
+        let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0);
+        let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0);
+        let r = _mm_maskz_rcp_sh(0, a, b);
+        let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0);
         assert_eq_m128h(r, e);
-        let r = _mm_mask3_fnmadd_sh(a, b, c, 1);
-        let e = _mm_setr_ph(1.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_maskz_rcp_sh(1, a, b);
+        let e = _mm_setr_ph(0.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0);
         assert_eq_m128h(r, e);
     }
 
-    #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_maskz_fnmadd_sh() {
-        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
-        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
-        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
-        let r = _mm_maskz_fnmadd_sh(0, a, b, c);
-        let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.);
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm_rsqrt_ph() {
+        let a = _mm_set1_ph(4.0);
+        let r = _mm_rsqrt_ph(a);
+        let e = _mm_set1_ph(0.5);
         assert_eq_m128h(r, e);
-        let r = _mm_maskz_fnmadd_sh(1, a, b, c);
-        let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+    }
+
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm_mask_rsqrt_ph() {
+        let a = _mm_set1_ph(4.0);
+        let src = _mm_set1_ph(1.0);
+        let r = _mm_mask_rsqrt_ph(src, 0b01010101, a);
+        let e = _mm_set_ph(1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5);
         assert_eq_m128h(r, e);
     }
 
-    #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_fnmadd_round_sh() {
-        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
-        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
-        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
-        let r = _mm_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c);
-        let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm_maskz_rsqrt_ph() {
+        let a = _mm_set1_ph(4.0);
+        let r = _mm_maskz_rsqrt_ph(0b01010101, a);
+        let e = _mm_set_ph(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5);
         assert_eq_m128h(r, e);
     }
 
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm256_rsqrt_ph() {
+        let a = _mm256_set1_ph(4.0);
+        let r = _mm256_rsqrt_ph(a);
+        let e = _mm256_set1_ph(0.5);
+        assert_eq_m256h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm256_mask_rsqrt_ph() {
+        let a = _mm256_set1_ph(4.0);
+        let src = _mm256_set1_ph(1.0);
+        let r = _mm256_mask_rsqrt_ph(src, 0b0101010101010101, a);
+        let e = _mm256_set_ph(
+            1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5,
+        );
+        assert_eq_m256h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm256_maskz_rsqrt_ph() {
+        let a = _mm256_set1_ph(4.0);
+        let r = _mm256_maskz_rsqrt_ph(0b0101010101010101, a);
+        let e = _mm256_set_ph(
+            0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5,
+        );
+        assert_eq_m256h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm512_rsqrt_ph() {
+        let a = _mm512_set1_ph(4.0);
+        let r = _mm512_rsqrt_ph(a);
+        let e = _mm512_set1_ph(0.5);
+        assert_eq_m512h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm512_mask_rsqrt_ph() {
+        let a = _mm512_set1_ph(4.0);
+        let src = _mm512_set1_ph(1.0);
+        let r = _mm512_mask_rsqrt_ph(src, 0b01010101010101010101010101010101, a);
+        let e = _mm512_set_ph(
+            1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0,
+            0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5,
+        );
+        assert_eq_m512h(r, e);
+    }
+
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask_fnmadd_round_sh() {
-        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
-        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
-        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
-        let r = _mm_mask_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            a, 0, b, c,
-        );
-        let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
-        assert_eq_m128h(r, e);
-        let r = _mm_mask_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            a, 1, b, c,
+    unsafe fn test_mm512_maskz_rsqrt_ph() {
+        let a = _mm512_set1_ph(4.0);
+        let r = _mm512_maskz_rsqrt_ph(0b01010101010101010101010101010101, a);
+        let e = _mm512_set_ph(
+            0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0,
+            0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5,
         );
-        let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
-        assert_eq_m128h(r, e);
+        assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask3_fnmadd_round_sh() {
-        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
-        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
-        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
-        let r = _mm_mask3_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            a, b, c, 0,
-        );
-        let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
-        assert_eq_m128h(r, e);
-        let r =
_mm_mask3_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, b, c, 1, - ); - let e = _mm_setr_ph(1.0, 30., 31., 32., 33., 34., 35., 36.); + unsafe fn test_mm_rsqrt_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); + let r = _mm_rsqrt_sh(a, b); + let e = _mm_setr_ph(0.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fnmadd_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_maskz_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0, a, b, c, - ); - let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); + unsafe fn test_mm_mask_rsqrt_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); + let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); + let r = _mm_mask_rsqrt_sh(src, 0, a, b); + let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); - let r = _mm_maskz_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 1, a, b, c, - ); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let r = _mm_mask_rsqrt_sh(src, 1, a, b); + let e = _mm_setr_ph(0.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_fnmsub_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_fnmsub_ph(a, b, c); - let e = _mm_set1_ph(-5.0); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_rsqrt_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); + let r = _mm_maskz_rsqrt_sh(0, a, b); + let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fnmsub_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_mask_fnmsub_ph(a, 0b01010101, b, c); - let e = _mm_set_ph(1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0); + let r = _mm_maskz_rsqrt_sh(1, a, b); + let e = _mm_setr_ph(0.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask3_fnmsub_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_mask3_fnmsub_ph(a, b, c, 0b01010101); - let e = _mm_set_ph(3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0); + unsafe fn test_mm_sqrt_ph() { + let a = _mm_set1_ph(4.0); + let r = _mm_sqrt_ph(a); + let e = _mm_set1_ph(2.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_fnmsub_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_maskz_fnmsub_ph(0b01010101, a, b, c); - let e = _mm_set_ph(0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0); + unsafe fn test_mm_mask_sqrt_ph() { + let a = _mm_set1_ph(4.0); + let src = _mm_set1_ph(1.0); + let r = _mm_mask_sqrt_ph(src, 0b01010101, a); + let e = _mm_set_ph(1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0); assert_eq_m128h(r, e); } 
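+    // As in the rcp/rsqrt tests above, the `mask_` variants copy lanes whose mask bit
+    // is clear from `src`, while the `maskz_` variants zero those lanes instead.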
#[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_fnmsub_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_fnmsub_ph(a, b, c); - let e = _mm256_set1_ph(-5.0); - assert_eq_m256h(r, e); + unsafe fn test_mm_maskz_sqrt_ph() { + let a = _mm_set1_ph(4.0); + let r = _mm_maskz_sqrt_ph(0b01010101, a); + let e = _mm_set_ph(0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_fnmsub_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_mask_fnmsub_ph(a, 0b0101010101010101, b, c); - let e = _mm256_set_ph( - 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, - ); + unsafe fn test_mm256_sqrt_ph() { + let a = _mm256_set1_ph(4.0); + let r = _mm256_sqrt_ph(a); + let e = _mm256_set1_ph(2.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask3_fnmsub_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_mask3_fnmsub_ph(a, b, c, 0b0101010101010101); + unsafe fn test_mm256_mask_sqrt_ph() { + let a = _mm256_set1_ph(4.0); + let src = _mm256_set1_ph(1.0); + let r = _mm256_mask_sqrt_ph(src, 0b0101010101010101, a); let e = _mm256_set_ph( - 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, + 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_fnmsub_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_maskz_fnmsub_ph(0b0101010101010101, a, b, c); + unsafe fn test_mm256_maskz_sqrt_ph() { + let a = _mm256_set1_ph(4.0); + let r = _mm256_maskz_sqrt_ph(0b0101010101010101, a); let e = _mm256_set_ph( - 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, + 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fnmsub_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_fnmsub_ph(a, b, c); - let e = _mm512_set1_ph(-5.0); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fnmsub_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask_fnmsub_ph(a, 0b01010101010101010101010101010101, b, c); - let e = _mm512_set_ph( - 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, - 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, - ); + unsafe fn test_mm512_sqrt_ph() { + let a = _mm512_set1_ph(4.0); + let r = _mm512_sqrt_ph(a); + let e = _mm512_set1_ph(2.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fnmsub_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask3_fnmsub_ph(a, b, c, 0b01010101010101010101010101010101); + unsafe fn test_mm512_mask_sqrt_ph() { + let a = _mm512_set1_ph(4.0); + let src = _mm512_set1_ph(1.0); + let r = _mm512_mask_sqrt_ph(src, 0b01010101010101010101010101010101, a); let e = _mm512_set_ph( - 3.0, -5.0, 3.0, 
-5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, - 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, + 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, + 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fnmsub_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_maskz_fnmsub_ph(0b01010101010101010101010101010101, a, b, c); + unsafe fn test_mm512_maskz_sqrt_ph() { + let a = _mm512_set1_ph(4.0); + let r = _mm512_maskz_sqrt_ph(0b01010101010101010101010101010101, a); let e = _mm512_set_ph( - 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, - 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, + 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, + 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fnmsub_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = - _mm512_fnmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); - let e = _mm512_set1_ph(-5.0); + unsafe fn test_mm512_sqrt_round_ph() { + let a = _mm512_set1_ph(4.0); + let r = _mm512_sqrt_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a); + let e = _mm512_set1_ph(2.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fnmsub_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask_fnmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, + unsafe fn test_mm512_mask_sqrt_round_ph() { + let a = _mm512_set1_ph(4.0); + let src = _mm512_set1_ph(1.0); + let r = _mm512_mask_sqrt_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0b01010101010101010101010101010101, - b, - c, - ); - let e = _mm512_set_ph( - 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, - 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, - ); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fnmsub_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask3_fnmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, - b, - c, - 0b01010101010101010101010101010101, ); let e = _mm512_set_ph( - 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, - 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, + 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, + 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fnmsub_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_maskz_fnmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_maskz_sqrt_round_ph() { + let a = _mm512_set1_ph(4.0); + let r = _mm512_maskz_sqrt_round_ph::<{ 
_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( 0b01010101010101010101010101010101, a, - b, - c, ); let e = _mm512_set_ph( - 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, - 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, + 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, + 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fnmsub_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_fnmsub_sh(a, b, c); - let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fnmsub_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_fnmsub_sh(a, 0, b, c); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_mask_fnmsub_sh(a, 1, b, c); - let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); + unsafe fn test_mm_sqrt_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); + let r = _mm_sqrt_sh(a, b); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask3_fnmsub_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask3_fnmsub_sh(a, b, c, 0); - let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + unsafe fn test_mm_mask_sqrt_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); + let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); + let r = _mm_mask_sqrt_sh(src, 0, a, b); + let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); - let r = _mm_mask3_fnmsub_sh(a, b, c, 1); - let e = _mm_setr_ph(-5.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_sqrt_sh(src, 1, a, b); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fnmsub_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_maskz_fnmsub_sh(0, a, b, c); - let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_maskz_fnmsub_sh(1, a, b, c); - let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); + unsafe fn test_mm_maskz_sqrt_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); + let r = _mm_maskz_sqrt_sh(0, a, b); + let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn 
test_mm_fnmsub_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); - let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); + let r = _mm_maskz_sqrt_sh(1, a, b); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fnmsub_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, 0, b, c, - ); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_mask_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, 1, b, c, - ); - let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); + unsafe fn test_mm_sqrt_round_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); + let r = _mm_sqrt_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask3_fnmsub_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask3_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, b, c, 0, + unsafe fn test_mm_mask_sqrt_round_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); + let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); + let r = _mm_mask_sqrt_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, ); - let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); - let r = _mm_mask3_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, b, c, 1, + let r = _mm_mask_sqrt_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 1, a, b, ); - let e = _mm_setr_ph(-5.0, 30., 31., 32., 33., 34., 35., 36.); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fnmsub_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_maskz_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0, a, b, c, - ); - let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_maskz_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 1, a, b, c, - ); - let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); + unsafe fn test_mm_maskz_sqrt_round_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(4.0, 40.0, 
41.0, 42.0, 43.0, 44.0, 45.0, 46.0); + let r = + _mm_maskz_sqrt_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_fmaddsub_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_fmaddsub_ph(a, b, c); - let e = _mm_set_ph(5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0); + let r = + _mm_maskz_sqrt_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fmaddsub_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_mask_fmaddsub_ph(a, 0b00110011, b, c); - let e = _mm_set_ph(1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0); + unsafe fn test_mm_max_ph() { + let a = _mm_set1_ph(2.0); + let b = _mm_set1_ph(1.0); + let r = _mm_max_ph(a, b); + let e = _mm_set1_ph(2.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask3_fmaddsub_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_mask3_fmaddsub_ph(a, b, c, 0b00110011); - let e = _mm_set_ph(3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0); + unsafe fn test_mm_mask_max_ph() { + let a = _mm_set1_ph(2.0); + let b = _mm_set1_ph(1.0); + let src = _mm_set1_ph(3.0); + let r = _mm_mask_max_ph(src, 0b01010101, a, b); + let e = _mm_set_ph(3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_fmaddsub_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_maskz_fmaddsub_ph(0b00110011, a, b, c); - let e = _mm_set_ph(0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0); + unsafe fn test_mm_maskz_max_ph() { + let a = _mm_set1_ph(2.0); + let b = _mm_set1_ph(1.0); + let r = _mm_maskz_max_ph(0b01010101, a, b); + let e = _mm_set_ph(0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_fmaddsub_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_fmaddsub_ph(a, b, c); - let e = _mm256_set_ph( - 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, - ); - assert_eq_m256h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_fmaddsub_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_mask_fmaddsub_ph(a, 0b0011001100110011, b, c); - let e = _mm256_set_ph( - 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, - ); + unsafe fn test_mm256_max_ph() { + let a = _mm256_set1_ph(2.0); + let b = _mm256_set1_ph(1.0); + let r = _mm256_max_ph(a, b); + let e = _mm256_set1_ph(2.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask3_fmaddsub_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_mask3_fmaddsub_ph(a, b, c, 0b0011001100110011); + unsafe fn test_mm256_mask_max_ph() { + let a = _mm256_set1_ph(2.0); + let b = _mm256_set1_ph(1.0); + let src = _mm256_set1_ph(3.0); + let r = _mm256_mask_max_ph(src, 
0b0101010101010101, a, b); let e = _mm256_set_ph( - 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, + 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_fmaddsub_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_maskz_fmaddsub_ph(0b0011001100110011, a, b, c); + unsafe fn test_mm256_maskz_max_ph() { + let a = _mm256_set1_ph(2.0); + let b = _mm256_set1_ph(1.0); + let r = _mm256_maskz_max_ph(0b0101010101010101, a, b); let e = _mm256_set_ph( - 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, + 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, ); assert_eq_m256h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmaddsub_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_fmaddsub_ph(a, b, c); - let e = _mm512_set_ph( - 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, - 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, - ); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fmaddsub_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask_fmaddsub_ph(a, 0b00110011001100110011001100110011, b, c); - let e = _mm512_set_ph( - 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, - 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, - ); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fmaddsub_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask3_fmaddsub_ph(a, b, c, 0b00110011001100110011001100110011); - let e = _mm512_set_ph( - 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, - 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, - ); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_max_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let r = _mm512_max_ph(a, b); + let e = _mm512_set1_ph(2.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmaddsub_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_maskz_fmaddsub_ph(0b00110011001100110011001100110011, a, b, c); + unsafe fn test_mm512_mask_max_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let src = _mm512_set1_ph(3.0); + let r = _mm512_mask_max_ph(src, 0b01010101010101010101010101010101, a, b); let e = _mm512_set_ph( - 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, - 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, + 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, + 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmaddsub_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = - 
_mm512_fmaddsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + unsafe fn test_mm512_maskz_max_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let r = _mm512_maskz_max_ph(0b01010101010101010101010101010101, a, b); let e = _mm512_set_ph( - 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, - 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, + 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, + 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fmaddsub_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask_fmaddsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, - 0b00110011001100110011001100110011, - b, - c, - ); - let e = _mm512_set_ph( - 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, - 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, - ); + unsafe fn test_mm512_max_round_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let r = _mm512_max_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set1_ph(2.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fmaddsub_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask3_fmaddsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_mask_max_round_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let src = _mm512_set1_ph(3.0); + let r = _mm512_mask_max_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b01010101010101010101010101010101, a, b, - c, - 0b00110011001100110011001100110011, ); let e = _mm512_set_ph( - 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, - 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, + 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, + 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmaddsub_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_maskz_fmaddsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b00110011001100110011001100110011, + unsafe fn test_mm512_maskz_max_round_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let r = _mm512_maskz_max_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101010101010101010101010101, a, b, - c, ); let e = _mm512_set_ph( - 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, - 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, + 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, + 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, ); assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_fmsubadd_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = 
_mm_fmsubadd_ph(a, b, c); - let e = _mm_set_ph(-1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_max_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let r = _mm_max_sh(a, b); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fmsubadd_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_mask_fmsubadd_ph(a, 0b00110011, b, c); - let e = _mm_set_ph(1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_max_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); + let r = _mm_mask_max_sh(src, 0, a, b); + let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + let r = _mm_mask_max_sh(src, 1, a, b); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask3_fmsubadd_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_mask3_fmsubadd_ph(a, b, c, 0b00110011); - let e = _mm_set_ph(3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_max_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let r = _mm_maskz_max_sh(0, a, b); + let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + let r = _mm_maskz_max_sh(1, a, b); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_max_round_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let r = _mm_max_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_max_round_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); + let r = _mm_mask_max_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, + ); + let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + let r = _mm_mask_max_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 1, a, b, + ); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_max_round_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let r = + _mm_maskz_max_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + 
assert_eq_m128h(r, e); + let r = + _mm_maskz_max_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_fmsubadd_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_maskz_fmsubadd_ph(0b00110011, a, b, c); - let e = _mm_set_ph(0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0); + unsafe fn test_mm_min_ph() { + let a = _mm_set1_ph(2.0); + let b = _mm_set1_ph(1.0); + let r = _mm_min_ph(a, b); + let e = _mm_set1_ph(1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_fmsubadd_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_fmsubadd_ph(a, b, c); - let e = _mm256_set_ph( - -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, - ); - assert_eq_m256h(r, e); + unsafe fn test_mm_mask_min_ph() { + let a = _mm_set1_ph(2.0); + let b = _mm_set1_ph(1.0); + let src = _mm_set1_ph(3.0); + let r = _mm_mask_min_ph(src, 0b01010101, a, b); + let e = _mm_set_ph(3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_fmsubadd_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_mask_fmsubadd_ph(a, 0b0011001100110011, b, c); - let e = _mm256_set_ph( - 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, - ); - assert_eq_m256h(r, e); + unsafe fn test_mm_maskz_min_ph() { + let a = _mm_set1_ph(2.0); + let b = _mm_set1_ph(1.0); + let r = _mm_maskz_min_ph(0b01010101, a, b); + let e = _mm_set_ph(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask3_fmsubadd_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_mask3_fmsubadd_ph(a, b, c, 0b0011001100110011); - let e = _mm256_set_ph( - 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, - ); + unsafe fn test_mm256_min_ph() { + let a = _mm256_set1_ph(2.0); + let b = _mm256_set1_ph(1.0); + let r = _mm256_min_ph(a, b); + let e = _mm256_set1_ph(1.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_fmsubadd_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_maskz_fmsubadd_ph(0b0011001100110011, a, b, c); + unsafe fn test_mm256_mask_min_ph() { + let a = _mm256_set1_ph(2.0); + let b = _mm256_set1_ph(1.0); + let src = _mm256_set1_ph(3.0); + let r = _mm256_mask_min_ph(src, 0b0101010101010101, a, b); let e = _mm256_set_ph( - 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, + 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, ); assert_eq_m256h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmsubadd_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_fmsubadd_ph(a, b, c); - let e = _mm512_set_ph( - -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, - -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, + #[simd_test(enable 
= "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_min_ph() { + let a = _mm256_set1_ph(2.0); + let b = _mm256_set1_ph(1.0); + let r = _mm256_maskz_min_ph(0b0101010101010101, a, b); + let e = _mm256_set_ph( + 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, ); - assert_eq_m512h(r, e); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fmsubadd_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask_fmsubadd_ph(a, 0b00110011001100110011001100110011, b, c); - let e = _mm512_set_ph( - 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, - 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, - ); + unsafe fn test_mm512_min_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let r = _mm512_min_ph(a, b); + let e = _mm512_set1_ph(1.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fmsubadd_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask3_fmsubadd_ph(a, b, c, 0b00110011001100110011001100110011); + unsafe fn test_mm512_mask_min_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let src = _mm512_set1_ph(3.0); + let r = _mm512_mask_min_ph(src, 0b01010101010101010101010101010101, a, b); let e = _mm512_set_ph( - 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, - 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, + 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, + 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmsubadd_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_maskz_fmsubadd_ph(0b00110011001100110011001100110011, a, b, c); + unsafe fn test_mm512_maskz_min_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let r = _mm512_maskz_min_ph(0b01010101010101010101010101010101, a, b); let e = _mm512_set_ph( - 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, - 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, + 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, + 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmsubadd_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = - _mm512_fmsubadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); - let e = _mm512_set_ph( - -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, - -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, - ); + unsafe fn test_mm512_min_round_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let r = _mm512_min_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set1_ph(1.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fmsubadd_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = 
_mm512_set1_ph(3.0); - let r = _mm512_mask_fmsubadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_mask_min_round_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let src = _mm512_set1_ph(3.0); + let r = _mm512_mask_min_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b01010101010101010101010101010101, a, - 0b00110011001100110011001100110011, b, - c, ); let e = _mm512_set_ph( - 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, - 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, + 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, + 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fmsubadd_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask3_fmsubadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_maskz_min_round_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let r = _mm512_maskz_min_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101010101010101010101010101, a, b, - c, - 0b00110011001100110011001100110011, ); let e = _mm512_set_ph( - 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, - 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, + 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, + 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmsubadd_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_maskz_fmsubadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b00110011001100110011001100110011, - a, - b, - c, + unsafe fn test_mm_min_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let r = _mm_min_sh(a, b); + let e = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_min_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); + let r = _mm_mask_min_sh(src, 0, a, b); + let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + let r = _mm_mask_min_sh(src, 1, a, b); + let e = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_min_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let r = _mm_maskz_min_sh(0, a, b); + let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + let r = _mm_maskz_min_sh(1, a, b); + let e = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_min_round_sh() { + let a = 
_mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let r = _mm_min_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_min_round_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); + let r = _mm_mask_min_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, ); - let e = _mm512_set_ph( - 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, - 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, + let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + let r = _mm_mask_min_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 1, a, b, ); - assert_eq_m512h(r, e); + let e = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_min_round_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let r = + _mm_maskz_min_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + let r = + _mm_maskz_min_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_rcp_ph() { - let a = _mm_set1_ph(2.0); - let r = _mm_rcp_ph(a); - let e = _mm_set1_ph(0.5); + unsafe fn test_mm_getexp_ph() { + let a = _mm_set1_ph(3.0); + let r = _mm_getexp_ph(a); + let e = _mm_set1_ph(1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_rcp_ph() { - let a = _mm_set1_ph(2.0); - let src = _mm_set1_ph(1.0); - let r = _mm_mask_rcp_ph(src, 0b01010101, a); - let e = _mm_set_ph(1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5); + unsafe fn test_mm_mask_getexp_ph() { + let a = _mm_set1_ph(3.0); + let src = _mm_set1_ph(4.0); + let r = _mm_mask_getexp_ph(src, 0b01010101, a); + let e = _mm_set_ph(4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_rcp_ph() { - let a = _mm_set1_ph(2.0); - let r = _mm_maskz_rcp_ph(0b01010101, a); - let e = _mm_set_ph(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5); + unsafe fn test_mm_maskz_getexp_ph() { + let a = _mm_set1_ph(3.0); + let r = _mm_maskz_getexp_ph(0b01010101, a); + let e = _mm_set_ph(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_rcp_ph() { - let a = _mm256_set1_ph(2.0); - let r = _mm256_rcp_ph(a); - let e = _mm256_set1_ph(0.5); + unsafe fn test_mm256_getexp_ph() { + let a = _mm256_set1_ph(3.0); + let r = _mm256_getexp_ph(a); + let e = _mm256_set1_ph(1.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_rcp_ph() { - let a = _mm256_set1_ph(2.0); - let src = _mm256_set1_ph(1.0); - let r = _mm256_mask_rcp_ph(src, 
0b0101010101010101, a); + unsafe fn test_mm256_mask_getexp_ph() { + let a = _mm256_set1_ph(3.0); + let src = _mm256_set1_ph(4.0); + let r = _mm256_mask_getexp_ph(src, 0b0101010101010101, a); let e = _mm256_set_ph( - 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, + 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_rcp_ph() { - let a = _mm256_set1_ph(2.0); - let r = _mm256_maskz_rcp_ph(0b0101010101010101, a); + unsafe fn test_mm256_maskz_getexp_ph() { + let a = _mm256_set1_ph(3.0); + let r = _mm256_maskz_getexp_ph(0b0101010101010101, a); let e = _mm256_set_ph( - 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, + 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_rcp_ph() { - let a = _mm512_set1_ph(2.0); - let r = _mm512_rcp_ph(a); - let e = _mm512_set1_ph(0.5); + unsafe fn test_mm512_getexp_ph() { + let a = _mm512_set1_ph(3.0); + let r = _mm512_getexp_ph(a); + let e = _mm512_set1_ph(1.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_rcp_ph() { - let a = _mm512_set1_ph(2.0); - let src = _mm512_set1_ph(1.0); - let r = _mm512_mask_rcp_ph(src, 0b01010101010101010101010101010101, a); + unsafe fn test_mm512_mask_getexp_ph() { + let a = _mm512_set1_ph(3.0); + let src = _mm512_set1_ph(4.0); + let r = _mm512_mask_getexp_ph(src, 0b01010101010101010101010101010101, a); let e = _mm512_set_ph( - 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, - 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, + 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, + 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_getexp_ph() { + let a = _mm512_set1_ph(3.0); + let r = _mm512_maskz_getexp_ph(0b01010101010101010101010101010101, a); + let e = _mm512_set_ph( + 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, + 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_getexp_round_ph() { + let a = _mm512_set1_ph(3.0); + let r = _mm512_getexp_round_ph::<_MM_FROUND_NO_EXC>(a); + let e = _mm512_set1_ph(1.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_getexp_round_ph() { + let a = _mm512_set1_ph(3.0); + let src = _mm512_set1_ph(4.0); + let r = _mm512_mask_getexp_round_ph::<_MM_FROUND_NO_EXC>( + src, + 0b01010101010101010101010101010101, + a, + ); + let e = _mm512_set_ph( + 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, + 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_getexp_round_ph() { + let a = _mm512_set1_ph(3.0); + let r = _mm512_maskz_getexp_round_ph::<_MM_FROUND_NO_EXC>( + 0b01010101010101010101010101010101, + a, + ); + let e = _mm512_set_ph( + 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, + 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 
1.0, 0.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_rcp_ph() { - let a = _mm512_set1_ph(2.0); - let r = _mm512_maskz_rcp_ph(0b01010101010101010101010101010101, a); - let e = _mm512_set_ph( - 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, - 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, - ); - assert_eq_m512h(r, e); + unsafe fn test_mm_getexp_sh() { + let a = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_getexp_sh(a, b); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_getexp_sh() { + let a = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); + let src = _mm_setr_ph(4.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_getexp_sh(src, 0, a, b); + let e = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + let r = _mm_mask_getexp_sh(src, 1, a, b); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_getexp_sh() { + let a = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_maskz_getexp_sh(0, a, b); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + let r = _mm_maskz_getexp_sh(1, a, b); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_rcp_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); - let r = _mm_rcp_sh(a, b); - let e = _mm_setr_ph(0.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + unsafe fn test_mm_getexp_round_sh() { + let a = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_getexp_round_sh::<_MM_FROUND_NO_EXC>(a, b); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_rcp_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); - let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); - let r = _mm_mask_rcp_sh(src, 0, a, b); - let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + unsafe fn test_mm_mask_getexp_round_sh() { + let a = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); + let src = _mm_setr_ph(4.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_getexp_round_sh::<_MM_FROUND_NO_EXC>(src, 0, a, b); + let e = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_mask_rcp_sh(src, 1, a, b); - let e = _mm_setr_ph(0.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let r = _mm_mask_getexp_round_sh::<_MM_FROUND_NO_EXC>(src, 1, a, b); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_rcp_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = 
_mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); - let r = _mm_maskz_rcp_sh(0, a, b); - let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + unsafe fn test_mm_maskz_getexp_round_sh() { + let a = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_maskz_getexp_round_sh::<_MM_FROUND_NO_EXC>(0, a, b); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_maskz_rcp_sh(1, a, b); - let e = _mm_setr_ph(0.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let r = _mm_maskz_getexp_round_sh::<_MM_FROUND_NO_EXC>(1, a, b); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_rsqrt_ph() { - let a = _mm_set1_ph(4.0); - let r = _mm_rsqrt_ph(a); - let e = _mm_set1_ph(0.5); + unsafe fn test_mm_getmant_ph() { + let a = _mm_set1_ph(10.0); + let r = _mm_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(a); + let e = _mm_set1_ph(1.25); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_rsqrt_ph() { - let a = _mm_set1_ph(4.0); - let src = _mm_set1_ph(1.0); - let r = _mm_mask_rsqrt_ph(src, 0b01010101, a); - let e = _mm_set_ph(1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5); + unsafe fn test_mm_mask_getmant_ph() { + let a = _mm_set1_ph(10.0); + let src = _mm_set1_ph(20.0); + let r = _mm_mask_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(src, 0b01010101, a); + let e = _mm_set_ph(20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_rsqrt_ph() { - let a = _mm_set1_ph(4.0); - let r = _mm_maskz_rsqrt_ph(0b01010101, a); - let e = _mm_set_ph(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5); + unsafe fn test_mm_maskz_getmant_ph() { + let a = _mm_set1_ph(10.0); + let r = _mm_maskz_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(0b01010101, a); + let e = _mm_set_ph(0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_rsqrt_ph() { - let a = _mm256_set1_ph(4.0); - let r = _mm256_rsqrt_ph(a); - let e = _mm256_set1_ph(0.5); + unsafe fn test_mm256_getmant_ph() { + let a = _mm256_set1_ph(10.0); + let r = _mm256_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(a); + let e = _mm256_set1_ph(1.25); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_rsqrt_ph() { - let a = _mm256_set1_ph(4.0); - let src = _mm256_set1_ph(1.0); - let r = _mm256_mask_rsqrt_ph(src, 0b0101010101010101, a); + unsafe fn test_mm256_mask_getmant_ph() { + let a = _mm256_set1_ph(10.0); + let src = _mm256_set1_ph(20.0); + let r = _mm256_mask_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>( + src, + 0b0101010101010101, + a, + ); let e = _mm256_set_ph( - 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, + 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, + 20.0, 1.25, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_rsqrt_ph() { - let a = _mm256_set1_ph(4.0); - let r = _mm256_maskz_rsqrt_ph(0b0101010101010101, a); + unsafe fn test_mm256_maskz_getmant_ph() { + let a = _mm256_set1_ph(10.0); + let r = _mm256_maskz_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>( + 0b0101010101010101, + a, + ); let e = _mm256_set_ph( - 0.0, 
0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, + 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_rsqrt_ph() { - let a = _mm512_set1_ph(4.0); - let r = _mm512_rsqrt_ph(a); - let e = _mm512_set1_ph(0.5); + unsafe fn test_mm512_getmant_ph() { + let a = _mm512_set1_ph(10.0); + let r = _mm512_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(a); + let e = _mm512_set1_ph(1.25); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_rsqrt_ph() { - let a = _mm512_set1_ph(4.0); - let src = _mm512_set1_ph(1.0); - let r = _mm512_mask_rsqrt_ph(src, 0b01010101010101010101010101010101, a); + unsafe fn test_mm512_mask_getmant_ph() { + let a = _mm512_set1_ph(10.0); + let src = _mm512_set1_ph(20.0); + let r = _mm512_mask_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>( + src, + 0b01010101010101010101010101010101, + a, + ); let e = _mm512_set_ph( - 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, - 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, + 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, + 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, + 20.0, 1.25, 20.0, 1.25, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_rsqrt_ph() { - let a = _mm512_set1_ph(4.0); - let r = _mm512_maskz_rsqrt_ph(0b01010101010101010101010101010101, a); + unsafe fn test_mm512_maskz_getmant_ph() { + let a = _mm512_set1_ph(10.0); + let r = _mm512_maskz_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>( + 0b01010101010101010101010101010101, + a, + ); let e = _mm512_set_ph( - 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, - 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, + 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, + 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_rsqrt_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); - let r = _mm_rsqrt_sh(a, b); - let e = _mm_setr_ph(0.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + unsafe fn test_mm512_getmant_round_ph() { + let a = _mm512_set1_ph(10.0); + let r = + _mm512_getmant_round_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN, _MM_FROUND_NO_EXC>( + a, + ); + let e = _mm512_set1_ph(1.25); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_getmant_round_ph() { + let a = _mm512_set1_ph(10.0); + let src = _mm512_set1_ph(20.0); + let r = _mm512_mask_getmant_round_ph::< + _MM_MANT_NORM_P75_1P5, + _MM_MANT_SIGN_NAN, + _MM_FROUND_NO_EXC, + >(src, 0b01010101010101010101010101010101, a); + let e = _mm512_set_ph( + 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, + 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, + 20.0, 1.25, 20.0, 1.25, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_getmant_round_ph() { + let a = _mm512_set1_ph(10.0); + let r = _mm512_maskz_getmant_round_ph::< + _MM_MANT_NORM_P75_1P5, + _MM_MANT_SIGN_NAN, + 
_MM_FROUND_NO_EXC, + >(0b01010101010101010101010101010101, a); + let e = _mm512_set_ph( + 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, + 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_getmant_sh() { + let a = _mm_setr_ph(15.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(10.0, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_getmant_sh::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(a, b); + let e = _mm_setr_ph(1.25, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_rsqrt_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); - let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); - let r = _mm_mask_rsqrt_sh(src, 0, a, b); - let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + unsafe fn test_mm_mask_getmant_sh() { + let a = _mm_setr_ph(15.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(10.0, 20., 21., 22., 23., 24., 25., 26.); + let src = _mm_setr_ph(20.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_getmant_sh::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(src, 0, a, b); + let e = _mm_setr_ph(20.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_mask_rsqrt_sh(src, 1, a, b); - let e = _mm_setr_ph(0.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let r = _mm_mask_getmant_sh::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(src, 1, a, b); + let e = _mm_setr_ph(1.25, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_rsqrt_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); - let r = _mm_maskz_rsqrt_sh(0, a, b); - let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + unsafe fn test_mm_maskz_getmant_sh() { + let a = _mm_setr_ph(15.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(10.0, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_maskz_getmant_sh::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(0, a, b); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_maskz_rsqrt_sh(1, a, b); - let e = _mm_setr_ph(0.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let r = _mm_maskz_getmant_sh::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(1, a, b); + let e = _mm_setr_ph(1.25, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_getmant_round_sh() { + let a = _mm_setr_ph(15.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(10.0, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_getmant_round_sh::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN, _MM_FROUND_NO_EXC>( + a, b, + ); + let e = _mm_setr_ph(1.25, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_getmant_round_sh() { + let a = _mm_setr_ph(15.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(10.0, 20., 21., 22., 23., 24., 25., 26.); + let src = _mm_setr_ph(20.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_getmant_round_sh::< + _MM_MANT_NORM_P75_1P5, + _MM_MANT_SIGN_NAN, + _MM_FROUND_NO_EXC, + >(src, 0, a, b); + let e = 
_mm_setr_ph(20.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + let r = _mm_mask_getmant_round_sh::< + _MM_MANT_NORM_P75_1P5, + _MM_MANT_SIGN_NAN, + _MM_FROUND_NO_EXC, + >(src, 1, a, b); + let e = _mm_setr_ph(1.25, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_getmant_round_sh() { + let a = _mm_setr_ph(15.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(10.0, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_maskz_getmant_round_sh::< + _MM_MANT_NORM_P75_1P5, + _MM_MANT_SIGN_NAN, + _MM_FROUND_NO_EXC, + >(0, a, b); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + let r = _mm_maskz_getmant_round_sh::< + _MM_MANT_NORM_P75_1P5, + _MM_MANT_SIGN_NAN, + _MM_FROUND_NO_EXC, + >(1, a, b); + let e = _mm_setr_ph(1.25, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_sqrt_ph() { - let a = _mm_set1_ph(4.0); - let r = _mm_sqrt_ph(a); - let e = _mm_set1_ph(2.0); + unsafe fn test_mm_roundscale_ph() { + let a = _mm_set1_ph(1.1); + let r = _mm_roundscale_ph::<0>(a); + let e = _mm_set1_ph(1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_sqrt_ph() { - let a = _mm_set1_ph(4.0); - let src = _mm_set1_ph(1.0); - let r = _mm_mask_sqrt_ph(src, 0b01010101, a); - let e = _mm_set_ph(1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0); + unsafe fn test_mm_mask_roundscale_ph() { + let a = _mm_set1_ph(1.1); + let src = _mm_set1_ph(2.0); + let r = _mm_mask_roundscale_ph::<0>(src, 0b01010101, a); + let e = _mm_set_ph(2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_sqrt_ph() { - let a = _mm_set1_ph(4.0); - let r = _mm_maskz_sqrt_ph(0b01010101, a); - let e = _mm_set_ph(0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0); + unsafe fn test_mm_maskz_roundscale_ph() { + let a = _mm_set1_ph(1.1); + let r = _mm_maskz_roundscale_ph::<0>(0b01010101, a); + let e = _mm_set_ph(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_sqrt_ph() { - let a = _mm256_set1_ph(4.0); - let r = _mm256_sqrt_ph(a); - let e = _mm256_set1_ph(2.0); + unsafe fn test_mm256_roundscale_ph() { + let a = _mm256_set1_ph(1.1); + let r = _mm256_roundscale_ph::<0>(a); + let e = _mm256_set1_ph(1.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_sqrt_ph() { - let a = _mm256_set1_ph(4.0); - let src = _mm256_set1_ph(1.0); - let r = _mm256_mask_sqrt_ph(src, 0b0101010101010101, a); + unsafe fn test_mm256_mask_roundscale_ph() { + let a = _mm256_set1_ph(1.1); + let src = _mm256_set1_ph(2.0); + let r = _mm256_mask_roundscale_ph::<0>(src, 0b0101010101010101, a); let e = _mm256_set_ph( - 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, + 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_sqrt_ph() { - let a = _mm256_set1_ph(4.0); - let r = _mm256_maskz_sqrt_ph(0b0101010101010101, a); + unsafe fn test_mm256_maskz_roundscale_ph() { + let a = _mm256_set1_ph(1.1); + let r = _mm256_maskz_roundscale_ph::<0>(0b0101010101010101, a); let e = _mm256_set_ph( - 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, + 0.0, 1.0, 0.0, 
1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_sqrt_ph() { - let a = _mm512_set1_ph(4.0); - let r = _mm512_sqrt_ph(a); - let e = _mm512_set1_ph(2.0); + unsafe fn test_mm512_roundscale_ph() { + let a = _mm512_set1_ph(1.1); + let r = _mm512_roundscale_ph::<0>(a); + let e = _mm512_set1_ph(1.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_sqrt_ph() { - let a = _mm512_set1_ph(4.0); - let src = _mm512_set1_ph(1.0); - let r = _mm512_mask_sqrt_ph(src, 0b01010101010101010101010101010101, a); + unsafe fn test_mm512_mask_roundscale_ph() { + let a = _mm512_set1_ph(1.1); + let src = _mm512_set1_ph(2.0); + let r = _mm512_mask_roundscale_ph::<0>(src, 0b01010101010101010101010101010101, a); let e = _mm512_set_ph( - 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, - 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, + 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, + 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_sqrt_ph() { - let a = _mm512_set1_ph(4.0); - let r = _mm512_maskz_sqrt_ph(0b01010101010101010101010101010101, a); + unsafe fn test_mm512_maskz_roundscale_ph() { + let a = _mm512_set1_ph(1.1); + let r = _mm512_maskz_roundscale_ph::<0>(0b01010101010101010101010101010101, a); let e = _mm512_set_ph( - 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, - 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, + 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, + 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_sqrt_round_ph() { - let a = _mm512_set1_ph(4.0); - let r = _mm512_sqrt_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a); - let e = _mm512_set1_ph(2.0); + unsafe fn test_mm512_roundscale_round_ph() { + let a = _mm512_set1_ph(1.1); + let r = _mm512_roundscale_round_ph::<0, _MM_FROUND_NO_EXC>(a); + let e = _mm512_set1_ph(1.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_sqrt_round_ph() { - let a = _mm512_set1_ph(4.0); - let src = _mm512_set1_ph(1.0); - let r = _mm512_mask_sqrt_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_mask_roundscale_round_ph() { + let a = _mm512_set1_ph(1.1); + let src = _mm512_set1_ph(2.0); + let r = _mm512_mask_roundscale_round_ph::<0, _MM_FROUND_NO_EXC>( src, 0b01010101010101010101010101010101, a, ); let e = _mm512_set_ph( - 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, - 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, + 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, + 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_sqrt_round_ph() { - let a = _mm512_set1_ph(4.0); - let r = _mm512_maskz_sqrt_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_maskz_roundscale_round_ph() { + let a = _mm512_set1_ph(1.1); + let r = _mm512_maskz_roundscale_round_ph::<0, 
_MM_FROUND_NO_EXC>( 0b01010101010101010101010101010101, a, ); let e = _mm512_set_ph( - 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, - 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, + 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, + 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_sqrt_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); - let r = _mm_sqrt_sh(a, b); - let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + unsafe fn test_mm_roundscale_sh() { + let a = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(1.1, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_roundscale_sh::<0>(a, b); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_sqrt_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); - let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); - let r = _mm_mask_sqrt_sh(src, 0, a, b); - let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + unsafe fn test_mm_mask_roundscale_sh() { + let a = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(1.1, 20., 21., 22., 23., 24., 25., 26.); + let src = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_roundscale_sh::<0>(src, 0, a, b); + let e = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_mask_sqrt_sh(src, 1, a, b); - let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let r = _mm_mask_roundscale_sh::<0>(src, 1, a, b); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_sqrt_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); - let r = _mm_maskz_sqrt_sh(0, a, b); - let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - assert_eq_m128h(r, e); - let r = _mm_maskz_sqrt_sh(1, a, b); - let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + unsafe fn test_mm_maskz_roundscale_sh() { + let a = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(1.1, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_maskz_roundscale_sh::<0>(0, a, b); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_sqrt_round_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); - let r = _mm_sqrt_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let r = _mm_maskz_roundscale_sh::<0>(1, a, b); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_sqrt_round_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 
46.0); - let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); - let r = _mm_mask_sqrt_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 0, a, b, - ); - let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + unsafe fn test_mm_roundscale_round_sh() { + let a = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(1.1, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_roundscale_round_sh::<0, _MM_FROUND_NO_EXC>(a, b); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_mask_sqrt_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 1, a, b, - ); - let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_roundscale_round_sh() { + let a = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(1.1, 20., 21., 22., 23., 24., 25., 26.); + let src = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_roundscale_round_sh::<0, _MM_FROUND_NO_EXC>(src, 0, a, b); + let e = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + let r = _mm_mask_roundscale_round_sh::<0, _MM_FROUND_NO_EXC>(src, 1, a, b); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_sqrt_round_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); - let r = - _mm_maskz_sqrt_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); - let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + unsafe fn test_mm_maskz_roundscale_round_sh() { + let a = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(1.1, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_maskz_roundscale_round_sh::<0, _MM_FROUND_NO_EXC>(0, a, b); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = - _mm_maskz_sqrt_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); - let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let r = _mm_maskz_roundscale_round_sh::<0, _MM_FROUND_NO_EXC>(1, a, b); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_max_ph() { - let a = _mm_set1_ph(2.0); - let b = _mm_set1_ph(1.0); - let r = _mm_max_ph(a, b); - let e = _mm_set1_ph(2.0); + unsafe fn test_mm_scalef_ph() { + let a = _mm_set1_ph(1.); + let b = _mm_set1_ph(3.); + let r = _mm_scalef_ph(a, b); + let e = _mm_set1_ph(8.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_max_ph() { - let a = _mm_set1_ph(2.0); - let b = _mm_set1_ph(1.0); - let src = _mm_set1_ph(3.0); - let r = _mm_mask_max_ph(src, 0b01010101, a, b); - let e = _mm_set_ph(3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0); + unsafe fn test_mm_mask_scalef_ph() { + let a = _mm_set1_ph(1.); + let b = _mm_set1_ph(3.); + let src = _mm_set1_ph(2.); + let r = _mm_mask_scalef_ph(src, 0b01010101, a, b); + let e = _mm_set_ph(2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_max_ph() { - let a = _mm_set1_ph(2.0); - let b = _mm_set1_ph(1.0); - let r = _mm_maskz_max_ph(0b01010101, a, b); - let e = _mm_set_ph(0.0, 2.0, 0.0, 2.0, 
0.0, 2.0, 0.0, 2.0); + unsafe fn test_mm_maskz_scalef_ph() { + let a = _mm_set1_ph(1.); + let b = _mm_set1_ph(3.); + let r = _mm_maskz_scalef_ph(0b01010101, a, b); + let e = _mm_set_ph(0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_max_ph() { - let a = _mm256_set1_ph(2.0); - let b = _mm256_set1_ph(1.0); - let r = _mm256_max_ph(a, b); - let e = _mm256_set1_ph(2.0); + unsafe fn test_mm256_scalef_ph() { + let a = _mm256_set1_ph(1.); + let b = _mm256_set1_ph(3.); + let r = _mm256_scalef_ph(a, b); + let e = _mm256_set1_ph(8.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_max_ph() { - let a = _mm256_set1_ph(2.0); - let b = _mm256_set1_ph(1.0); - let src = _mm256_set1_ph(3.0); - let r = _mm256_mask_max_ph(src, 0b0101010101010101, a, b); + unsafe fn test_mm256_mask_scalef_ph() { + let a = _mm256_set1_ph(1.); + let b = _mm256_set1_ph(3.); + let src = _mm256_set1_ph(2.); + let r = _mm256_mask_scalef_ph(src, 0b0101010101010101, a, b); let e = _mm256_set_ph( - 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, + 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_max_ph() { - let a = _mm256_set1_ph(2.0); - let b = _mm256_set1_ph(1.0); - let r = _mm256_maskz_max_ph(0b0101010101010101, a, b); + unsafe fn test_mm256_maskz_scalef_ph() { + let a = _mm256_set1_ph(1.); + let b = _mm256_set1_ph(3.); + let r = _mm256_maskz_scalef_ph(0b0101010101010101, a, b); let e = _mm256_set_ph( - 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, + 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_max_ph() { - let a = _mm512_set1_ph(2.0); - let b = _mm512_set1_ph(1.0); - let r = _mm512_max_ph(a, b); - let e = _mm512_set1_ph(2.0); + unsafe fn test_mm512_scalef_ph() { + let a = _mm512_set1_ph(1.); + let b = _mm512_set1_ph(3.); + let r = _mm512_scalef_ph(a, b); + let e = _mm512_set1_ph(8.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_max_ph() { - let a = _mm512_set1_ph(2.0); - let b = _mm512_set1_ph(1.0); - let src = _mm512_set1_ph(3.0); - let r = _mm512_mask_max_ph(src, 0b01010101010101010101010101010101, a, b); + unsafe fn test_mm512_mask_scalef_ph() { + let a = _mm512_set1_ph(1.); + let b = _mm512_set1_ph(3.); + let src = _mm512_set1_ph(2.); + let r = _mm512_mask_scalef_ph(src, 0b01010101010101010101010101010101, a, b); let e = _mm512_set_ph( - 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, - 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, + 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, + 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_max_ph() { - let a = _mm512_set1_ph(2.0); - let b = _mm512_set1_ph(1.0); - let r = _mm512_maskz_max_ph(0b01010101010101010101010101010101, a, b); + unsafe fn test_mm512_maskz_scalef_ph() { + let a = _mm512_set1_ph(1.); + let b = _mm512_set1_ph(3.); + let r = _mm512_maskz_scalef_ph(0b01010101010101010101010101010101, a, b); let e = _mm512_set_ph( - 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 
0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, - 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, + 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, + 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_max_round_ph() { - let a = _mm512_set1_ph(2.0); - let b = _mm512_set1_ph(1.0); - let r = _mm512_max_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm512_set1_ph(2.0); + unsafe fn test_mm512_scalef_round_ph() { + let a = _mm512_set1_ph(1.); + let b = _mm512_set1_ph(3.); + let r = _mm512_scalef_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set1_ph(8.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_max_round_ph() { - let a = _mm512_set1_ph(2.0); - let b = _mm512_set1_ph(1.0); - let src = _mm512_set1_ph(3.0); - let r = _mm512_mask_max_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_mask_scalef_round_ph() { + let a = _mm512_set1_ph(1.); + let b = _mm512_set1_ph(3.); + let src = _mm512_set1_ph(2.); + let r = _mm512_mask_scalef_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( src, 0b01010101010101010101010101010101, a, b, ); let e = _mm512_set_ph( - 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, - 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, + 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, + 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_max_round_ph() { - let a = _mm512_set1_ph(2.0); - let b = _mm512_set1_ph(1.0); - let r = _mm512_maskz_max_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_maskz_scalef_round_ph() { + let a = _mm512_set1_ph(1.); + let b = _mm512_set1_ph(3.); + let r = _mm512_maskz_scalef_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( 0b01010101010101010101010101010101, a, b, ); let e = _mm512_set_ph( - 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, - 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, + 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, + 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_max_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); - let r = _mm_max_sh(a, b); - let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + unsafe fn test_mm_scalef_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_scalef_sh(a, b); + let e = _mm_setr_ph(8.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_max_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); - let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); - let r = _mm_mask_max_sh(src, 0, a, b); - let e = _mm_setr_ph(3.0, 10.0, 
11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + unsafe fn test_mm_mask_scalef_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); + let src = _mm_setr_ph(2.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_scalef_sh(src, 0, a, b); + let e = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_mask_max_sh(src, 1, a, b); - let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let r = _mm_mask_scalef_sh(src, 1, a, b); + let e = _mm_setr_ph(8.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_max_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); - let r = _mm_maskz_max_sh(0, a, b); - let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + unsafe fn test_mm_maskz_scalef_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_maskz_scalef_sh(0, a, b); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_maskz_max_sh(1, a, b); - let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let r = _mm_maskz_scalef_sh(1, a, b); + let e = _mm_setr_ph(8.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_max_round_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); - let r = _mm_max_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + unsafe fn test_mm_scalef_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_scalef_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_setr_ph(8.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_max_round_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); - let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); - let r = _mm_mask_max_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm_mask_scalef_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); + let src = _mm_setr_ph(2.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_scalef_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( src, 0, a, b, ); - let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let e = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_mask_max_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm_mask_scalef_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( src, 1, a, b, ); - let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let e = _mm_setr_ph(8.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_max_round_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 
14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + unsafe fn test_mm_maskz_scalef_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); let r = - _mm_maskz_max_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); - let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + _mm_maskz_scalef_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); let r = - _mm_maskz_max_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); - let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + _mm_maskz_scalef_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_setr_ph(8.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_min_ph() { - let a = _mm_set1_ph(2.0); - let b = _mm_set1_ph(1.0); - let r = _mm_min_ph(a, b); - let e = _mm_set1_ph(1.0); + unsafe fn test_mm_reduce_ph() { + let a = _mm_set1_ph(1.25); + let r = _mm_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>(a); + let e = _mm_set1_ph(0.25); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_min_ph() { - let a = _mm_set1_ph(2.0); - let b = _mm_set1_ph(1.0); - let src = _mm_set1_ph(3.0); - let r = _mm_mask_min_ph(src, 0b01010101, a, b); - let e = _mm_set_ph(3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0); + unsafe fn test_mm_mask_reduce_ph() { + let a = _mm_set1_ph(1.25); + let src = _mm_set1_ph(2.0); + let r = _mm_mask_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>(src, 0b01010101, a); + let e = _mm_set_ph(2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_min_ph() { - let a = _mm_set1_ph(2.0); - let b = _mm_set1_ph(1.0); - let r = _mm_maskz_min_ph(0b01010101, a, b); - let e = _mm_set_ph(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0); + unsafe fn test_mm_maskz_reduce_ph() { + let a = _mm_set1_ph(1.25); + let r = _mm_maskz_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>(0b01010101, a); + let e = _mm_set_ph(0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_min_ph() { - let a = _mm256_set1_ph(2.0); - let b = _mm256_set1_ph(1.0); - let r = _mm256_min_ph(a, b); - let e = _mm256_set1_ph(1.0); + unsafe fn test_mm256_reduce_ph() { + let a = _mm256_set1_ph(1.25); + let r = _mm256_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>(a); + let e = _mm256_set1_ph(0.25); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_min_ph() { - let a = _mm256_set1_ph(2.0); - let b = _mm256_set1_ph(1.0); - let src = _mm256_set1_ph(3.0); - let r = _mm256_mask_min_ph(src, 0b0101010101010101, a, b); + unsafe fn test_mm256_mask_reduce_ph() { + let a = _mm256_set1_ph(1.25); + let src = _mm256_set1_ph(2.0); + let r = _mm256_mask_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>(src, 0b0101010101010101, a); let e = _mm256_set_ph( - 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, + 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_min_ph() { - let a = _mm256_set1_ph(2.0); - let b = _mm256_set1_ph(1.0); - let r = 
_mm256_maskz_min_ph(0b0101010101010101, a, b); + unsafe fn test_mm256_maskz_reduce_ph() { + let a = _mm256_set1_ph(1.25); + let r = _mm256_maskz_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>(0b0101010101010101, a); let e = _mm256_set_ph( - 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, + 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_min_ph() { - let a = _mm512_set1_ph(2.0); - let b = _mm512_set1_ph(1.0); - let r = _mm512_min_ph(a, b); - let e = _mm512_set1_ph(1.0); + unsafe fn test_mm512_reduce_ph() { + let a = _mm512_set1_ph(1.25); + let r = _mm512_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>(a); + let e = _mm512_set1_ph(0.25); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_min_ph() { - let a = _mm512_set1_ph(2.0); - let b = _mm512_set1_ph(1.0); - let src = _mm512_set1_ph(3.0); - let r = _mm512_mask_min_ph(src, 0b01010101010101010101010101010101, a, b); + unsafe fn test_mm512_mask_reduce_ph() { + let a = _mm512_set1_ph(1.25); + let src = _mm512_set1_ph(2.0); + let r = _mm512_mask_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>( + src, + 0b01010101010101010101010101010101, + a, + ); let e = _mm512_set_ph( - 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, - 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, + 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, + 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_min_ph() { - let a = _mm512_set1_ph(2.0); - let b = _mm512_set1_ph(1.0); - let r = _mm512_maskz_min_ph(0b01010101010101010101010101010101, a, b); + unsafe fn test_mm512_maskz_reduce_ph() { + let a = _mm512_set1_ph(1.25); + let r = _mm512_maskz_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>( + 0b01010101010101010101010101010101, + a, + ); let e = _mm512_set_ph( - 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, - 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, + 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, + 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_min_round_ph() { - let a = _mm512_set1_ph(2.0); - let b = _mm512_set1_ph(1.0); - let r = _mm512_min_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm512_set1_ph(1.0); + unsafe fn test_mm512_reduce_round_ph() { + let a = _mm512_set1_ph(1.25); + let r = _mm512_reduce_round_ph::<{ 16 | _MM_FROUND_TO_ZERO }, _MM_FROUND_NO_EXC>(a); + let e = _mm512_set1_ph(0.25); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_min_round_ph() { - let a = _mm512_set1_ph(2.0); - let b = _mm512_set1_ph(1.0); - let src = _mm512_set1_ph(3.0); - let r = _mm512_mask_min_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_mask_reduce_round_ph() { + let a = _mm512_set1_ph(1.25); + let src = _mm512_set1_ph(2.0); + let r = _mm512_mask_reduce_round_ph::<{ 16 | _MM_FROUND_TO_ZERO }, _MM_FROUND_NO_EXC>( src, 0b01010101010101010101010101010101, a, - b, ); let e = _mm512_set_ph( - 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 
1.0, 3.0, 1.0, 3.0, 1.0, 3.0, - 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, + 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, + 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_min_round_ph() { - let a = _mm512_set1_ph(2.0); - let b = _mm512_set1_ph(1.0); - let r = _mm512_maskz_min_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_maskz_reduce_round_ph() { + let a = _mm512_set1_ph(1.25); + let r = _mm512_maskz_reduce_round_ph::<{ 16 | _MM_FROUND_TO_ZERO }, _MM_FROUND_NO_EXC>( 0b01010101010101010101010101010101, a, - b, ); let e = _mm512_set_ph( - 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, - 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, + 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, + 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_min_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); - let r = _mm_min_sh(a, b); - let e = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + unsafe fn test_mm_reduce_sh() { + let a = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(1.25, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_reduce_sh::<{ 16 | _MM_FROUND_TO_ZERO }>(a, b); + let e = _mm_setr_ph(0.25, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_min_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); - let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); - let r = _mm_mask_min_sh(src, 0, a, b); - let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + unsafe fn test_mm_mask_reduce_sh() { + let a = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(1.25, 20., 21., 22., 23., 24., 25., 26.); + let src = _mm_setr_ph(2.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_reduce_sh::<{ 16 | _MM_FROUND_TO_ZERO }>(src, 0, a, b); + let e = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_mask_min_sh(src, 1, a, b); - let e = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let r = _mm_mask_reduce_sh::<{ 16 | _MM_FROUND_TO_ZERO }>(src, 1, a, b); + let e = _mm_setr_ph(0.25, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_min_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); - let r = _mm_maskz_min_sh(0, a, b); - let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + unsafe fn test_mm_maskz_reduce_sh() { + let a = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(1.25, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_maskz_reduce_sh::<{ 16 | _MM_FROUND_TO_ZERO }>(0, a, b); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_maskz_min_sh(1, a, b); - let e = _mm_setr_ph(1.0, 10.0, 
11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let r = _mm_maskz_reduce_sh::<{ 16 | _MM_FROUND_TO_ZERO }>(1, a, b); + let e = _mm_setr_ph(0.25, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_min_round_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); - let r = _mm_min_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + unsafe fn test_mm_reduce_round_sh() { + let a = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(1.25, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_reduce_round_sh::<{ 16 | _MM_FROUND_TO_ZERO }, _MM_FROUND_NO_EXC>(a, b); + let e = _mm_setr_ph(0.25, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_min_round_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); - let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); - let r = _mm_mask_min_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm_mask_reduce_round_sh() { + let a = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(1.25, 20., 21., 22., 23., 24., 25., 26.); + let src = _mm_setr_ph(2.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_reduce_round_sh::<{ 16 | _MM_FROUND_TO_ZERO }, _MM_FROUND_NO_EXC>( src, 0, a, b, ); - let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let e = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_mask_min_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm_mask_reduce_round_sh::<{ 16 | _MM_FROUND_TO_ZERO }, _MM_FROUND_NO_EXC>( src, 1, a, b, ); - let e = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let e = _mm_setr_ph(0.25, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_min_round_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + unsafe fn test_mm_maskz_reduce_round_sh() { + let a = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(1.25, 20., 21., 22., 23., 24., 25., 26.); let r = - _mm_maskz_min_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); - let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + _mm_maskz_reduce_round_sh::<{ 16 | _MM_FROUND_TO_ZERO }, _MM_FROUND_NO_EXC>(0, a, b); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); let r = - _mm_maskz_min_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); - let e = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + _mm_maskz_reduce_round_sh::<{ 16 | _MM_FROUND_TO_ZERO }, _MM_FROUND_NO_EXC>(1, a, b); + let e = _mm_setr_ph(0.25, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } } From d304918924a06925a242adffbcdef155fc6866e9 Mon Sep 17 00:00:00 2001 From: sayantn Date: Mon, 15 Jul 2024 22:43:22 +0530 Subject: [PATCH 07/11] AVX512FP16 Part 6: Remaining `cmpph`, `fpclass`, reduce, `blend`, `permutex` --- crates/core_arch/missing-x86.md | 35 - crates/core_arch/src/x86/avx512fp16.rs | 1126 ++++++++++++++++++++++++ 2 files 
changed, 1126 insertions(+), 35 deletions(-) diff --git a/crates/core_arch/missing-x86.md b/crates/core_arch/missing-x86.md index 72fc8b840e..8fee3cd36f 100644 --- a/crates/core_arch/missing-x86.md +++ b/crates/core_arch/missing-x86.md @@ -55,7 +55,6 @@ * [ ] [`_mm256_cvtsh_h`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtsh_h) * [ ] [`_mm256_set1_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_set1_pch) - * [ ] [`_mm512_cmp_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmp_ph_mask) * [ ] [`_mm512_cmp_round_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmp_round_ph_mask) * [ ] [`_mm512_cvt_roundepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundepi16_ph) * [ ] [`_mm512_cvt_roundepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundepi32_ph) @@ -102,9 +101,6 @@ * [ ] [`_mm512_cvtx_roundps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtx_roundps_ph) * [ ] [`_mm512_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtxph_ps) * [ ] [`_mm512_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtxps_ph) - * [ ] [`_mm512_fpclass_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fpclass_ph_mask) - * [ ] [`_mm512_mask_blend_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_blend_ph) - * [ ] [`_mm512_mask_cmp_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmp_ph_mask) * [ ] [`_mm512_mask_cmp_round_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmp_round_ph_mask) * [ ] [`_mm512_mask_cvt_roundepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundepi16_ph) * [ ] [`_mm512_mask_cvt_roundepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundepi32_ph) @@ -150,7 +146,6 @@ * [ ] [`_mm512_mask_cvtx_roundps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtx_roundps_ph) * [ ] [`_mm512_mask_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtxph_ps) * [ ] [`_mm512_mask_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtxps_ph) - * [ ] [`_mm512_mask_fpclass_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fpclass_ph_mask) * [ ] [`_mm512_maskz_cvt_roundepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundepi16_ph) * [ ] [`_mm512_maskz_cvt_roundepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundepi32_ph) * [ ] [`_mm512_maskz_cvt_roundepi64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundepi64_ph) @@ -195,12 +190,6 @@ * [ ] [`_mm512_maskz_cvtx_roundps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtx_roundps_ph) * [ ] [`_mm512_maskz_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtxph_ps) * [ ] [`_mm512_maskz_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtxps_ph) - * [ ] 
[`_mm512_permutex2var_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_permutex2var_ph) - * [ ] [`_mm512_permutexvar_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_permutexvar_ph) - * [ ] [`_mm512_reduce_add_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_add_ph) - * [ ] [`_mm512_reduce_max_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_max_ph) - * [ ] [`_mm512_reduce_min_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_min_ph) - * [ ] [`_mm512_reduce_mul_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_mul_ph) * [ ] [`_mm512_set1_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_set1_pch) * [ ] [`_mm_cvt_roundi32_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundi32_sh) * [ ] [`_mm_cvt_roundi64_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundi64_sh) @@ -237,7 +226,6 @@ * [ ] [`_mm_cvttsh_u64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttsh_u64) * [ ] [`_mm_cvtu32_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtu32_sh) * [ ] [`_mm_cvtu64_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtu64_sh) - * [ ] [`_mm_fpclass_sh_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fpclass_sh_mask) * [ ] [`_mm_mask_cvt_roundsd_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvt_roundsd_sh) * [ ] [`_mm_mask_cvt_roundsh_sd`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvt_roundsh_sd) * [ ] [`_mm_mask_cvt_roundsh_ss`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvt_roundsh_ss) @@ -246,7 +234,6 @@ * [ ] [`_mm_mask_cvtsh_sd`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtsh_sd) * [ ] [`_mm_mask_cvtsh_ss`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtsh_ss) * [ ] [`_mm_mask_cvtss_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtss_sh) - * [ ] [`_mm_mask_fpclass_sh_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fpclass_sh_mask) * [ ] [`_mm_maskz_cvt_roundsd_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvt_roundsd_sh) * [ ] [`_mm_maskz_cvt_roundsh_sd`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvt_roundsh_sd) * [ ] [`_mm_maskz_cvt_roundsh_ss`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvt_roundsh_ss) @@ -261,7 +248,6 @@
["AVX512_FP16", "AVX512VL"]

- * [ ] [`_mm256_cmp_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmp_ph_mask)
 * [ ] [`_mm256_cvtepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtepi16_ph)
 * [ ] [`_mm256_cvtepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtepi32_ph)
 * [ ] [`_mm256_cvtepi64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtepi64_ph)
@@ -284,9 +270,6 @@
 * [ ] [`_mm256_cvttph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvttph_epu64)
 * [ ] [`_mm256_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtxph_ps)
 * [ ] [`_mm256_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtxps_ph)
- * [ ] [`_mm256_fpclass_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fpclass_ph_mask)
- * [ ] [`_mm256_mask_blend_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_blend_ph)
- * [ ] [`_mm256_mask_cmp_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmp_ph_mask)
 * [ ] [`_mm256_mask_cvtepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepi16_ph)
 * [ ] [`_mm256_mask_cvtepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepi32_ph)
 * [ ] [`_mm256_mask_cvtepi64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepi64_ph)
@@ -309,7 +292,6 @@
 * [ ] [`_mm256_mask_cvttph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvttph_epu64)
 * [ ] [`_mm256_mask_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtxph_ps)
 * [ ] [`_mm256_mask_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtxps_ph)
- * [ ] [`_mm256_mask_fpclass_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fpclass_ph_mask)
 * [ ] [`_mm256_maskz_cvtepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepi16_ph)
 * [ ] [`_mm256_maskz_cvtepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepi32_ph)
 * [ ] [`_mm256_maskz_cvtepi64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepi64_ph)
@@ -332,13 +314,6 @@
 * [ ] [`_mm256_maskz_cvttph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvttph_epu64)
 * [ ] [`_mm256_maskz_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtxph_ps)
 * [ ] [`_mm256_maskz_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtxps_ph)
- * [ ] [`_mm256_permutex2var_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permutex2var_ph)
- * [ ] [`_mm256_permutexvar_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permutexvar_ph)
- * [ ] [`_mm256_reduce_add_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_reduce_add_ph)
- * [ ] [`_mm256_reduce_max_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_reduce_max_ph)
- * [ ] [`_mm256_reduce_min_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_reduce_min_ph)
- * [ ] [`_mm256_reduce_mul_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_reduce_mul_ph)
- * [ ] [`_mm_cmp_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmp_ph_mask)
 * [ ] [`_mm_cvtepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepi16_ph)
 * [ ] [`_mm_cvtepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepi32_ph)
 * [ ] [`_mm_cvtepi64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepi64_ph)
@@ -361,9 +336,6 @@
 * [ ] [`_mm_cvttph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttph_epu64)
 * [ ] [`_mm_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtxph_ps)
 * [ ] [`_mm_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtxps_ph)
- * [ ] [`_mm_fpclass_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fpclass_ph_mask)
- * [ ] [`_mm_mask_blend_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_blend_ph)
- * [ ] [`_mm_mask_cmp_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmp_ph_mask)
 * [ ] [`_mm_mask_cvtepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepi16_ph)
 * [ ] [`_mm_mask_cvtepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepi32_ph)
 * [ ] [`_mm_mask_cvtepi64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepi64_ph)
@@ -386,7 +358,6 @@
 * [ ] [`_mm_mask_cvttph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvttph_epu64)
 * [ ] [`_mm_mask_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtxph_ps)
 * [ ] [`_mm_mask_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtxps_ph)
- * [ ] [`_mm_mask_fpclass_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fpclass_ph_mask)
 * [ ] [`_mm_maskz_cvtepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepi16_ph)
 * [ ] [`_mm_maskz_cvtepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepi32_ph)
 * [ ] [`_mm_maskz_cvtepi64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepi64_ph)
@@ -409,12 +380,6 @@
 * [ ] [`_mm_maskz_cvttph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvttph_epu64)
 * [ ] [`_mm_maskz_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtxph_ps)
 * [ ] [`_mm_maskz_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtxps_ph)
- * [ ] [`_mm_permutex2var_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_permutex2var_ph)
- * [ ] [`_mm_permutexvar_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_permutexvar_ph)
- * [ ] [`_mm_reduce_add_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_reduce_add_ph)
- * [ ] [`_mm_reduce_max_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_reduce_max_ph)
- * [ ] [`_mm_reduce_min_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_reduce_min_ph)
- * [ ] [`_mm_reduce_mul_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_reduce_mul_ph)

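The hunks above strike the comparison, classification, blend, permute, and reduction intrinsics from the missing-intrinsics checklist; the diff that follows supplies their implementations. For orientation, here is a minimal usage sketch (illustrative only, not part of the patch), assuming a nightly toolchain with the `f16` and `stdarch_x86_avx512_f16` feature gates and a CPU with AVX512-FP16 plus AVX512VL:

use core::arch::x86_64::*;

// Hypothetical helper, for illustration only.
#[target_feature(enable = "avx512fp16,avx512vl")]
unsafe fn sketch(a: __m128h, b: __m128h) -> (__mmask8, __mmask8, f16) {
    // Bit i of `eq` is set when a[i] == b[i] (ordered, quiet predicate).
    let eq = _mm_cmp_ph_mask::<_CMP_EQ_OQ>(a, b);
    // 0x08 | 0x10 selects positive and negative infinity for vfpclassph.
    let inf = _mm_fpclass_ph_mask::<0x18>(a);
    // Horizontal sum of the eight f16 lanes.
    let sum = _mm_reduce_add_ph(a);
    (eq, inf, sum)
}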
diff --git a/crates/core_arch/src/x86/avx512fp16.rs b/crates/core_arch/src/x86/avx512fp16.rs index 3c04d9ae90..3889ce1f5e 100644 --- a/crates/core_arch/src/x86/avx512fp16.rs +++ b/crates/core_arch/src/x86/avx512fp16.rs @@ -615,6 +615,127 @@ pub unsafe fn _mm512_zextph128_ph512(a: __m128h) -> __m512h { ) } +macro_rules! cmp_asm { + ($mask_type: ty, $reg: ident, $a: expr, $b: expr) => {{ + let dst: $mask_type; + crate::arch::asm!( + "vcmpph {k}, {a}, {b}, {imm8}", + k = lateout(kreg) dst, + a = in($reg) $a, + b = in($reg) $b, + imm8 = const IMM5, + options(pure, nomem, nostack) + ); + dst + }}; + ($mask_type: ty, $mask: expr, $reg: ident, $a: expr, $b: expr) => {{ + let dst: $mask_type; + crate::arch::asm!( + "vcmpph {k} {{ {mask} }}, {a}, {b}, {imm8}", + k = lateout(kreg) dst, + mask = in(kreg) $mask, + a = in($reg) $a, + b = in($reg) $b, + imm8 = const IMM5, + options(pure, nomem, nostack) + ); + dst + }}; +} + +/// Compare packed half-precision (16-bit) floating-point elements in a and b based on the comparison +/// operand specified by imm8, and store the results in mask vector k. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmp_ph_mask) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl,avx512f,sse")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_cmp_ph_mask(a: __m128h, b: __m128h) -> __mmask8 { + static_assert_uimm_bits!(IMM5, 5); + cmp_asm!(__mmask8, xmm_reg, a, b) +} + +/// Compare packed half-precision (16-bit) floating-point elements in a and b based on the comparison +/// operand specified by imm8, and store the results in mask vector k using zeromask k (elements are +/// zeroed out when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cmp_ph_mask) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl,avx512f,sse")] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mask_cmp_ph_mask( + k1: __mmask8, + a: __m128h, + b: __m128h, +) -> __mmask8 { + static_assert_uimm_bits!(IMM5, 5); + cmp_asm!(__mmask8, k1, xmm_reg, a, b) +} + +/// Compare packed half-precision (16-bit) floating-point elements in a and b based on the comparison +/// operand specified by imm8, and store the results in mask vector k. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmp_ph_mask) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl,avx512f,avx")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_cmp_ph_mask(a: __m256h, b: __m256h) -> __mmask16 { + static_assert_uimm_bits!(IMM5, 5); + cmp_asm!(__mmask16, ymm_reg, a, b) +} + +/// Compare packed half-precision (16-bit) floating-point elements in a and b based on the comparison +/// operand specified by imm8, and store the results in mask vector k using zeromask k (elements are +/// zeroed out when the corresponding mask bit is not set). 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cmp_ph_mask) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl,avx512f,avx")] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_mask_cmp_ph_mask( + k1: __mmask16, + a: __m256h, + b: __m256h, +) -> __mmask16 { + static_assert_uimm_bits!(IMM5, 5); + cmp_asm!(__mmask16, k1, ymm_reg, a, b) +} + +/// Compare packed half-precision (16-bit) floating-point elements in a and b based on the comparison +/// operand specified by imm8, and store the results in mask vector k. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmp_ph_mask) +#[inline] +#[target_feature(enable = "avx512fp16,avx512bw,avx512f")] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_cmp_ph_mask(a: __m512h, b: __m512h) -> __mmask32 { + static_assert_uimm_bits!(IMM5, 5); + cmp_asm!(__mmask32, zmm_reg, a, b) +} + +/// Compare packed half-precision (16-bit) floating-point elements in a and b based on the comparison +/// operand specified by imm8, and store the results in mask vector k using zeromask k (elements are +/// zeroed out when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmp_ph_mask) +#[inline] +#[target_feature(enable = "avx512fp16,avx512bw,avx512f")] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_mask_cmp_ph_mask( + k1: __mmask32, + a: __m512h, + b: __m512h, +) -> __mmask32 { + static_assert_uimm_bits!(IMM5, 5); + cmp_asm!(__mmask32, k1, zmm_reg, a, b) +} + /// Compare the lower half-precision (16-bit) floating-point elements in a and b based on the comparison /// operand specified by imm8, and store the result in mask vector k. Exceptions can be suppressed by /// passing _MM_FROUND_NO_EXC in the sae parameter. @@ -10639,6 +10760,520 @@ pub unsafe fn _mm_maskz_reduce_round_sh( _mm_mask_reduce_round_sh::(_mm_setzero_ph(), k, a, b) } +/// Reduce the packed half-precision (16-bit) floating-point elements in a by addition. Returns the +/// sum of all elements in a. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_reduce_add_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_reduce_add_ph(a: __m128h) -> f16 { + let b = simd_shuffle!(a, a, [4, 5, 6, 7, 0, 1, 2, 3]); + let a = _mm_add_ph(a, b); + let b = simd_shuffle!(a, a, [2, 3, 0, 1, 4, 5, 6, 7]); + let a = _mm_add_ph(a, b); + simd_extract::<_, f16>(a, 0) + simd_extract::<_, f16>(a, 1) +} + +/// Reduce the packed half-precision (16-bit) floating-point elements in a by addition. Returns the +/// sum of all elements in a. 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_reduce_add_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_reduce_add_ph(a: __m256h) -> f16 { + let p = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let q = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); + _mm_reduce_add_ph(_mm_add_ph(p, q)) +} + +/// Reduce the packed half-precision (16-bit) floating-point elements in a by addition. Returns the +/// sum of all elements in a. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_add_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_reduce_add_ph(a: __m512h) -> f16 { + let p = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let q = simd_shuffle!( + a, + a, + [16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31] + ); + _mm256_reduce_add_ph(_mm256_add_ph(p, q)) +} + +/// Reduce the packed half-precision (16-bit) floating-point elements in a by multiplication. Returns +/// the product of all elements in a. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_reduce_mul_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_reduce_mul_ph(a: __m128h) -> f16 { + let b = simd_shuffle!(a, a, [4, 5, 6, 7, 0, 1, 2, 3]); + let a = _mm_mul_ph(a, b); + let b = simd_shuffle!(a, a, [2, 3, 0, 1, 4, 5, 6, 7]); + let a = _mm_mul_ph(a, b); + simd_extract::<_, f16>(a, 0) * simd_extract::<_, f16>(a, 1) +} + +/// Reduce the packed half-precision (16-bit) floating-point elements in a by multiplication. Returns +/// the product of all elements in a. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_reduce_mul_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_reduce_mul_ph(a: __m256h) -> f16 { + let p = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let q = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); + _mm_reduce_mul_ph(_mm_mul_ph(p, q)) +} + +/// Reduce the packed half-precision (16-bit) floating-point elements in a by multiplication. Returns +/// the product of all elements in a. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_mul_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_reduce_mul_ph(a: __m512h) -> f16 { + let p = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let q = simd_shuffle!( + a, + a, + [16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31] + ); + _mm256_reduce_mul_ph(_mm256_mul_ph(p, q)) +} + +/// Reduce the packed half-precision (16-bit) floating-point elements in a by minimum. Returns the +/// minimum of all elements in a. 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_reduce_min_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_reduce_min_ph(a: __m128h) -> f16 { + let b = simd_shuffle!(a, a, [4, 5, 6, 7, 0, 1, 2, 3]); + let a = _mm_min_ph(a, b); + let b = simd_shuffle!(a, a, [2, 3, 0, 1, 4, 5, 6, 7]); + let a = _mm_min_ph(a, b); + let b = simd_shuffle!(a, a, [1, 0, 2, 3, 4, 5, 6, 7]); + simd_extract!(_mm_min_sh(a, b), 0) +} + +/// Reduce the packed half-precision (16-bit) floating-point elements in a by minimum. Returns the +/// minimum of all elements in a. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_reduce_min_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_reduce_min_ph(a: __m256h) -> f16 { + let p = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let q = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); + _mm_reduce_min_ph(_mm_min_ph(p, q)) +} + +/// Reduce the packed half-precision (16-bit) floating-point elements in a by minimum. Returns the +/// minimum of all elements in a. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_min_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_reduce_min_ph(a: __m512h) -> f16 { + let p = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let q = simd_shuffle!( + a, + a, + [16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31] + ); + _mm256_reduce_min_ph(_mm256_min_ph(p, q)) +} + +/// Reduce the packed half-precision (16-bit) floating-point elements in a by maximum. Returns the +/// maximum of all elements in a. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_reduce_max_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_reduce_max_ph(a: __m128h) -> f16 { + let b = simd_shuffle!(a, a, [4, 5, 6, 7, 0, 1, 2, 3]); + let a = _mm_max_ph(a, b); + let b = simd_shuffle!(a, a, [2, 3, 0, 1, 4, 5, 6, 7]); + let a = _mm_max_ph(a, b); + let b = simd_shuffle!(a, a, [1, 0, 2, 3, 4, 5, 6, 7]); + simd_extract!(_mm_max_sh(a, b), 0) +} + +/// Reduce the packed half-precision (16-bit) floating-point elements in a by maximum. Returns the +/// maximum of all elements in a. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_reduce_max_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_reduce_max_ph(a: __m256h) -> f16 { + let p = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); + let q = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]); + _mm_reduce_max_ph(_mm_max_ph(p, q)) +} + +/// Reduce the packed half-precision (16-bit) floating-point elements in a by maximum. Returns the +/// maximum of all elements in a. 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_reduce_max_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_reduce_max_ph(a: __m512h) -> f16 { + let p = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + let q = simd_shuffle!( + a, + a, + [16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31] + ); + _mm256_reduce_max_ph(_mm256_max_ph(p, q)) +} + +macro_rules! fpclass_asm { + ($mask_type: ty, $reg: ident, $a: expr) => {{ + let dst: $mask_type; + crate::arch::asm!( + "vfpclassph {k}, {src}, {imm8}", + k = lateout(kreg) dst, + src = in($reg) $a, + imm8 = const IMM8, + options(pure, nomem, nostack) + ); + dst + }}; + ($mask_type: ty, $mask: expr, $reg: ident, $a: expr) => {{ + let dst: $mask_type; + crate::arch::asm!( + "vfpclassph {k} {{ {mask} }}, {src}, {imm8}", + k = lateout(kreg) dst, + mask = in(kreg) $mask, + src = in($reg) $a, + imm8 = const IMM8, + options(pure, nomem, nostack) + ); + dst + }}; +} + +/// Test packed half-precision (16-bit) floating-point elements in a for special categories specified +/// by imm8, and store the results in mask vector k. +/// imm can be a combination of: +/// +/// 0x01 // QNaN +/// 0x02 // Positive Zero +/// 0x04 // Negative Zero +/// 0x08 // Positive Infinity +/// 0x10 // Negative Infinity +/// 0x20 // Denormal +/// 0x40 // Negative +/// 0x80 // SNaN +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fpclass_ph_mask) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl,avx512f,sse")] +#[cfg_attr(test, assert_instr(vfpclassph, IMM8 = 0))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_fpclass_ph_mask(a: __m128h) -> __mmask8 { + static_assert_uimm_bits!(IMM8, 8); + fpclass_asm!(__mmask8, xmm_reg, a) +} + +/// Test packed half-precision (16-bit) floating-point elements in a for special categories specified +/// by imm8, and store the results in mask vector k using zeromask k (elements are zeroed out when the +/// corresponding mask bit is not set). +/// imm can be a combination of: +/// +/// 0x01 // QNaN +/// 0x02 // Positive Zero +/// 0x04 // Negative Zero +/// 0x08 // Positive Infinity +/// 0x10 // Negative Infinity +/// 0x20 // Denormal +/// 0x40 // Negative +/// 0x80 // SNaN +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fpclass_ph_mask) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl,avx512f,sse")] +#[cfg_attr(test, assert_instr(vfpclassph, IMM8 = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mask_fpclass_ph_mask(k1: __mmask8, a: __m128h) -> __mmask8 { + static_assert_uimm_bits!(IMM8, 8); + fpclass_asm!(__mmask8, k1, xmm_reg, a) +} + +/// Test packed half-precision (16-bit) floating-point elements in a for special categories specified +/// by imm8, and store the results in mask vector k. 
+/// imm can be a combination of: +/// +/// 0x01 // QNaN +/// 0x02 // Positive Zero +/// 0x04 // Negative Zero +/// 0x08 // Positive Infinity +/// 0x10 // Negative Infinity +/// 0x20 // Denormal +/// 0x40 // Negative +/// 0x80 // SNaN +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_fpclass_ph_mask) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl,avx512f,avx")] +#[cfg_attr(test, assert_instr(vfpclassph, IMM8 = 0))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_fpclass_ph_mask(a: __m256h) -> __mmask16 { + static_assert_uimm_bits!(IMM8, 8); + fpclass_asm!(__mmask16, ymm_reg, a) +} + +/// Test packed half-precision (16-bit) floating-point elements in a for special categories specified +/// by imm8, and store the results in mask vector k using zeromask k (elements are zeroed out when the +/// corresponding mask bit is not set). +/// imm can be a combination of: +/// +/// 0x01 // QNaN +/// 0x02 // Positive Zero +/// 0x04 // Negative Zero +/// 0x08 // Positive Infinity +/// 0x10 // Negative Infinity +/// 0x20 // Denormal +/// 0x40 // Negative +/// 0x80 // SNaN +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_fpclass_ph_mask) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl,avx512f,avx")] +#[cfg_attr(test, assert_instr(vfpclassph, IMM8 = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_mask_fpclass_ph_mask(k1: __mmask16, a: __m256h) -> __mmask16 { + static_assert_uimm_bits!(IMM8, 8); + fpclass_asm!(__mmask16, k1, ymm_reg, a) +} + +/// Test packed half-precision (16-bit) floating-point elements in a for special categories specified +/// by imm8, and store the results in mask vector k. +/// imm can be a combination of: +/// +/// 0x01 // QNaN +/// 0x02 // Positive Zero +/// 0x04 // Negative Zero +/// 0x08 // Positive Infinity +/// 0x10 // Negative Infinity +/// 0x20 // Denormal +/// 0x40 // Negative +/// 0x80 // SNaN +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_fpclass_ph_mask) +#[inline] +#[target_feature(enable = "avx512fp16,avx512bw,avx512f")] +#[cfg_attr(test, assert_instr(vfpclassph, IMM8 = 0))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_fpclass_ph_mask(a: __m512h) -> __mmask32 { + static_assert_uimm_bits!(IMM8, 8); + fpclass_asm!(__mmask32, zmm_reg, a) +} + +/// Test packed half-precision (16-bit) floating-point elements in a for special categories specified +/// by imm8, and store the results in mask vector k using zeromask k (elements are zeroed out when the +/// corresponding mask bit is not set). 
+/// imm can be a combination of: +/// +/// 0x01 // QNaN +/// 0x02 // Positive Zero +/// 0x04 // Negative Zero +/// 0x08 // Positive Infinity +/// 0x10 // Negative Infinity +/// 0x20 // Denormal +/// 0x40 // Negative +/// 0x80 // SNaN +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_fpclass_ph_mask) +#[inline] +#[target_feature(enable = "avx512fp16,avx512bw,avx512f")] +#[cfg_attr(test, assert_instr(vfpclassph, IMM8 = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_mask_fpclass_ph_mask(k1: __mmask32, a: __m512h) -> __mmask32 { + static_assert_uimm_bits!(IMM8, 8); + fpclass_asm!(__mmask32, k1, zmm_reg, a) +} + +/// Test the lower half-precision (16-bit) floating-point element in a for special categories specified +/// by imm8, and store the result in mask vector k. +/// imm can be a combination of: +/// +/// 0x01 // QNaN +/// 0x02 // Positive Zero +/// 0x04 // Negative Zero +/// 0x08 // Positive Infinity +/// 0x10 // Negative Infinity +/// 0x20 // Denormal +/// 0x40 // Negative +/// 0x80 // SNaN +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fpclass_sh_mask) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vfpclasssh, IMM8 = 0))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_fpclass_sh_mask(a: __m128h) -> __mmask8 { + _mm_mask_fpclass_sh_mask::(0xff, a) +} + +/// Test the lower half-precision (16-bit) floating-point element in a for special categories specified +/// by imm8, and store the result in mask vector k using zeromask k (elements are zeroed out when the +/// corresponding mask bit is not set). +/// imm can be a combination of: +/// +/// 0x01 // QNaN +/// 0x02 // Positive Zero +/// 0x04 // Negative Zero +/// 0x08 // Positive Infinity +/// 0x10 // Negative Infinity +/// 0x20 // Denormal +/// 0x40 // Negative +/// 0x80 // SNaN +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_fpclass_sh_mask) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vfpclasssh, IMM8 = 0))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mask_fpclass_sh_mask(k1: __mmask8, a: __m128h) -> __mmask8 { + static_assert_uimm_bits!(IMM8, 8); + vfpclasssh(a, IMM8, k1) +} + +/// Blend packed half-precision (16-bit) floating-point elements from a and b using control mask k, +/// and store the results in dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_blend_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mask_blend_ph(k: __mmask8, a: __m128h, b: __m128h) -> __m128h { + simd_select_bitmask(k, b, a) +} + +/// Blend packed half-precision (16-bit) floating-point elements from a and b using control mask k, +/// and store the results in dst. 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_blend_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_mask_blend_ph(k: __mmask16, a: __m256h, b: __m256h) -> __m256h { + simd_select_bitmask(k, b, a) +} + +/// Blend packed half-precision (16-bit) floating-point elements from a and b using control mask k, +/// and store the results in dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_blend_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_mask_blend_ph(k: __mmask32, a: __m512h, b: __m512h) -> __m512h { + simd_select_bitmask(k, b, a) +} + +/// Shuffle half-precision (16-bit) floating-point elements in a and b using the corresponding selector +/// and index in idx, and store the results in dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_permutex2var_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_permutex2var_ph(a: __m128h, idx: __m128i, b: __m128h) -> __m128h { + _mm_castsi128_ph(_mm_permutex2var_epi16( + _mm_castph_si128(a), + idx, + _mm_castph_si128(b), + )) +} + +/// Shuffle half-precision (16-bit) floating-point elements in a and b using the corresponding selector +/// and index in idx, and store the results in dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permutex2var_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_permutex2var_ph(a: __m256h, idx: __m256i, b: __m256h) -> __m256h { + _mm256_castsi256_ph(_mm256_permutex2var_epi16( + _mm256_castph_si256(a), + idx, + _mm256_castph_si256(b), + )) +} + +/// Shuffle half-precision (16-bit) floating-point elements in a and b using the corresponding selector +/// and index in idx, and store the results in dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_permutex2var_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_permutex2var_ph(a: __m512h, idx: __m512i, b: __m512h) -> __m512h { + _mm512_castsi512_ph(_mm512_permutex2var_epi16( + _mm512_castph_si512(a), + idx, + _mm512_castph_si512(b), + )) +} + +/// Shuffle half-precision (16-bit) floating-point elements in a using the corresponding index in idx, +/// and store the results in dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_permutexvar_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_permutexvar_ph(idx: __m128i, a: __m128h) -> __m128h { + _mm_castsi128_ph(_mm_permutexvar_epi16(idx, _mm_castph_si128(a))) +} + +/// Shuffle half-precision (16-bit) floating-point elements in a using the corresponding index in idx, +/// and store the results in dst. 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permutexvar_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_permutexvar_ph(idx: __m256i, a: __m256h) -> __m256h { + _mm256_castsi256_ph(_mm256_permutexvar_epi16(idx, _mm256_castph_si256(a))) +} + +/// Shuffle half-precision (16-bit) floating-point elements in a using the corresponding index in idx, +/// and store the results in dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_permutexvar_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_permutexvar_ph(idx: __m512i, a: __m512h) -> __m512h { + _mm512_castsi512_ph(_mm512_permutexvar_epi16(idx, _mm512_castph_si512(a))) +} + #[allow(improper_ctypes)] extern "C" { #[link_name = "llvm.x86.avx512fp16.mask.cmp.sh"] @@ -10832,6 +11467,9 @@ extern "C" { #[link_name = "llvm.x86.avx512fp16.mask.reduce.sh"] fn vreducesh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, imm8: i32, sae: i32) -> __m128h; + + #[link_name = "llvm.x86.avx512fp16.mask.fpclass.sh"] + fn vfpclasssh(a: __m128h, imm8: i32, k: __mmask8) -> __mmask8; } #[cfg(test)] @@ -11216,6 +11854,80 @@ mod tests { assert_eq_m512h(r, e); } + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_cmp_ph_mask() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(1.0, 2.0, 3.0, 4.0, -5.0, -6.0, -7.0, -8.0); + let r = _mm_cmp_ph_mask::<_CMP_EQ_OQ>(a, b); + assert_eq!(r, 0b11110000); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_cmp_ph_mask() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(1.0, 2.0, 3.0, 4.0, -5.0, -6.0, -7.0, -8.0); + let r = _mm_mask_cmp_ph_mask::<_CMP_EQ_OQ>(0b01010101, a, b); + assert_eq!(r, 0b01010000); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_cmp_ph_mask() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, -5.0, -6.0, -7.0, -8.0, 9.0, 10.0, 11.0, 12.0, -13.0, -14.0, -15.0, + -16.0, + ); + let r = _mm256_cmp_ph_mask::<_CMP_EQ_OQ>(a, b); + assert_eq!(r, 0b1111000011110000); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_cmp_ph_mask() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, -5.0, -6.0, -7.0, -8.0, 9.0, 10.0, 11.0, 12.0, -13.0, -14.0, -15.0, + -16.0, + ); + let r = _mm256_mask_cmp_ph_mask::<_CMP_EQ_OQ>(0b0101010101010101, a, b); + assert_eq!(r, 0b0101000001010000); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_cmp_ph_mask() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, -5.0, -6.0, -7.0, -8.0, 9.0, 10.0, 11.0, 12.0, -13.0, -14.0, -15.0, + -16.0, 17.0, 18.0, 19.0, 20.0, -21.0, -22.0, -23.0, -24.0, 25.0, 26.0, 27.0, 28.0, + -29.0, -30.0, -31.0, -32.0, + ); + let r = _mm512_cmp_ph_mask::<_CMP_EQ_OQ>(a, b); + assert_eq!(r, 0b11110000111100001111000011110000); + } 
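    // Reviewer note, not part of the patch: `_mm_set_ph` and its wider variants take
    // their arguments highest lane first, mirroring `_mm_set_ps`. In
    // `test_mm_cmp_ph_mask` above, the sign-flipped elements 5.0..=8.0 therefore sit
    // in lanes 3..0, so `_CMP_EQ_OQ` clears the low four bits and the expected mask
    // is 0b11110000 rather than 0b00001111. The masked variants additionally AND the
    // result with the `k1` zeromask, e.g. 0b11110000 & 0b01010101 == 0b01010000.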
+ + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_cmp_ph_mask() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, -5.0, -6.0, -7.0, -8.0, 9.0, 10.0, 11.0, 12.0, -13.0, -14.0, -15.0, + -16.0, 17.0, 18.0, 19.0, 20.0, -21.0, -22.0, -23.0, -24.0, 25.0, 26.0, 27.0, 28.0, + -29.0, -30.0, -31.0, -32.0, + ); + let r = _mm512_mask_cmp_ph_mask::<_CMP_EQ_OQ>(0b01010101010101010101010101010101, a, b); + assert_eq!(r, 0b01010000010100000101000001010000); + } + #[simd_test(enable = "avx512fp16")] unsafe fn test_mm_cmp_round_sh_mask() { let a = _mm_set_sh(1.0); @@ -17754,4 +18466,418 @@ mod tests { let e = _mm_setr_ph(0.25, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_reduce_add_ph() { + let a = _mm_set1_ph(2.0); + let r = _mm_reduce_add_ph(a); + assert_eq!(r, 16.0); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_reduce_add_ph() { + let a = _mm256_set1_ph(2.0); + let r = _mm256_reduce_add_ph(a); + assert_eq!(r, 32.0); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_reduce_add_ph() { + let a = _mm512_set1_ph(2.0); + let r = _mm512_reduce_add_ph(a); + assert_eq!(r, 64.0); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_reduce_mul_ph() { + let a = _mm_set1_ph(2.0); + let r = _mm_reduce_mul_ph(a); + assert_eq!(r, 256.0); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_reduce_mul_ph() { + let a = _mm256_set1_ph(2.0); + let r = _mm256_reduce_mul_ph(a); + assert_eq!(r, 65536.0); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_reduce_mul_ph() { + let a = _mm512_set1_ph(2.0); + let r = _mm512_reduce_mul_ph(a); + assert_eq!(r, 16777216.0); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_reduce_max_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm_reduce_max_ph(a); + assert_eq!(r, 8.0); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_reduce_max_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let r = _mm256_reduce_max_ph(a); + assert_eq!(r, 16.0); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_reduce_max_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let r = _mm512_reduce_max_ph(a); + assert_eq!(r, 32.0); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_reduce_min_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm_reduce_min_ph(a); + assert_eq!(r, 1.0); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_reduce_min_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let r = _mm256_reduce_min_ph(a); + assert_eq!(r, 1.0); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_reduce_min_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 
26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let r = _mm512_reduce_min_ph(a); + assert_eq!(r, 1.0); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_fpclass_ph_mask() { + let a = _mm_set_ph( + 1., + f16::INFINITY, + f16::NEG_INFINITY, + 0.0, + -0.0, + -2.0, + f16::NAN, + 5.9e-8, // Denormal + ); + let r = _mm_fpclass_ph_mask::<0x18>(a); // infinities + assert_eq!(r, 0b01100000); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_fpclass_ph_mask() { + let a = _mm_set_ph( + 1., + f16::INFINITY, + f16::NEG_INFINITY, + 0.0, + -0.0, + -2.0, + f16::NAN, + 5.9e-8, // Denormal + ); + let r = _mm_mask_fpclass_ph_mask::<0x18>(0b01010101, a); + assert_eq!(r, 0b01000000); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_fpclass_ph_mask() { + let a = _mm256_set_ph( + 1., + f16::INFINITY, + f16::NEG_INFINITY, + 0.0, + -0.0, + -2.0, + f16::NAN, + 5.9e-8, // Denormal + 1., + f16::INFINITY, + f16::NEG_INFINITY, + 0.0, + -0.0, + -2.0, + f16::NAN, + 5.9e-8, // Denormal + ); + let r = _mm256_fpclass_ph_mask::<0x18>(a); // infinities + assert_eq!(r, 0b0110000001100000); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_fpclass_ph_mask() { + let a = _mm256_set_ph( + 1., + f16::INFINITY, + f16::NEG_INFINITY, + 0.0, + -0.0, + -2.0, + f16::NAN, + 5.9e-8, // Denormal + 1., + f16::INFINITY, + f16::NEG_INFINITY, + 0.0, + -0.0, + -2.0, + f16::NAN, + 5.9e-8, // Denormal + ); + let r = _mm256_mask_fpclass_ph_mask::<0x18>(0b0101010101010101, a); + assert_eq!(r, 0b0100000001000000); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_fpclass_ph_mask() { + let a = _mm512_set_ph( + 1., + f16::INFINITY, + f16::NEG_INFINITY, + 0.0, + -0.0, + -2.0, + f16::NAN, + 5.9e-8, // Denormal + 1., + f16::INFINITY, + f16::NEG_INFINITY, + 0.0, + -0.0, + -2.0, + f16::NAN, + 5.9e-8, // Denormal + 1., + f16::INFINITY, + f16::NEG_INFINITY, + 0.0, + -0.0, + -2.0, + f16::NAN, + 5.9e-8, // Denormal + 1., + f16::INFINITY, + f16::NEG_INFINITY, + 0.0, + -0.0, + -2.0, + f16::NAN, + 5.9e-8, // Denormal + ); + let r = _mm512_fpclass_ph_mask::<0x18>(a); // infinities + assert_eq!(r, 0b01100000011000000110000001100000); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_fpclass_ph_mask() { + let a = _mm512_set_ph( + 1., + f16::INFINITY, + f16::NEG_INFINITY, + 0.0, + -0.0, + -2.0, + f16::NAN, + 5.9e-8, // Denormal + 1., + f16::INFINITY, + f16::NEG_INFINITY, + 0.0, + -0.0, + -2.0, + f16::NAN, + 5.9e-8, // Denormal + 1., + f16::INFINITY, + f16::NEG_INFINITY, + 0.0, + -0.0, + -2.0, + f16::NAN, + 5.9e-8, // Denormal + 1., + f16::INFINITY, + f16::NEG_INFINITY, + 0.0, + -0.0, + -2.0, + f16::NAN, + 5.9e-8, // Denormal + ); + let r = _mm512_mask_fpclass_ph_mask::<0x18>(0b01010101010101010101010101010101, a); + assert_eq!(r, 0b01000000010000000100000001000000); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_fpclass_sh_mask() { + let a = _mm_set_sh(f16::INFINITY); + let r = _mm_fpclass_sh_mask::<0x18>(a); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_fpclass_sh_mask() { + let a = _mm_set_sh(f16::INFINITY); + let r = _mm_mask_fpclass_sh_mask::<0x18>(0, a); + assert_eq!(r, 0); + let r = _mm_mask_fpclass_sh_mask::<0x18>(1, a); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_blend_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(-1.0, -2.0, -3.0, -4.0, -5.0, -6.0, -7.0, 
-8.0); + let r = _mm_mask_blend_ph(0b01010101, a, b); + let e = _mm_set_ph(1.0, -2.0, 3.0, -4.0, 5.0, -6.0, 7.0, -8.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_blend_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + -1.0, -2.0, -3.0, -4.0, -5.0, -6.0, -7.0, -8.0, -9.0, -10.0, -11.0, -12.0, -13.0, + -14.0, -15.0, -16.0, + ); + let r = _mm256_mask_blend_ph(0b0101010101010101, a, b); + let e = _mm256_set_ph( + 1.0, -2.0, 3.0, -4.0, 5.0, -6.0, 7.0, -8.0, 9.0, -10.0, 11.0, -12.0, 13.0, -14.0, 15.0, + -16.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_blend_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + -1.0, -2.0, -3.0, -4.0, -5.0, -6.0, -7.0, -8.0, -9.0, -10.0, -11.0, -12.0, -13.0, + -14.0, -15.0, -16.0, -17.0, -18.0, -19.0, -20.0, -21.0, -22.0, -23.0, -24.0, -25.0, + -26.0, -27.0, -28.0, -29.0, -30.0, -31.0, -32.0, + ); + let r = _mm512_mask_blend_ph(0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 1.0, -2.0, 3.0, -4.0, 5.0, -6.0, 7.0, -8.0, 9.0, -10.0, 11.0, -12.0, 13.0, -14.0, 15.0, + -16.0, 17.0, -18.0, 19.0, -20.0, 21.0, -22.0, 23.0, -24.0, 25.0, -26.0, 27.0, -28.0, + 29.0, -30.0, 31.0, -32.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_permutex2var_ph() { + let a = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_setr_ph(9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let idx = _mm_setr_epi16(0, 2, 4, 6, 8, 10, 12, 14); + let r = _mm_permutex2var_ph(a, idx, b); + let e = _mm_setr_ph(1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_permutex2var_ph() { + let a = _mm256_setr_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_setr_ph( + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let idx = _mm256_setr_epi16(0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); + let r = _mm256_permutex2var_ph(a, idx, b); + let e = _mm256_setr_ph( + 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, 23.0, 25.0, 27.0, 29.0, + 31.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_permutex2var_ph() { + let a = _mm512_setr_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_setr_ph( + 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, + 47.0, 48.0, 49.0, 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, 60.0, + 61.0, 62.0, 63.0, 64.0, + ); + let idx = _mm512_set_epi16( + 62, 60, 58, 56, 54, 52, 50, 48, 46, 44, 42, 40, 38, 36, 34, 32, 30, 28, 26, 24, 22, 20, + 18, 16, 14, 12, 10, 8, 6, 4, 2, 0, + ); + let r = _mm512_permutex2var_ph(a, idx, b); + let e = _mm512_setr_ph( + 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, 23.0, 25.0, 27.0, 29.0, + 31.0, 33.0, 35.0, 37.0, 39.0, 41.0, 43.0, 45.0, 47.0, 
49.0, 51.0, 53.0, 55.0, 57.0, + 59.0, 61.0, 63.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_permutexvar_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let idx = _mm_set_epi16(0, 2, 4, 6, 1, 3, 5, 7); + let r = _mm_permutexvar_ph(idx, a); + let e = _mm_setr_ph(1.0, 3.0, 5.0, 7.0, 2.0, 4.0, 6.0, 8.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_permutexvar_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let idx = _mm256_set_epi16(0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15); + let r = _mm256_permutexvar_ph(idx, a); + let e = _mm256_setr_ph( + 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_permutexvar_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let idx = _mm512_set_epi16( + 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 1, 3, 5, 7, 9, 11, 13, 15, + 17, 19, 21, 23, 25, 27, 29, 31, + ); + let r = _mm512_permutexvar_ph(idx, a); + let e = _mm512_setr_ph( + 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, 23.0, 25.0, 27.0, 29.0, + 31.0, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0, 22.0, 24.0, 26.0, 28.0, + 30.0, 32.0, + ); + assert_eq_m512h(r, e); + } } From 2ae57f04a5dfab907e9dfc938de00c4e6bdf3619 Mon Sep 17 00:00:00 2001 From: sayantn Date: Tue, 16 Jul 2024 12:37:21 +0530 Subject: [PATCH 08/11] AVX512FP16 Part 7: Convert to f16 --- crates/core_arch/missing-x86.md | 116 - crates/core_arch/src/x86/avx512fp16.rs | 12052 ++++++++++++-------- crates/core_arch/src/x86_64/avx512fp16.rs | 129 + crates/core_arch/src/x86_64/mod.rs | 4 + 4 files changed, 7689 insertions(+), 4612 deletions(-) create mode 100644 crates/core_arch/src/x86_64/avx512fp16.rs diff --git a/crates/core_arch/missing-x86.md b/crates/core_arch/missing-x86.md index 8fee3cd36f..1c2d0a6d7b 100644 --- a/crates/core_arch/missing-x86.md +++ b/crates/core_arch/missing-x86.md @@ -56,13 +56,6 @@ * [ ] [`_mm256_cvtsh_h`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtsh_h) * [ ] [`_mm256_set1_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_set1_pch) * [ ] [`_mm512_cmp_round_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmp_round_ph_mask) - * [ ] [`_mm512_cvt_roundepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundepi16_ph) - * [ ] [`_mm512_cvt_roundepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundepi32_ph) - * [ ] [`_mm512_cvt_roundepi64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundepi64_ph) - * [ ] [`_mm512_cvt_roundepu16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundepu16_ph) - * [ ] [`_mm512_cvt_roundepu32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundepu32_ph) - * [ ] [`_mm512_cvt_roundepu64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundepu64_ph) - * [ ] 
[`_mm512_cvt_roundpd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundpd_ph)
 * [ ] [`_mm512_cvt_roundph_epi16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundph_epi16)
 * [ ] [`_mm512_cvt_roundph_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundph_epi32)
 * [ ] [`_mm512_cvt_roundph_epi64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundph_epi64)
@@ -70,13 +63,6 @@
 * [ ] [`_mm512_cvt_roundph_epu32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundph_epu32)
 * [ ] [`_mm512_cvt_roundph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundph_epu64)
 * [ ] [`_mm512_cvt_roundph_pd`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundph_pd)
- * [ ] [`_mm512_cvtepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtepi16_ph)
- * [ ] [`_mm512_cvtepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtepi32_ph)
- * [ ] [`_mm512_cvtepi64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtepi64_ph)
- * [ ] [`_mm512_cvtepu16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtepu16_ph)
- * [ ] [`_mm512_cvtepu32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtepu32_ph)
- * [ ] [`_mm512_cvtepu64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtepu64_ph)
- * [ ] [`_mm512_cvtpd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtpd_ph)
 * [ ] [`_mm512_cvtph_epi16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtph_epi16)
 * [ ] [`_mm512_cvtph_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtph_epi32)
 * [ ] [`_mm512_cvtph_epi64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtph_epi64)
@@ -98,17 +84,8 @@
 * [ ] [`_mm512_cvttph_epu32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvttph_epu32)
 * [ ] [`_mm512_cvttph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvttph_epu64)
 * [ ] [`_mm512_cvtx_roundph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtx_roundph_ps)
- * [ ] [`_mm512_cvtx_roundps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtx_roundps_ph)
 * [ ] [`_mm512_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtxph_ps)
- * [ ] [`_mm512_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtxps_ph)
 * [ ] [`_mm512_mask_cmp_round_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmp_round_ph_mask)
- * [ ] [`_mm512_mask_cvt_roundepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundepi16_ph)
- * [ ] [`_mm512_mask_cvt_roundepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundepi32_ph)
- * [ ] [`_mm512_mask_cvt_roundepi64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundepi64_ph)
- * [ ] [`_mm512_mask_cvt_roundepu16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundepu16_ph)
- * [ ] [`_mm512_mask_cvt_roundepu32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundepu32_ph)
- * [ ] [`_mm512_mask_cvt_roundepu64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundepu64_ph)
- * [ ] [`_mm512_mask_cvt_roundpd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundpd_ph)
 * [ ] [`_mm512_mask_cvt_roundph_epi16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundph_epi16)
 * [ ] [`_mm512_mask_cvt_roundph_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundph_epi32)
 * [ ] [`_mm512_mask_cvt_roundph_epi64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundph_epi64)
@@ -116,13 +93,6 @@
 * [ ] [`_mm512_mask_cvt_roundph_epu32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundph_epu32)
 * [ ] [`_mm512_mask_cvt_roundph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundph_epu64)
 * [ ] [`_mm512_mask_cvt_roundph_pd`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundph_pd)
- * [ ] [`_mm512_mask_cvtepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtepi16_ph)
- * [ ] [`_mm512_mask_cvtepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtepi32_ph)
- * [ ] [`_mm512_mask_cvtepi64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtepi64_ph)
- * [ ] [`_mm512_mask_cvtepu16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtepu16_ph)
- * [ ] [`_mm512_mask_cvtepu32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtepu32_ph)
- * [ ] [`_mm512_mask_cvtepu64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtepu64_ph)
- * [ ] [`_mm512_mask_cvtpd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtpd_ph)
 * [ ] [`_mm512_mask_cvtph_epi16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtph_epi16)
 * [ ] [`_mm512_mask_cvtph_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtph_epi32)
 * [ ] [`_mm512_mask_cvtph_epi64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtph_epi64)
@@ -143,16 +113,7 @@
 * [ ] [`_mm512_mask_cvttph_epu32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvttph_epu32)
 * [ ] [`_mm512_mask_cvttph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvttph_epu64)
 * [ ] [`_mm512_mask_cvtx_roundph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtx_roundph_ps)
- * [ ] [`_mm512_mask_cvtx_roundps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtx_roundps_ph)
 * [ ] [`_mm512_mask_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtxph_ps)
- * [ ] [`_mm512_mask_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtxps_ph)
- * [ ] [`_mm512_maskz_cvt_roundepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundepi16_ph)
- * [ ] [`_mm512_maskz_cvt_roundepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundepi32_ph)
- * [ ] [`_mm512_maskz_cvt_roundepi64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundepi64_ph)
- * [ ] [`_mm512_maskz_cvt_roundepu16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundepu16_ph)
- * [ ] [`_mm512_maskz_cvt_roundepu32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundepu32_ph)
- * [ ] [`_mm512_maskz_cvt_roundepu64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundepu64_ph)
- * [ ] [`_mm512_maskz_cvt_roundpd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundpd_ph)
 * [ ] [`_mm512_maskz_cvt_roundph_epi16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundph_epi16)
 * [ ] [`_mm512_maskz_cvt_roundph_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundph_epi32)
 * [ ] [`_mm512_maskz_cvt_roundph_epi64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundph_epi64)
@@ -160,13 +121,6 @@
 * [ ] [`_mm512_maskz_cvt_roundph_epu32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundph_epu32)
 * [ ] [`_mm512_maskz_cvt_roundph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundph_epu64)
 * [ ] [`_mm512_maskz_cvt_roundph_pd`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundph_pd)
- * [ ] [`_mm512_maskz_cvtepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtepi16_ph)
- * [ ] [`_mm512_maskz_cvtepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtepi32_ph)
- * [ ] [`_mm512_maskz_cvtepi64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtepi64_ph)
- * [ ] [`_mm512_maskz_cvtepu16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtepu16_ph)
- * [ ] [`_mm512_maskz_cvtepu32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtepu32_ph)
- * [ ] [`_mm512_maskz_cvtepu64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtepu64_ph)
- * [ ] [`_mm512_maskz_cvtpd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtpd_ph)
 * [ ] [`_mm512_maskz_cvtph_epi16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtph_epi16)
 * [ ] [`_mm512_maskz_cvtph_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtph_epi32)
 * [ ] [`_mm512_maskz_cvtph_epi64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtph_epi64)
@@ -187,25 +141,14 @@
 * [ ] [`_mm512_maskz_cvttph_epu32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvttph_epu32)
 * [ ] [`_mm512_maskz_cvttph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvttph_epu64)
 * [ ] [`_mm512_maskz_cvtx_roundph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtx_roundph_ps)
- * [ ] [`_mm512_maskz_cvtx_roundps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtx_roundps_ph)
 * [ ] [`_mm512_maskz_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtxph_ps)
- * [ ] [`_mm512_maskz_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtxps_ph)
 * [ ] [`_mm512_set1_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_set1_pch)
- * [ ] [`_mm_cvt_roundi32_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundi32_sh)
- * [ ] [`_mm_cvt_roundi64_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundi64_sh)
- * [ ] [`_mm_cvt_roundsd_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundsd_sh)
 * [ ] [`_mm_cvt_roundsh_i32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundsh_i32)
 * [ ] [`_mm_cvt_roundsh_i64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundsh_i64)
 * [ ] [`_mm_cvt_roundsh_sd`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundsh_sd)
 * [ ] [`_mm_cvt_roundsh_ss`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundsh_ss)
 * [ ] [`_mm_cvt_roundsh_u32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundsh_u32)
 * [ ] [`_mm_cvt_roundsh_u64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundsh_u64)
- * [ ] [`_mm_cvt_roundss_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundss_sh)
- * [ ] [`_mm_cvt_roundu32_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundu32_sh)
- * [ ] [`_mm_cvt_roundu64_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundu64_sh)
- * [ ] [`_mm_cvti32_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvti32_sh)
- * [ ] [`_mm_cvti64_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvti64_sh)
- * [ ] [`_mm_cvtsd_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsd_sh)
 * [ ] [`_mm_cvtsh_h`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsh_h)
 * [ ] [`_mm_cvtsh_i32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsh_i32)
 * [ ] [`_mm_cvtsh_i64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsh_i64)
@@ -215,7 +158,6 @@
 * [ ] [`_mm_cvtsh_u64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsh_u64)
 * [ ] [`_mm_cvtsi128_si16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi128_si16)
 * [ ] [`_mm_cvtsi16_si128`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi16_si128)
- * [ ] [`_mm_cvtss_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtss_sh)
 * [ ] [`_mm_cvtt_roundsh_i32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtt_roundsh_i32)
 * [ ] [`_mm_cvtt_roundsh_i64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtt_roundsh_i64)
 * [ ] [`_mm_cvtt_roundsh_u32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtt_roundsh_u32)
@@ -224,37 +166,20 @@
 * [ ] [`_mm_cvttsh_i64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttsh_i64)
 * [ ] [`_mm_cvttsh_u32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttsh_u32)
 * [ ] [`_mm_cvttsh_u64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttsh_u64)
- * [ ] [`_mm_cvtu32_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtu32_sh)
- * [ ] [`_mm_cvtu64_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtu64_sh)
- * [ ] [`_mm_mask_cvt_roundsd_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvt_roundsd_sh)
 * [ ] [`_mm_mask_cvt_roundsh_sd`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvt_roundsh_sd)
 * [ ] [`_mm_mask_cvt_roundsh_ss`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvt_roundsh_ss)
- * [ ] [`_mm_mask_cvt_roundss_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvt_roundss_sh)
- * [ ] [`_mm_mask_cvtsd_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtsd_sh)
 * [ ] [`_mm_mask_cvtsh_sd`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtsh_sd)
 * [ ] [`_mm_mask_cvtsh_ss`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtsh_ss)
- * [ ] [`_mm_mask_cvtss_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtss_sh)
- * [ ] [`_mm_maskz_cvt_roundsd_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvt_roundsd_sh)
 * [ ] [`_mm_maskz_cvt_roundsh_sd`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvt_roundsh_sd)
 * [ ] [`_mm_maskz_cvt_roundsh_ss`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvt_roundsh_ss)
- * [ ] [`_mm_maskz_cvt_roundss_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvt_roundss_sh)
- * [ ] [`_mm_maskz_cvtsd_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtsd_sh)
 * [ ] [`_mm_maskz_cvtsh_sd`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtsh_sd)
 * [ ] [`_mm_maskz_cvtsh_ss`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtsh_ss)
- * [ ] [`_mm_maskz_cvtss_sh`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtss_sh)
 * [ ] [`_mm_set1_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set1_pch)

["AVX512_FP16", "AVX512VL"]

- * [ ] [`_mm256_cvtepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtepi16_ph)
- * [ ] [`_mm256_cvtepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtepi32_ph)
- * [ ] [`_mm256_cvtepi64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtepi64_ph)
- * [ ] [`_mm256_cvtepu16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtepu16_ph)
- * [ ] [`_mm256_cvtepu32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtepu32_ph)
- * [ ] [`_mm256_cvtepu64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtepu64_ph)
- * [ ] [`_mm256_cvtpd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtpd_ph)
 * [ ] [`_mm256_cvtph_epi16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtph_epi16)
 * [ ] [`_mm256_cvtph_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtph_epi32)
 * [ ] [`_mm256_cvtph_epi64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtph_epi64)
@@ -269,14 +194,6 @@
 * [ ] [`_mm256_cvttph_epu32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvttph_epu32)
 * [ ] [`_mm256_cvttph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvttph_epu64)
 * [ ] [`_mm256_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtxph_ps)
- * [ ] [`_mm256_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtxps_ph)
- * [ ] [`_mm256_mask_cvtepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepi16_ph)
- * [ ] [`_mm256_mask_cvtepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepi32_ph)
- * [ ] [`_mm256_mask_cvtepi64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepi64_ph)
- * [ ] [`_mm256_mask_cvtepu16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepu16_ph)
- * [ ] [`_mm256_mask_cvtepu32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepu32_ph)
- * [ ] [`_mm256_mask_cvtepu64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepu64_ph)
- * [ ] [`_mm256_mask_cvtpd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtpd_ph)
 * [ ] [`_mm256_mask_cvtph_epi16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtph_epi16)
 * [ ] [`_mm256_mask_cvtph_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtph_epi32)
 * [ ] [`_mm256_mask_cvtph_epi64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtph_epi64)
@@ -291,14 +208,6 @@
 * [ ] [`_mm256_mask_cvttph_epu32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvttph_epu32)
 * [ ] [`_mm256_mask_cvttph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvttph_epu64)
 * [ ] [`_mm256_mask_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtxph_ps)
- * [ ] [`_mm256_mask_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtxps_ph)
- * [ ] [`_mm256_maskz_cvtepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepi16_ph)
- * [ ] [`_mm256_maskz_cvtepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepi32_ph)
- * [ ] [`_mm256_maskz_cvtepi64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepi64_ph)
- * [ ] [`_mm256_maskz_cvtepu16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepu16_ph)
- * [ ] [`_mm256_maskz_cvtepu32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepu32_ph)
- * [ ] [`_mm256_maskz_cvtepu64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepu64_ph)
- * [ ] [`_mm256_maskz_cvtpd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtpd_ph)
 * [ ] [`_mm256_maskz_cvtph_epi16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtph_epi16)
 * [ ] [`_mm256_maskz_cvtph_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtph_epi32)
 * [ ] [`_mm256_maskz_cvtph_epi64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtph_epi64)
@@ -313,14 +222,6 @@
 * [ ] [`_mm256_maskz_cvttph_epu32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvttph_epu32)
 * [ ] [`_mm256_maskz_cvttph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvttph_epu64)
 * [ ] [`_mm256_maskz_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtxph_ps)
- * [ ] [`_mm256_maskz_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtxps_ph)
- * [ ] [`_mm_cvtepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepi16_ph)
- * [ ] [`_mm_cvtepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepi32_ph)
- * [ ] [`_mm_cvtepi64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepi64_ph)
- * [ ] [`_mm_cvtepu16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepu16_ph)
- * [ ] [`_mm_cvtepu32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepu32_ph)
- * [ ] [`_mm_cvtepu64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepu64_ph)
- * [ ] [`_mm_cvtpd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtpd_ph)
 * [ ] [`_mm_cvtph_epi16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtph_epi16)
 * [ ] [`_mm_cvtph_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtph_epi32)
 * [ ] [`_mm_cvtph_epi64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtph_epi64)
@@ -335,14 +236,6 @@
 * [ ] [`_mm_cvttph_epu32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttph_epu32)
 * [ ] [`_mm_cvttph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttph_epu64)
 * [ ] [`_mm_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtxph_ps)
- * [ ] [`_mm_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtxps_ph)
- * [ ] [`_mm_mask_cvtepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepi16_ph)
- * [ ] [`_mm_mask_cvtepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepi32_ph)
- * [ ] [`_mm_mask_cvtepi64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepi64_ph)
- * [ ] [`_mm_mask_cvtepu16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepu16_ph)
- * [ ] [`_mm_mask_cvtepu32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepu32_ph)
- * [ ] [`_mm_mask_cvtepu64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepu64_ph)
- * [ ] [`_mm_mask_cvtpd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtpd_ph)
 * [ ] [`_mm_mask_cvtph_epi16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtph_epi16)
 * [ ] [`_mm_mask_cvtph_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtph_epi32)
 * [ ] [`_mm_mask_cvtph_epi64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtph_epi64)
@@ -357,14 +250,6 @@
 * [ ] [`_mm_mask_cvttph_epu32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvttph_epu32)
 * [ ] [`_mm_mask_cvttph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvttph_epu64)
 * [ ] [`_mm_mask_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtxph_ps)
- * [ ] [`_mm_mask_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtxps_ph)
- * [ ] [`_mm_maskz_cvtepi16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepi16_ph)
- * [ ] [`_mm_maskz_cvtepi32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepi32_ph)
- * [ ] [`_mm_maskz_cvtepi64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepi64_ph)
- * [ ] [`_mm_maskz_cvtepu16_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepu16_ph)
- * [ ] [`_mm_maskz_cvtepu32_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepu32_ph)
- * [ ] [`_mm_maskz_cvtepu64_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepu64_ph)
- * [ ] [`_mm_maskz_cvtpd_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtpd_ph)
 * [ ] [`_mm_maskz_cvtph_epi16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtph_epi16)
 * [ ] [`_mm_maskz_cvtph_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtph_epi32)
 * [ ] [`_mm_maskz_cvtph_epi64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtph_epi64)
@@ -379,7 +264,6 @@
 * [ ] [`_mm_maskz_cvttph_epu32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvttph_epu32)
 * [ ] [`_mm_maskz_cvttph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvttph_epu64)
 * [ ] [`_mm_maskz_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtxph_ps)
- * [ ] [`_mm_maskz_cvtxps_ph`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtxps_ph)

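(Reviewer note, not part of the patch: every `_mask_`/`_maskz_` intrinsic ticked off above follows the same per-lane convention. A minimal scalar sketch of that convention, using `f32`/`i32` stand-ins since `f16` is still unstable; the function names here are hypothetical:)

```rust
// Illustrative model only: per-lane writemask vs. zeromask semantics.
fn mask_convert<const N: usize>(src: [f32; N], k: u32, a: [i32; N]) -> [f32; N] {
    let mut dst = src;
    for i in 0..N {
        if k & (1 << i) != 0 {
            dst[i] = a[i] as f32; // mask bit set: lane gets the converted result
        } // mask bit clear: lane is kept from `src` (writemask)
    }
    dst
}

fn maskz_convert<const N: usize>(k: u32, a: [i32; N]) -> [f32; N] {
    // A zeromask is just a writemask whose `src` is all zeros.
    mask_convert([0.0; N], k, a)
}
```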
diff --git a/crates/core_arch/src/x86/avx512fp16.rs b/crates/core_arch/src/x86/avx512fp16.rs
index 3889ce1f5e..be99002e51 100644
--- a/crates/core_arch/src/x86/avx512fp16.rs
+++ b/crates/core_arch/src/x86/avx512fp16.rs
@@ -11274,6 +11274,1811 @@ pub unsafe fn _mm512_permutexvar_ph(idx: __m512i, a: __m512h) -> __m512h {
     _mm512_castsi512_ph(_mm512_permutexvar_epi16(idx, _mm512_castph_si512(a)))
 }
 
+/// Convert packed signed 16-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepi16_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtw2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cvtepi16_ph(a: __m128i) -> __m128h {
+    vcvtw2ph_128(a.as_i16x8(), _MM_FROUND_CUR_DIRECTION)
+}
+
+/// Convert packed signed 16-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst using writemask k (elements are copied from src to dst when the corresponding
+/// mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepi16_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtw2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_cvtepi16_ph(src: __m128h, k: __mmask8, a: __m128i) -> __m128h {
+    simd_select_bitmask(k, _mm_cvtepi16_ph(a), src)
+}
+
+/// Convert packed signed 16-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepi16_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtw2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_cvtepi16_ph(k: __mmask8, a: __m128i) -> __m128h {
+    _mm_mask_cvtepi16_ph(_mm_setzero_ph(), k, a)
+}
+
+/// Convert packed signed 16-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtepi16_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtw2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_cvtepi16_ph(a: __m256i) -> __m256h {
+    vcvtw2ph_256(a.as_i16x16(), _MM_FROUND_CUR_DIRECTION)
+}
+
+/// Convert packed signed 16-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst using writemask k (elements are copied from src to dst when the corresponding
+/// mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepi16_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtw2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_mask_cvtepi16_ph(src: __m256h, k: __mmask16, a: __m256i) -> __m256h {
+    simd_select_bitmask(k, _mm256_cvtepi16_ph(a), src)
+}
+
+/// Convert packed signed 16-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepi16_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtw2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_maskz_cvtepi16_ph(k: __mmask16, a: __m256i) -> __m256h {
+    _mm256_mask_cvtepi16_ph(_mm256_setzero_ph(), k, a)
+}
+
+/// Convert packed signed 16-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtepi16_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtw2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_cvtepi16_ph(a: __m512i) -> __m512h {
+    vcvtw2ph_512(a.as_i16x32(), _MM_FROUND_CUR_DIRECTION)
+}
+
+/// Convert packed signed 16-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst using writemask k (elements are copied from src to dst when the corresponding
+/// mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtepi16_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtw2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_cvtepi16_ph(src: __m512h, k: __mmask32, a: __m512i) -> __m512h {
+    simd_select_bitmask(k, _mm512_cvtepi16_ph(a), src)
+}
+
+/// Convert packed signed 16-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtepi16_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtw2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cvtepi16_ph(k: __mmask32, a: __m512i) -> __m512h {
+    _mm512_mask_cvtepi16_ph(_mm512_setzero_ph(), k, a)
+}
+
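(Not part of the patch: a hypothetical caller of the family above, assuming a nightly toolchain with the `stdarch_x86_avx512_f16` library feature and an AVX512-FP16 CPU; it shows the plain, writemask, and zeromask variants side by side.)

```rust
#![feature(stdarch_x86_avx512_f16)]
use std::arch::x86_64::*;

#[target_feature(enable = "avx512fp16")]
unsafe fn widen_words(a: __m512i, fallback: __m512h) -> (__m512h, __m512h, __m512h) {
    let all = _mm512_cvtepi16_ph(a); // every lane converted
    let even = _mm512_mask_cvtepi16_ph(fallback, 0x5555_5555, a); // odd lanes kept from `fallback`
    let even_z = _mm512_maskz_cvtepi16_ph(0x5555_5555, a); // odd lanes zeroed
    (all, even, even_z)
}
```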
+/// Convert packed signed 16-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundepi16_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtw2ph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_cvt_roundepi16_ph<const ROUNDING: i32>(a: __m512i) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    vcvtw2ph_512(a.as_i16x32(), ROUNDING)
+}
+
+/// Convert packed signed 16-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst using writemask k (elements are copied from src to dst when the corresponding
+/// mask bit is not set).
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundepi16_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtw2ph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_cvt_roundepi16_ph<const ROUNDING: i32>(
+    src: __m512h,
+    k: __mmask32,
+    a: __m512i,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    simd_select_bitmask(k, _mm512_cvt_roundepi16_ph::<ROUNDING>(a), src)
+}
+
+/// Convert packed signed 16-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundepi16_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtw2ph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cvt_roundepi16_ph<const ROUNDING: i32>(
+    k: __mmask32,
+    a: __m512i,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    _mm512_mask_cvt_roundepi16_ph::<ROUNDING>(_mm512_setzero_ph(), k, a)
+}
+
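(Not part of the patch: a sketch of selecting an explicit rounding mode through the const generic, under the same nightly/CPU assumptions as the earlier example.)

```rust
#![feature(stdarch_x86_avx512_f16)]
use std::arch::x86_64::*;

#[target_feature(enable = "avx512fp16")]
unsafe fn convert_toward_zero(a: __m512i) -> __m512h {
    // Any `_MM_FROUND_TO_* | _MM_FROUND_NO_EXC` pair works the same way;
    // `_MM_FROUND_CUR_DIRECTION` instead defers to the current MXCSR rounding mode.
    _mm512_cvt_roundepi16_ph::<{ _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC }>(a)
}
```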
+/// Convert packed unsigned 16-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepu16_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtuw2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cvtepu16_ph(a: __m128i) -> __m128h {
+    vcvtuw2ph_128(a.as_u16x8(), _MM_FROUND_CUR_DIRECTION)
+}
+
+/// Convert packed unsigned 16-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst using writemask k (elements are copied from src to dst when the corresponding
+/// mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepu16_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtuw2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_cvtepu16_ph(src: __m128h, k: __mmask8, a: __m128i) -> __m128h {
+    simd_select_bitmask(k, _mm_cvtepu16_ph(a), src)
+}
+
+/// Convert packed unsigned 16-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepu16_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtuw2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_cvtepu16_ph(k: __mmask8, a: __m128i) -> __m128h {
+    _mm_mask_cvtepu16_ph(_mm_setzero_ph(), k, a)
+}
+
+/// Convert packed unsigned 16-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtepu16_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtuw2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_cvtepu16_ph(a: __m256i) -> __m256h {
+    vcvtuw2ph_256(a.as_u16x16(), _MM_FROUND_CUR_DIRECTION)
+}
+
+/// Convert packed unsigned 16-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst using writemask k (elements are copied from src to dst when the corresponding
+/// mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepu16_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtuw2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_mask_cvtepu16_ph(src: __m256h, k: __mmask16, a: __m256i) -> __m256h {
+    simd_select_bitmask(k, _mm256_cvtepu16_ph(a), src)
+}
+
+/// Convert packed unsigned 16-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepu16_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtuw2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_maskz_cvtepu16_ph(k: __mmask16, a: __m256i) -> __m256h {
+    _mm256_mask_cvtepu16_ph(_mm256_setzero_ph(), k, a)
+}
+
+/// Convert packed unsigned 16-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtepu16_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtuw2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_cvtepu16_ph(a: __m512i) -> __m512h {
+    vcvtuw2ph_512(a.as_u16x32(), _MM_FROUND_CUR_DIRECTION)
+}
+
+/// Convert packed unsigned 16-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst using writemask k (elements are copied from src to dst when the corresponding
+/// mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtepu16_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtuw2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_cvtepu16_ph(src: __m512h, k: __mmask32, a: __m512i) -> __m512h {
+    simd_select_bitmask(k, _mm512_cvtepu16_ph(a), src)
+}
+
+/// Convert packed unsigned 16-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtepu16_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtuw2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cvtepu16_ph(k: __mmask32, a: __m512i) -> __m512h {
+    _mm512_mask_cvtepu16_ph(_mm512_setzero_ph(), k, a)
+}
+
+/// Convert packed unsigned 16-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundepu16_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtuw2ph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_cvt_roundepu16_ph<const ROUNDING: i32>(a: __m512i) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    vcvtuw2ph_512(a.as_u16x32(), ROUNDING)
+}
+
+/// Convert packed unsigned 16-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst using writemask k (elements are copied from src to dst when the corresponding
+/// mask bit is not set).
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundepu16_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtuw2ph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_cvt_roundepu16_ph<const ROUNDING: i32>(
+    src: __m512h,
+    k: __mmask32,
+    a: __m512i,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    simd_select_bitmask(k, _mm512_cvt_roundepu16_ph::<ROUNDING>(a), src)
+}
+
+/// Convert packed unsigned 16-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundepu16_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtuw2ph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cvt_roundepu16_ph<const ROUNDING: i32>(
+    k: __mmask32,
+    a: __m512i,
+) -> __m512h {
+    static_assert_rounding!(ROUNDING);
+    _mm512_mask_cvt_roundepu16_ph::<ROUNDING>(_mm512_setzero_ph(), k, a)
+}
+
+/// Convert packed signed 32-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst. The upper 64 bits of dst are zeroed out.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepi32_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtdq2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cvtepi32_ph(a: __m128i) -> __m128h {
+    _mm_mask_cvtepi32_ph(_mm_setzero_ph(), 0xff, a)
+}
+
+/// Convert packed signed 32-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst using writemask k (elements are copied from src to dst when the corresponding
+/// mask bit is not set). The upper 64 bits of dst are zeroed out.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepi32_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtdq2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_cvtepi32_ph(src: __m128h, k: __mmask8, a: __m128i) -> __m128h {
+    vcvtdq2ph_128(a.as_i32x4(), src, k)
+}
+
+/// Convert packed signed 32-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+/// The upper 64 bits of dst are zeroed out.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepi32_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtdq2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_cvtepi32_ph(k: __mmask8, a: __m128i) -> __m128h {
+    _mm_mask_cvtepi32_ph(_mm_setzero_ph(), k, a)
+}
+
+/// Convert packed signed 32-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtepi32_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtdq2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_cvtepi32_ph(a: __m256i) -> __m128h {
+    vcvtdq2ph_256(a.as_i32x8(), _MM_FROUND_CUR_DIRECTION)
+}
+
+/// Convert packed signed 32-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst using writemask k (elements are copied from src to dst when the corresponding
+/// mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepi32_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtdq2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_mask_cvtepi32_ph(src: __m128h, k: __mmask8, a: __m256i) -> __m128h {
+    simd_select_bitmask(k, _mm256_cvtepi32_ph(a), src)
+}
+
+/// Convert packed signed 32-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepi32_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtdq2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_maskz_cvtepi32_ph(k: __mmask8, a: __m256i) -> __m128h {
+    _mm256_mask_cvtepi32_ph(_mm_setzero_ph(), k, a)
+}
+
+/// Convert packed signed 32-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtepi32_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtdq2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_cvtepi32_ph(a: __m512i) -> __m256h {
+    vcvtdq2ph_512(a.as_i32x16(), _MM_FROUND_CUR_DIRECTION)
+}
+
+/// Convert packed signed 32-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst using writemask k (elements are copied from src to dst when the corresponding
+/// mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtepi32_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtdq2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_cvtepi32_ph(src: __m256h, k: __mmask16, a: __m512i) -> __m256h {
+    simd_select_bitmask(k, _mm512_cvtepi32_ph(a), src)
+}
+
+/// Convert packed signed 32-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtepi32_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtdq2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cvtepi32_ph(k: __mmask16, a: __m512i) -> __m256h {
+    _mm512_mask_cvtepi32_ph(_mm256_setzero_ph(), k, a)
+}
+
+/// Convert packed signed 32-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundepi32_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtdq2ph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_cvt_roundepi32_ph<const ROUNDING: i32>(a: __m512i) -> __m256h {
+    static_assert_rounding!(ROUNDING);
+    vcvtdq2ph_512(a.as_i32x16(), ROUNDING)
+}
+
+/// Convert packed signed 32-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst using writemask k (elements are copied from src to dst when the corresponding
+/// mask bit is not set).
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundepi32_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtdq2ph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_cvt_roundepi32_ph<const ROUNDING: i32>(
+    src: __m256h,
+    k: __mmask16,
+    a: __m512i,
+) -> __m256h {
+    static_assert_rounding!(ROUNDING);
+    simd_select_bitmask(k, _mm512_cvt_roundepi32_ph::<ROUNDING>(a), src)
+}
+
+/// Convert packed signed 32-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundepi32_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtdq2ph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cvt_roundepi32_ph<const ROUNDING: i32>(
+    k: __mmask16,
+    a: __m512i,
+) -> __m256h {
+    static_assert_rounding!(ROUNDING);
+    _mm512_mask_cvt_roundepi32_ph::<ROUNDING>(_mm256_setzero_ph(), k, a)
+}
+
+/// Convert the signed 32-bit integer b to a half-precision (16-bit) floating-point element, store the
+/// result in the lower element of dst, and copy the upper 7 packed elements from a to the upper elements
+/// of dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvti32_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtsi2sh))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cvti32_sh(a: __m128h, b: i32) -> __m128h {
+    vcvtsi2sh(a, b, _MM_FROUND_CUR_DIRECTION)
+}
+
+/// Convert the signed 32-bit integer b to a half-precision (16-bit) floating-point element, store the
+/// result in the lower element of dst, and copy the upper 7 packed elements from a to the upper elements
+/// of dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundi32_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtsi2sh, ROUNDING = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cvt_roundi32_sh<const ROUNDING: i32>(a: __m128h, b: i32) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    vcvtsi2sh(a, b, ROUNDING)
+}
+
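(Not part of the patch: a sketch of the scalar form, same assumptions as above. Lane 0 is replaced by the converted integer while lanes 1..=7 are copied from `a`.)

```rust
#![feature(stdarch_x86_avx512_f16)]
use std::arch::x86_64::*;

#[target_feature(enable = "avx512fp16")]
unsafe fn splice_lane0(a: __m128h, x: i32) -> __m128h {
    // With an explicit rounding mode this would be
    // `_mm_cvt_roundi32_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, x)`.
    _mm_cvti32_sh(a, x)
}
```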
+/// Convert packed unsigned 32-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst. The upper 64 bits of dst are zeroed out.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepu32_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtudq2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cvtepu32_ph(a: __m128i) -> __m128h {
+    _mm_mask_cvtepu32_ph(_mm_setzero_ph(), 0xff, a)
+}
+
+/// Convert packed unsigned 32-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst using writemask k (elements are copied from src to dst when the corresponding
+/// mask bit is not set). The upper 64 bits of dst are zeroed out.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepu32_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtudq2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_cvtepu32_ph(src: __m128h, k: __mmask8, a: __m128i) -> __m128h {
+    vcvtudq2ph_128(a.as_u32x4(), src, k)
+}
+
+/// Convert packed unsigned 32-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+/// The upper 64 bits of dst are zeroed out.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepu32_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtudq2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_cvtepu32_ph(k: __mmask8, a: __m128i) -> __m128h {
+    _mm_mask_cvtepu32_ph(_mm_setzero_ph(), k, a)
+}
+
+/// Convert packed unsigned 32-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtepu32_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtudq2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_cvtepu32_ph(a: __m256i) -> __m128h {
+    vcvtudq2ph_256(a.as_u32x8(), _MM_FROUND_CUR_DIRECTION)
+}
+
+/// Convert packed unsigned 32-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst using writemask k (elements are copied from src to dst when the corresponding
+/// mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepu32_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtudq2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_mask_cvtepu32_ph(src: __m128h, k: __mmask8, a: __m256i) -> __m128h {
+    simd_select_bitmask(k, _mm256_cvtepu32_ph(a), src)
+}
+
+/// Convert packed unsigned 32-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepu32_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtudq2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_maskz_cvtepu32_ph(k: __mmask8, a: __m256i) -> __m128h {
+    _mm256_mask_cvtepu32_ph(_mm_setzero_ph(), k, a)
+}
+
+/// Convert packed unsigned 32-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtepu32_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtudq2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_cvtepu32_ph(a: __m512i) -> __m256h {
+    vcvtudq2ph_512(a.as_u32x16(), _MM_FROUND_CUR_DIRECTION)
+}
+
+/// Convert packed unsigned 32-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst using writemask k (elements are copied from src to dst when the corresponding
+/// mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtepu32_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtudq2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_cvtepu32_ph(src: __m256h, k: __mmask16, a: __m512i) -> __m256h {
+    simd_select_bitmask(k, _mm512_cvtepu32_ph(a), src)
+}
+
+/// Convert packed unsigned 32-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtepu32_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtudq2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cvtepu32_ph(k: __mmask16, a: __m512i) -> __m256h {
+    _mm512_mask_cvtepu32_ph(_mm256_setzero_ph(), k, a)
+}
+
+/// Convert packed unsigned 32-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundepu32_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtudq2ph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_cvt_roundepu32_ph<const ROUNDING: i32>(a: __m512i) -> __m256h {
+    static_assert_rounding!(ROUNDING);
+    vcvtudq2ph_512(a.as_u32x16(), ROUNDING)
+}
+
+/// Convert packed unsigned 32-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst using writemask k (elements are copied from src to dst when the corresponding
+/// mask bit is not set).
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundepu32_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtudq2ph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_cvt_roundepu32_ph<const ROUNDING: i32>(
+    src: __m256h,
+    k: __mmask16,
+    a: __m512i,
+) -> __m256h {
+    static_assert_rounding!(ROUNDING);
+    simd_select_bitmask(k, _mm512_cvt_roundepu32_ph::<ROUNDING>(a), src)
+}
+
+/// Convert packed unsigned 32-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundepu32_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtudq2ph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cvt_roundepu32_ph<const ROUNDING: i32>(
+    k: __mmask16,
+    a: __m512i,
+) -> __m256h {
+    static_assert_rounding!(ROUNDING);
+    _mm512_mask_cvt_roundepu32_ph::<ROUNDING>(_mm256_setzero_ph(), k, a)
+}
+
+/// Convert the unsigned 32-bit integer b to a half-precision (16-bit) floating-point element, store the
+/// result in the lower element of dst, and copy the upper 7 packed elements from a to the upper elements
+/// of dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtu32_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtusi2sh))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cvtu32_sh(a: __m128h, b: u32) -> __m128h {
+    vcvtusi2sh(a, b, _MM_FROUND_CUR_DIRECTION)
+}
+
+/// Convert the unsigned 32-bit integer b to a half-precision (16-bit) floating-point element, store the
+/// result in the lower element of dst, and copy the upper 7 packed elements from a to the upper elements
+/// of dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundu32_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtusi2sh, ROUNDING = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cvt_roundu32_sh<const ROUNDING: i32>(a: __m128h, b: u32) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    vcvtusi2sh(a, b, ROUNDING)
+}
+
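(Not part of the patch: a sketch of why the signed and unsigned scalar converters both exist, same assumptions as above. The same 32-bit pattern with the top bit set converts very differently, and both values overflow `f16`'s finite range of about 65504.)

```rust
#![feature(stdarch_x86_avx512_f16)]
use std::arch::x86_64::*;

#[target_feature(enable = "avx512fp16")]
unsafe fn sign_matters(a: __m128h) -> (__m128h, __m128h) {
    let bits: u32 = 0x8000_0000;
    let as_unsigned = _mm_cvtu32_sh(a, bits); // 2147483648 -> +inf under round-to-nearest
    let as_signed = _mm_cvti32_sh(a, bits as i32); // -2147483648 -> -inf
    (as_unsigned, as_signed)
}
```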
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepi64_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvtqq2ph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_cvtepi64_ph(a: __m128i) -> __m128h { + _mm_mask_cvtepi64_ph(_mm_setzero_ph(), 0xff, a) +} + +/// Convert packed signed 64-bit integers in a to packed half-precision (16-bit) floating-point elements, +/// and store the results in dst using writemask k (elements are copied from src to dst when the corresponding +/// mask bit is not set). The upper 96 bits of dst are zeroed out. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepi64_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvtqq2ph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mask_cvtepi64_ph(src: __m128h, k: __mmask8, a: __m128i) -> __m128h { + vcvtqq2ph_128(a.as_i64x2(), src, k) +} + +/// Convert packed signed 64-bit integers in a to packed half-precision (16-bit) floating-point elements, +/// and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). +/// The upper 96 bits of dst are zeroed out. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepi64_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvtqq2ph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_maskz_cvtepi64_ph(k: __mmask8, a: __m128i) -> __m128h { + _mm_mask_cvtepi64_ph(_mm_setzero_ph(), k, a) +} + +/// Convert packed signed 64-bit integers in a to packed half-precision (16-bit) floating-point elements, +/// and store the results in dst. The upper 64 bits of dst are zeroed out. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtepi64_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvtqq2ph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_cvtepi64_ph(a: __m256i) -> __m128h { + _mm256_mask_cvtepi64_ph(_mm_setzero_ph(), 0xff, a) +} + +/// Convert packed signed 64-bit integers in a to packed half-precision (16-bit) floating-point elements, +/// and store the results in dst using writemask k (elements are copied from src to dst when the corresponding +/// mask bit is not set). The upper 64 bits of dst are zeroed out. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepi64_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvtqq2ph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_mask_cvtepi64_ph(src: __m128h, k: __mmask8, a: __m256i) -> __m128h { + vcvtqq2ph_256(a.as_i64x4(), src, k) +} + +/// Convert packed signed 64-bit integers in a to packed half-precision (16-bit) floating-point elements, +/// and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). +/// The upper 64 bits of dst are zeroed out. 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepi64_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvtqq2ph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_maskz_cvtepi64_ph(k: __mmask8, a: __m256i) -> __m128h { + _mm256_mask_cvtepi64_ph(_mm_setzero_ph(), k, a) +} + +/// Convert packed signed 64-bit integers in a to packed half-precision (16-bit) floating-point elements, +/// and store the results in dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtepi64_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vcvtqq2ph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_cvtepi64_ph(a: __m512i) -> __m128h { + vcvtqq2ph_512(a.as_i64x8(), _MM_FROUND_CUR_DIRECTION) +} + +/// Convert packed signed 64-bit integers in a to packed half-precision (16-bit) floating-point elements, +/// and store the results in dst using writemask k (elements are copied from src to dst when the corresponding +/// mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtepi64_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vcvtqq2ph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_mask_cvtepi64_ph(src: __m128h, k: __mmask8, a: __m512i) -> __m128h { + simd_select_bitmask(k, _mm512_cvtepi64_ph(a), src) +} + +/// Convert packed signed 64-bit integers in a to packed half-precision (16-bit) floating-point elements, +/// and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtepi64_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vcvtqq2ph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_maskz_cvtepi64_ph(k: __mmask8, a: __m512i) -> __m128h { + _mm512_mask_cvtepi64_ph(_mm_setzero_ph(), k, a) +} + +/// Convert packed signed 64-bit integers in a to packed half-precision (16-bit) floating-point elements, +/// and store the results in dst. 
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundepi64_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtqq2ph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_cvt_roundepi64_ph<const ROUNDING: i32>(a: __m512i) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    vcvtqq2ph_512(a.as_i64x8(), ROUNDING)
+}
+
+/// Convert packed signed 64-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst using writemask k (elements are copied from src to dst when the corresponding
+/// mask bit is not set).
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundepi64_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtqq2ph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_cvt_roundepi64_ph<const ROUNDING: i32>(
+    src: __m128h,
+    k: __mmask8,
+    a: __m512i,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    simd_select_bitmask(k, _mm512_cvt_roundepi64_ph::<ROUNDING>(a), src)
+}
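+
+// Editor's note (not part of the original patch): a minimal usage sketch for these
+// rounding variants, assuming the nightly `f16` and `stdarch_x86_avx512_f16`
+// features and a CPU with AVX512-FP16. The rounding mode is a const generic, so a
+// combined mode needs a braced constant expression in the turbofish:
+//
+//     use core::arch::x86_64::*;
+//
+//     #[target_feature(enable = "avx512fp16")]
+//     unsafe fn truncate_i64_to_f16(a: __m512i) -> __m128h {
+//         // Truncate each signed 64-bit lane toward zero, suppressing exceptions.
+//         _mm512_cvt_roundepi64_ph::<{ _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC }>(a)
+//     }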
+
+/// Convert packed signed 64-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundepi64_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtqq2ph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cvt_roundepi64_ph<const ROUNDING: i32>(
+    k: __mmask8,
+    a: __m512i,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    _mm512_mask_cvt_roundepi64_ph::<ROUNDING>(_mm_setzero_ph(), k, a)
+}
+
+/// Convert packed unsigned 64-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst. The upper 96 bits of dst are zeroed out.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepu64_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtuqq2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cvtepu64_ph(a: __m128i) -> __m128h {
+    _mm_mask_cvtepu64_ph(_mm_setzero_ph(), 0xff, a)
+}
+
+/// Convert packed unsigned 64-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst using writemask k (elements are copied from src to dst when the corresponding
+/// mask bit is not set). The upper 96 bits of dst are zeroed out.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtepu64_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtuqq2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_cvtepu64_ph(src: __m128h, k: __mmask8, a: __m128i) -> __m128h {
+    vcvtuqq2ph_128(a.as_u64x2(), src, k)
+}
+
+/// Convert packed unsigned 64-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+/// The upper 96 bits of dst are zeroed out.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtepu64_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtuqq2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_cvtepu64_ph(k: __mmask8, a: __m128i) -> __m128h {
+    _mm_mask_cvtepu64_ph(_mm_setzero_ph(), k, a)
+}
+
+/// Convert packed unsigned 64-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst. The upper 64 bits of dst are zeroed out.
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtepu64_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvtuqq2ph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_cvtepu64_ph(a: __m256i) -> __m128h { + _mm256_mask_cvtepu64_ph(_mm_setzero_ph(), 0xff, a) +} + +/// Convert packed unsigned 64-bit integers in a to packed half-precision (16-bit) floating-point elements, +/// and store the results in dst using writemask k (elements are copied from src to dst when the corresponding +/// mask bit is not set). The upper 64 bits of dst are zeroed out. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtepu64_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvtuqq2ph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_mask_cvtepu64_ph(src: __m128h, k: __mmask8, a: __m256i) -> __m128h { + vcvtuqq2ph_256(a.as_u64x4(), src, k) +} + +/// Convert packed unsigned 64-bit integers in a to packed half-precision (16-bit) floating-point elements, +/// and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). +/// The upper 64 bits of dst are zeroed out. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtepu64_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvtuqq2ph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_maskz_cvtepu64_ph(k: __mmask8, a: __m256i) -> __m128h { + _mm256_mask_cvtepu64_ph(_mm_setzero_ph(), k, a) +} + +/// Convert packed unsigned 64-bit integers in a to packed half-precision (16-bit) floating-point elements, +/// and store the results in dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtepu64_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vcvtuqq2ph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_cvtepu64_ph(a: __m512i) -> __m128h { + vcvtuqq2ph_512(a.as_u64x8(), _MM_FROUND_CUR_DIRECTION) +} + +/// Convert packed unsigned 64-bit integers in a to packed half-precision (16-bit) floating-point elements, +/// and store the results in dst using writemask k (elements are copied from src to dst when the corresponding +/// mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtepu64_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vcvtuqq2ph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_mask_cvtepu64_ph(src: __m128h, k: __mmask8, a: __m512i) -> __m128h { + simd_select_bitmask(k, _mm512_cvtepu64_ph(a), src) +} + +/// Convert packed unsigned 64-bit integers in a to packed half-precision (16-bit) floating-point elements, +/// and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). 
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtepu64_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtuqq2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cvtepu64_ph(k: __mmask8, a: __m512i) -> __m128h {
+    _mm512_mask_cvtepu64_ph(_mm_setzero_ph(), k, a)
+}
+
+/// Convert packed unsigned 64-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundepu64_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtuqq2ph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_cvt_roundepu64_ph<const ROUNDING: i32>(a: __m512i) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    vcvtuqq2ph_512(a.as_u64x8(), ROUNDING)
+}
+
+/// Convert packed unsigned 64-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst using writemask k (elements are copied from src to dst when the corresponding
+/// mask bit is not set).
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundepu64_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtuqq2ph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_cvt_roundepu64_ph<const ROUNDING: i32>(
+    src: __m128h,
+    k: __mmask8,
+    a: __m512i,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    simd_select_bitmask(k, _mm512_cvt_roundepu64_ph::<ROUNDING>(a), src)
+}
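+
+// Editor's note (not part of the original patch): the mask/maskz pairs above all
+// follow the same composition: the unmasked form calls the write-masked form with
+// an all-ones mask, and the zero-masked form passes a zeroed `src`. A sketch of
+// the observable difference, with hypothetical inputs `src` and `a`:
+//
+//     // Only mask bit 1 is set, so just the upper i64 lane of `a` is converted.
+//     let merged = _mm_mask_cvtepu64_ph(src, 0b10, a); // lane 0 kept from `src`
+//     let zeroed = _mm_maskz_cvtepu64_ph(0b10, a);     // lane 0 becomes 0.0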
+
+/// Convert packed unsigned 64-bit integers in a to packed half-precision (16-bit) floating-point elements,
+/// and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundepu64_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtuqq2ph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cvt_roundepu64_ph<const ROUNDING: i32>(
+    k: __mmask8,
+    a: __m512i,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    _mm512_mask_cvt_roundepu64_ph::<ROUNDING>(_mm_setzero_ph(), k, a)
+}
+
+/// Convert packed single-precision (32-bit) floating-point elements in a to packed half-precision (16-bit)
+/// floating-point elements, and store the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtxps_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtps2phx))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cvtxps_ph(a: __m128) -> __m128h {
+    _mm_mask_cvtxps_ph(_mm_setzero_ph(), 0xff, a)
+}
+
+/// Convert packed single-precision (32-bit) floating-point elements in a to packed half-precision (16-bit)
+/// floating-point elements, and store the results in dst using writemask k (elements are copied from src to dst
+/// when the corresponding mask bit is not set). The upper 64 bits of dst are zeroed out.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtxps_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtps2phx))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_cvtxps_ph(src: __m128h, k: __mmask8, a: __m128) -> __m128h {
+    vcvtps2phx_128(a, src, k)
+}
+
+/// Convert packed single-precision (32-bit) floating-point elements in a to packed half-precision (16-bit)
+/// floating-point elements, and store the results in dst using zeromask k (elements are zeroed out when the
+/// corresponding mask bit is not set). The upper 64 bits of dst are zeroed out.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtxps_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtps2phx))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_cvtxps_ph(k: __mmask8, a: __m128) -> __m128h {
+    _mm_mask_cvtxps_ph(_mm_setzero_ph(), k, a)
+}
+
+/// Convert packed single-precision (32-bit) floating-point elements in a to packed half-precision (16-bit)
+/// floating-point elements, and store the results in dst.
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtxps_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvtps2phx))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_cvtxps_ph(a: __m256) -> __m128h { + _mm256_mask_cvtxps_ph(_mm_setzero_ph(), 0xff, a) +} + +/// Convert packed single-precision (32-bit) floating-point elements in a to packed half-precision (16-bit) +/// floating-point elements, and store the results in dst using writemask k (elements are copied from src to dst +/// when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtxps_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvtps2phx))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_mask_cvtxps_ph(src: __m128h, k: __mmask8, a: __m256) -> __m128h { + vcvtps2phx_256(a, src, k) +} + +/// Convert packed single-precision (32-bit) floating-point elements in a to packed half-precision (16-bit) +/// floating-point elements, and store the results in dst using zeromask k (elements are zeroed out when the +/// corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtxps_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvtps2phx))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_maskz_cvtxps_ph(k: __mmask8, a: __m256) -> __m128h { + _mm256_mask_cvtxps_ph(_mm_setzero_ph(), k, a) +} + +/// Convert packed single-precision (32-bit) floating-point elements in a to packed half-precision (16-bit) +/// floating-point elements, and store the results in dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtxps_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vcvtps2phx))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_cvtxps_ph(a: __m512) -> __m256h { + _mm512_mask_cvtxps_ph(_mm256_setzero_ph(), 0xffff, a) +} + +/// Convert packed single-precision (32-bit) floating-point elements in a to packed half-precision (16-bit) +/// floating-point elements, and store the results in dst using writemask k (elements are copied from src to dst +/// when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtxps_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vcvtps2phx))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_mask_cvtxps_ph(src: __m256h, k: __mmask16, a: __m512) -> __m256h { + vcvtps2phx_512(a, src, k, _MM_FROUND_CUR_DIRECTION) +} + +/// Convert packed single-precision (32-bit) floating-point elements in a to packed half-precision (16-bit) +/// floating-point elements, and store the results in dst using zeromask k (elements are zeroed out when the +/// corresponding mask bit is not set). 
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtxps_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtps2phx))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cvtxps_ph(k: __mmask16, a: __m512) -> __m256h {
+    _mm512_mask_cvtxps_ph(_mm256_setzero_ph(), k, a)
+}
+
+/// Convert packed single-precision (32-bit) floating-point elements in a to packed half-precision (16-bit)
+/// floating-point elements, and store the results in dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtx_roundps_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtps2phx, ROUNDING = 8))]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_cvtx_roundps_ph<const ROUNDING: i32>(a: __m512) -> __m256h {
+    static_assert_rounding!(ROUNDING);
+    _mm512_mask_cvtx_roundps_ph::<ROUNDING>(_mm256_setzero_ph(), 0xffff, a)
+}
+
+/// Convert packed single-precision (32-bit) floating-point elements in a to packed half-precision (16-bit)
+/// floating-point elements, and store the results in dst using writemask k (elements are copied from src to dst
+/// when the corresponding mask bit is not set).
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtx_roundps_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtps2phx, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_cvtx_roundps_ph<const ROUNDING: i32>(
+    src: __m256h,
+    k: __mmask16,
+    a: __m512,
+) -> __m256h {
+    static_assert_rounding!(ROUNDING);
+    vcvtps2phx_512(a, src, k, ROUNDING)
+}
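+
+// Editor's note (not part of the original patch): a sketch of the ps-to-ph
+// narrowing, assuming AVX512-FP16 is available. Sixteen f32 lanes in a __m512
+// narrow to sixteen f16 lanes, so the result is only half as wide:
+//
+//     let a = _mm512_set1_ps(1.5);
+//     let h: __m256h = _mm512_cvtxps_ph(a); // sixteen 1.5_f16 lanes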
+
+/// Convert packed single-precision (32-bit) floating-point elements in a to packed half-precision (16-bit)
+/// floating-point elements, and store the results in dst using zeromask k (elements are zeroed out when the
+/// corresponding mask bit is not set).
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtx_roundps_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtps2phx, ROUNDING = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cvtx_roundps_ph<const ROUNDING: i32>(
+    k: __mmask16,
+    a: __m512,
+) -> __m256h {
+    static_assert_rounding!(ROUNDING);
+    _mm512_mask_cvtx_roundps_ph::<ROUNDING>(_mm256_setzero_ph(), k, a)
+}
+
+/// Convert the lower single-precision (32-bit) floating-point element in b to a half-precision (16-bit)
+/// floating-point element, store the result in the lower element of dst, and copy the upper 7 packed
+/// elements from a to the upper elements of dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtss_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtss2sh))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cvtss_sh(a: __m128h, b: __m128) -> __m128h {
+    _mm_mask_cvtss_sh(_mm_undefined_ph(), 0xff, a, b)
+}
+
+/// Convert the lower single-precision (32-bit) floating-point element in b to a half-precision (16-bit)
+/// floating-point element, store the result in the lower element of dst using writemask k (the element
+/// is copied from src when mask bit 0 is not set), and copy the upper 7 packed elements from a to the
+/// upper elements of dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtss_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtss2sh))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_cvtss_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128) -> __m128h {
+    vcvtss2sh(a, b, src, k, _MM_FROUND_CUR_DIRECTION)
+}
+
+/// Convert the lower single-precision (32-bit) floating-point element in b to a half-precision (16-bit)
+/// floating-point element, store the result in the lower element of dst using zeromask k (the element
+/// is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from a to the upper
+/// elements of dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtss_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtss2sh))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_cvtss_sh(k: __mmask8, a: __m128h, b: __m128) -> __m128h {
+    _mm_mask_cvtss_sh(_mm_setzero_ph(), k, a, b)
+}
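+
+// Editor's note (not part of the original patch): a sketch of the scalar convert.
+// Only lane 0 of `b` is converted; lanes 1..=7 of the result come from `a`, like
+// the other `_sh` scalar intrinsics in this file:
+//
+//     let a = _mm_set1_ph(9.0);
+//     let b = _mm_set_ss(2.5);
+//     let r = _mm_cvtss_sh(a, b);
+//     // r = [2.5, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0] as f16 lanes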
+
+/// Convert the lower single-precision (32-bit) floating-point element in b to a half-precision (16-bit)
+/// floating-point element, store the result in the lower element of dst, and copy the upper 7 packed
+/// elements from a to the upper elements of dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundss_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtss2sh, ROUNDING = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cvt_roundss_sh<const ROUNDING: i32>(a: __m128h, b: __m128) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    _mm_mask_cvt_roundss_sh::<ROUNDING>(_mm_undefined_ph(), 0xff, a, b)
+}
+
+/// Convert the lower single-precision (32-bit) floating-point element in b to a half-precision (16-bit)
+/// floating-point element, store the result in the lower element of dst using writemask k (the element
+/// is copied from src when mask bit 0 is not set), and copy the upper 7 packed elements from a to the
+/// upper elements of dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvt_roundss_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtss2sh, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_cvt_roundss_sh<const ROUNDING: i32>(
+    src: __m128h,
+    k: __mmask8,
+    a: __m128h,
+    b: __m128,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    vcvtss2sh(a, b, src, k, ROUNDING)
+}
+
+/// Convert the lower single-precision (32-bit) floating-point element in b to a half-precision (16-bit)
+/// floating-point element, store the result in the lower element of dst using zeromask k (the element
+/// is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from a to the upper
+/// elements of dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvt_roundss_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtss2sh, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_cvt_roundss_sh<const ROUNDING: i32>(
+    k: __mmask8,
+    a: __m128h,
+    b: __m128,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    _mm_mask_cvt_roundss_sh::<ROUNDING>(_mm_setzero_ph(), k, a, b)
+}
+
+/// Convert packed double-precision (64-bit) floating-point elements in a to packed half-precision (16-bit)
+/// floating-point elements, and store the results in dst. The upper 96 bits of dst are zeroed out.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtpd_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtpd2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cvtpd_ph(a: __m128d) -> __m128h {
+    _mm_mask_cvtpd_ph(_mm_setzero_ph(), 0xff, a)
+}
+
+/// Convert packed double-precision (64-bit) floating-point elements in a to packed half-precision (16-bit)
+/// floating-point elements, and store the results in dst using writemask k (elements are copied from src to dst
+/// when the corresponding mask bit is not set). The upper 96 bits of dst are zeroed out.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtpd_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtpd2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_cvtpd_ph(src: __m128h, k: __mmask8, a: __m128d) -> __m128h {
+    vcvtpd2ph_128(a, src, k)
+}
+
+/// Convert packed double-precision (64-bit) floating-point elements in a to packed half-precision (16-bit)
+/// floating-point elements, and store the results in dst using zeromask k (elements are zeroed out when the
+/// corresponding mask bit is not set). The upper 96 bits of dst are zeroed out.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtpd_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtpd2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_cvtpd_ph(k: __mmask8, a: __m128d) -> __m128h {
+    _mm_mask_cvtpd_ph(_mm_setzero_ph(), k, a)
+}
+
+/// Convert packed double-precision (64-bit) floating-point elements in a to packed half-precision (16-bit)
+/// floating-point elements, and store the results in dst. The upper 64 bits of dst are zeroed out.
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtpd_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvtpd2ph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_cvtpd_ph(a: __m256d) -> __m128h { + _mm256_mask_cvtpd_ph(_mm_setzero_ph(), 0xff, a) +} + +/// Convert packed double-precision (64-bit) floating-point elements in a to packed half-precision (16-bit) +/// floating-point elements, and store the results in dst using writemask k (elements are copied from src to dst +/// when the corresponding mask bit is not set). The upper 64 bits of dst are zeroed out. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtpd_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvtpd2ph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_mask_cvtpd_ph(src: __m128h, k: __mmask8, a: __m256d) -> __m128h { + vcvtpd2ph_256(a, src, k) +} + +/// Convert packed double-precision (64-bit) floating-point elements in a to packed half-precision (16-bit) +/// floating-point elements, and store the results in dst using zeromask k (elements are zeroed out when the +/// corresponding mask bit is not set). The upper 64 bits of dst are zeroed out. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtpd_ph) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvtpd2ph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_maskz_cvtpd_ph(k: __mmask8, a: __m256d) -> __m128h { + _mm256_mask_cvtpd_ph(_mm_setzero_ph(), k, a) +} + +/// Convert packed double-precision (64-bit) floating-point elements in a to packed half-precision (16-bit) +/// floating-point elements, and store the results in dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtpd_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vcvtpd2ph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_cvtpd_ph(a: __m512d) -> __m128h { + _mm512_mask_cvtpd_ph(_mm_setzero_ph(), 0xff, a) +} + +/// Convert packed double-precision (64-bit) floating-point elements in a to packed half-precision (16-bit) +/// floating-point elements, and store the results in dst using writemask k (elements are copied from src to dst +/// when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtpd_ph) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vcvtpd2ph))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_mask_cvtpd_ph(src: __m128h, k: __mmask8, a: __m512d) -> __m128h { + vcvtpd2ph_512(a, src, k, _MM_FROUND_CUR_DIRECTION) +} + +/// Convert packed double-precision (64-bit) floating-point elements in a to packed half-precision (16-bit) +/// floating-point elements, and store the results in dst using zeromask k (elements are zeroed out when the +/// corresponding mask bit is not set). 
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtpd_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtpd2ph))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cvtpd_ph(k: __mmask8, a: __m512d) -> __m128h {
+    _mm512_mask_cvtpd_ph(_mm_setzero_ph(), k, a)
+}
+
+/// Convert packed double-precision (64-bit) floating-point elements in a to packed half-precision (16-bit)
+/// floating-point elements, and store the results in dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundpd_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtpd2ph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_cvt_roundpd_ph<const ROUNDING: i32>(a: __m512d) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    _mm512_mask_cvt_roundpd_ph::<ROUNDING>(_mm_setzero_ph(), 0xff, a)
+}
+
+/// Convert packed double-precision (64-bit) floating-point elements in a to packed half-precision (16-bit)
+/// floating-point elements, and store the results in dst using writemask k (elements are copied from src to dst
+/// when the corresponding mask bit is not set).
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundpd_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtpd2ph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_cvt_roundpd_ph<const ROUNDING: i32>(
+    src: __m128h,
+    k: __mmask8,
+    a: __m512d,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    vcvtpd2ph_512(a, src, k, ROUNDING)
+}
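+
+// Editor's note (not part of the original patch): every pd-to-ph variant returns a
+// __m128h, since even eight f64 lanes narrow to eight f16 lanes (128 bits); the
+// 128- and 256-bit sources leave the unused upper bits of dst zeroed. A sketch:
+//
+//     let a = _mm512_set1_pd(-0.25);
+//     let h: __m128h = _mm512_cvtpd_ph(a); // eight -0.25_f16 lanes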
+
+/// Convert packed double-precision (64-bit) floating-point elements in a to packed half-precision (16-bit)
+/// floating-point elements, and store the results in dst using zeromask k (elements are zeroed out when the
+/// corresponding mask bit is not set).
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundpd_ph)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtpd2ph, ROUNDING = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cvt_roundpd_ph<const ROUNDING: i32>(k: __mmask8, a: __m512d) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    _mm512_mask_cvt_roundpd_ph::<ROUNDING>(_mm_setzero_ph(), k, a)
+}
+
+/// Convert the lower double-precision (64-bit) floating-point element in b to a half-precision (16-bit)
+/// floating-point element, store the result in the lower element of dst, and copy the upper 7 packed
+/// elements from a to the upper elements of dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsd_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtsd2sh))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cvtsd_sh(a: __m128h, b: __m128d) -> __m128h {
+    _mm_mask_cvtsd_sh(_mm_undefined_ph(), 0xff, a, b)
+}
+
+/// Convert the lower double-precision (64-bit) floating-point element in b to a half-precision (16-bit)
+/// floating-point element, store the result in the lower element of dst using writemask k (the element
+/// is copied from src when mask bit 0 is not set), and copy the upper 7 packed elements from a to the
+/// upper elements of dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtsd_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtsd2sh))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_cvtsd_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128d) -> __m128h {
+    vcvtsd2sh(a, b, src, k, _MM_FROUND_CUR_DIRECTION)
+}
+
+/// Convert the lower double-precision (64-bit) floating-point element in b to a half-precision (16-bit)
+/// floating-point element, store the result in the lower element of dst using zeromask k (the element
+/// is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from a to the upper
+/// elements of dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtsd_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtsd2sh))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_cvtsd_sh(k: __mmask8, a: __m128h, b: __m128d) -> __m128h {
+    _mm_mask_cvtsd_sh(_mm_setzero_ph(), k, a, b)
+}
+
+/// Convert the lower double-precision (64-bit) floating-point element in b to a half-precision (16-bit)
+/// floating-point element, store the result in the lower element of dst, and copy the upper 7 packed
+/// elements from a to the upper elements of dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundsd_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtsd2sh, ROUNDING = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cvt_roundsd_sh<const ROUNDING: i32>(a: __m128h, b: __m128d) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    _mm_mask_cvt_roundsd_sh::<ROUNDING>(_mm_undefined_ph(), 0xff, a, b)
+}
+
+/// Convert the lower double-precision (64-bit) floating-point element in b to a half-precision (16-bit)
+/// floating-point element, store the result in the lower element of dst using writemask k (the element
+/// is copied from src when mask bit 0 is not set), and copy the upper 7 packed elements from a to the
+/// upper elements of dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvt_roundsd_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtsd2sh, ROUNDING = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_cvt_roundsd_sh<const ROUNDING: i32>(
+    src: __m128h,
+    k: __mmask8,
+    a: __m128h,
+    b: __m128d,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    vcvtsd2sh(a, b, src, k, ROUNDING)
+}
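+
+// Editor's note (not part of the original patch): a sketch of the zero-masked
+// scalar form below. With mask bit 0 clear, lane 0 of the result is zeroed rather
+// than taking the converted value, while lanes 1..=7 still come from `a`:
+//
+//     let a = _mm_set1_ph(4.0);
+//     let b = _mm_set_sd(1.0);
+//     let r = _mm_maskz_cvt_roundsd_sh::<_MM_FROUND_CUR_DIRECTION>(0, a, b);
+//     // r = [0.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0] as f16 lanes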
+
+/// Convert the lower double-precision (64-bit) floating-point element in b to a half-precision (16-bit)
+/// floating-point element, store the result in the lower element of dst using zeromask k (the element
+/// is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from a to the upper
+/// elements of dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvt_roundsd_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtsd2sh, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_cvt_roundsd_sh<const ROUNDING: i32>(
+    k: __mmask8,
+    a: __m128h,
+    b: __m128d,
+) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    _mm_mask_cvt_roundsd_sh::<ROUNDING>(_mm_setzero_ph(), k, a, b)
+}
+
 #[allow(improper_ctypes)]
 extern "C" {
     #[link_name = "llvm.x86.avx512fp16.mask.cmp.sh"]
     fn vcmpsh(a: __m128h, b: __m128h, imm8: i32, mask: __mmask8, sae: i32) -> __mmask8;
@@ -11281,1035 +13086,1669 @@ extern "C" {
     #[link_name = "llvm.x86.avx512fp16.vcomi.sh"]
     fn vcomish(a: __m128h, b: __m128h, imm8: i32, sae: i32) -> i32;
-    #[link_name = "llvm.x86.avx512fp16.add.ph.512"]
-    fn vaddph(a: __m512h, b: __m512h, rounding: i32) -> __m512h;
-    #[link_name = "llvm.x86.avx512fp16.sub.ph.512"]
-    fn vsubph(a: __m512h, b: __m512h, rounding: i32) -> __m512h;
-    #[link_name = "llvm.x86.avx512fp16.mul.ph.512"]
-    fn vmulph(a: __m512h, b: __m512h, rounding: i32) -> __m512h;
-    #[link_name = "llvm.x86.avx512fp16.div.ph.512"]
-    fn vdivph(a: __m512h, b: __m512h, rounding: i32) -> __m512h;
+    #[link_name = "llvm.x86.avx512fp16.add.ph.512"]
+    fn vaddph(a: __m512h, b: __m512h, rounding: i32) -> __m512h;
+    #[link_name = "llvm.x86.avx512fp16.sub.ph.512"]
+    fn vsubph(a: __m512h, b: __m512h, rounding: i32) -> __m512h;
+    #[link_name = "llvm.x86.avx512fp16.mul.ph.512"]
+    fn vmulph(a: __m512h, b: __m512h, rounding: i32) -> __m512h;
+    #[link_name = "llvm.x86.avx512fp16.div.ph.512"]
+    fn vdivph(a: __m512h, b: __m512h, rounding: i32) -> __m512h;
+
+    #[link_name = "llvm.x86.avx512fp16.mask.add.sh.round"]
+    fn vaddsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h;
+    #[link_name = "llvm.x86.avx512fp16.mask.sub.sh.round"]
+    fn vsubsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h;
+    #[link_name = "llvm.x86.avx512fp16.mask.mul.sh.round"]
+    fn vmulsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h;
+    #[link_name = "llvm.x86.avx512fp16.mask.div.sh.round"]
+    fn vdivsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h;
+
+    #[link_name = "llvm.x86.avx512fp16.mask.vfmul.cph.128"]
+    fn vfmulcph_128(a: __m128, b: __m128, src: __m128, k: __mmask8) -> __m128;
+    #[link_name = "llvm.x86.avx512fp16.mask.vfmul.cph.256"]
+    fn vfmulcph_256(a: __m256, b: __m256, src: __m256, k: __mmask8) -> __m256;
+    #[link_name = "llvm.x86.avx512fp16.mask.vfmul.cph.512"]
+    fn vfmulcph_512(a: __m512, b: __m512, src: __m512, k: __mmask16, rounding: i32) -> __m512;
+    #[link_name = "llvm.x86.avx512fp16.mask.vfmul.csh"]
+    fn vfmulcsh(a: __m128, b: __m128, src: __m128, k: __mmask8, rounding: i32) -> __m128;
+
+    #[link_name = "llvm.x86.avx512fp16.mask.vfcmul.cph.128"]
+    fn vfcmulcph_128(a: __m128, b: __m128, src: __m128, k: __mmask8) -> __m128;
+    #[link_name = "llvm.x86.avx512fp16.mask.vfcmul.cph.256"]
+    fn vfcmulcph_256(a: __m256, b: __m256, src: 
__m256, k: __mmask8) -> __m256; + #[link_name = "llvm.x86.avx512fp16.mask.vfcmul.cph.512"] + fn vfcmulcph_512(a: __m512, b: __m512, src: __m512, k: __mmask16, rounding: i32) -> __m512; + #[link_name = "llvm.x86.avx512fp16.mask.vfcmul.csh"] + fn vfcmulcsh(a: __m128, b: __m128, src: __m128, k: __mmask8, rounding: i32) -> __m128; + + #[link_name = "llvm.x86.avx512fp16.mask.vfmadd.cph.128"] + fn vfmaddcph_mask3_128(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128; + #[link_name = "llvm.x86.avx512fp16.maskz.vfmadd.cph.128"] + fn vfmaddcph_maskz_128(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128; + #[link_name = "llvm.x86.avx512fp16.mask.vfmadd.cph.256"] + fn vfmaddcph_mask3_256(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256; + #[link_name = "llvm.x86.avx512fp16.maskz.vfmadd.cph.256"] + fn vfmaddcph_maskz_256(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256; + #[link_name = "llvm.x86.avx512fp16.mask.vfmadd.cph.512"] + fn vfmaddcph_mask3_512(a: __m512, b: __m512, c: __m512, k: __mmask16, rounding: i32) -> __m512; + #[link_name = "llvm.x86.avx512fp16.maskz.vfmadd.cph.512"] + fn vfmaddcph_maskz_512(a: __m512, b: __m512, c: __m512, k: __mmask16, rounding: i32) -> __m512; + #[link_name = "llvm.x86.avx512fp16.mask.vfmadd.csh"] + fn vfmaddcsh_mask(a: __m128, b: __m128, c: __m128, k: __mmask8, rounding: i32) -> __m128; + #[link_name = "llvm.x86.avx512fp16.maskz.vfmadd.csh"] + fn vfmaddcsh_maskz(a: __m128, b: __m128, c: __m128, k: __mmask8, rounding: i32) -> __m128; + + #[link_name = "llvm.x86.avx512fp16.mask.vfcmadd.cph.128"] + fn vfcmaddcph_mask3_128(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128; + #[link_name = "llvm.x86.avx512fp16.maskz.vfcmadd.cph.128"] + fn vfcmaddcph_maskz_128(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128; + #[link_name = "llvm.x86.avx512fp16.mask.vfcmadd.cph.256"] + fn vfcmaddcph_mask3_256(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256; + #[link_name = "llvm.x86.avx512fp16.maskz.vfcmadd.cph.256"] + fn vfcmaddcph_maskz_256(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256; + #[link_name = "llvm.x86.avx512fp16.mask.vfcmadd.cph.512"] + fn vfcmaddcph_mask3_512(a: __m512, b: __m512, c: __m512, k: __mmask16, rounding: i32) + -> __m512; + #[link_name = "llvm.x86.avx512fp16.maskz.vfcmadd.cph.512"] + fn vfcmaddcph_maskz_512(a: __m512, b: __m512, c: __m512, k: __mmask16, rounding: i32) + -> __m512; + #[link_name = "llvm.x86.avx512fp16.mask.vfcmadd.csh"] + fn vfcmaddcsh_mask(a: __m128, b: __m128, c: __m128, k: __mmask8, rounding: i32) -> __m128; + #[link_name = "llvm.x86.avx512fp16.maskz.vfcmadd.csh"] + fn vfcmaddcsh_maskz(a: __m128, b: __m128, c: __m128, k: __mmask8, rounding: i32) -> __m128; + + #[link_name = "llvm.x86.avx512fp16.vfmadd.ph.512"] + fn vfmaddph_512(a: __m512h, b: __m512h, c: __m512h, rounding: i32) -> __m512h; + #[link_name = "llvm.fma.f16"] + fn fmaf16(a: f16, b: f16, c: f16) -> f16; // TODO: use `crate::intrinsics::fmaf16` when it's available + #[link_name = "llvm.x86.avx512fp16.vfmadd.f16"] + fn vfmaddsh(a: f16, b: f16, c: f16, rounding: i32) -> f16; + + #[link_name = "llvm.x86.avx512fp16.vfmaddsub.ph.128"] + fn vfmaddsubph_128(a: __m128h, b: __m128h, c: __m128h) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.vfmaddsub.ph.256"] + fn vfmaddsubph_256(a: __m256h, b: __m256h, c: __m256h) -> __m256h; + #[link_name = "llvm.x86.avx512fp16.vfmaddsub.ph.512"] + fn vfmaddsubph_512(a: __m512h, b: __m512h, c: __m512h, rounding: i32) -> __m512h; + + #[link_name = "llvm.x86.avx512fp16.mask.rcp.ph.128"] + fn 
vrcpph_128(a: __m128h, src: __m128h, k: __mmask8) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.rcp.ph.256"] + fn vrcpph_256(a: __m256h, src: __m256h, k: __mmask16) -> __m256h; + #[link_name = "llvm.x86.avx512fp16.mask.rcp.ph.512"] + fn vrcpph_512(a: __m512h, src: __m512h, k: __mmask32) -> __m512h; + #[link_name = "llvm.x86.avx512fp16.mask.rcp.sh"] + fn vrcpsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8) -> __m128h; + + #[link_name = "llvm.x86.avx512fp16.mask.rsqrt.ph.128"] + fn vrsqrtph_128(a: __m128h, src: __m128h, k: __mmask8) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.rsqrt.ph.256"] + fn vrsqrtph_256(a: __m256h, src: __m256h, k: __mmask16) -> __m256h; + #[link_name = "llvm.x86.avx512fp16.mask.rsqrt.ph.512"] + fn vrsqrtph_512(a: __m512h, src: __m512h, k: __mmask32) -> __m512h; + #[link_name = "llvm.x86.avx512fp16.mask.rsqrt.sh"] + fn vrsqrtsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8) -> __m128h; + + #[link_name = "llvm.x86.avx512fp16.sqrt.ph.512"] + fn vsqrtph_512(a: __m512h, rounding: i32) -> __m512h; + #[link_name = "llvm.x86.avx512fp16.mask.sqrt.sh"] + fn vsqrtsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; + + #[link_name = "llvm.x86.avx512fp16.max.ph.128"] + fn vmaxph_128(a: __m128h, b: __m128h) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.max.ph.256"] + fn vmaxph_256(a: __m256h, b: __m256h) -> __m256h; + #[link_name = "llvm.x86.avx512fp16.max.ph.512"] + fn vmaxph_512(a: __m512h, b: __m512h, sae: i32) -> __m512h; + #[link_name = "llvm.x86.avx512fp16.mask.max.sh.round"] + fn vmaxsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, sae: i32) -> __m128h; + + #[link_name = "llvm.x86.avx512fp16.min.ph.128"] + fn vminph_128(a: __m128h, b: __m128h) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.min.ph.256"] + fn vminph_256(a: __m256h, b: __m256h) -> __m256h; + #[link_name = "llvm.x86.avx512fp16.min.ph.512"] + fn vminph_512(a: __m512h, b: __m512h, sae: i32) -> __m512h; + #[link_name = "llvm.x86.avx512fp16.mask.min.sh.round"] + fn vminsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, sae: i32) -> __m128h; + + #[link_name = "llvm.x86.avx512fp16.mask.getexp.ph.128"] + fn vgetexpph_128(a: __m128h, src: __m128h, k: __mmask8) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.getexp.ph.256"] + fn vgetexpph_256(a: __m256h, src: __m256h, k: __mmask16) -> __m256h; + #[link_name = "llvm.x86.avx512fp16.mask.getexp.ph.512"] + fn vgetexpph_512(a: __m512h, src: __m512h, k: __mmask32, sae: i32) -> __m512h; + #[link_name = "llvm.x86.avx512fp16.mask.getexp.sh"] + fn vgetexpsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, sae: i32) -> __m128h; + + #[link_name = "llvm.x86.avx512fp16.mask.getmant.ph.128"] + fn vgetmantph_128(a: __m128h, imm8: i32, src: __m128h, k: __mmask8) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.getmant.ph.256"] + fn vgetmantph_256(a: __m256h, imm8: i32, src: __m256h, k: __mmask16) -> __m256h; + #[link_name = "llvm.x86.avx512fp16.mask.getmant.ph.512"] + fn vgetmantph_512(a: __m512h, imm8: i32, src: __m512h, k: __mmask32, sae: i32) -> __m512h; + #[link_name = "llvm.x86.avx512fp16.mask.getmant.sh"] + fn vgetmantsh( + a: __m128h, + b: __m128h, + imm8: i32, + src: __m128h, + k: __mmask8, + sae: i32, + ) -> __m128h; + + #[link_name = "llvm.x86.avx512fp16.mask.rndscale.ph.128"] + fn vrndscaleph_128(a: __m128h, imm8: i32, src: __m128h, k: __mmask8) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.rndscale.ph.256"] + fn vrndscaleph_256(a: __m256h, imm8: i32, src: __m256h, k: __mmask16) -> 
__m256h; + #[link_name = "llvm.x86.avx512fp16.mask.rndscale.ph.512"] + fn vrndscaleph_512(a: __m512h, imm8: i32, src: __m512h, k: __mmask32, sae: i32) -> __m512h; + #[link_name = "llvm.x86.avx512fp16.mask.rndscale.sh"] + fn vrndscalesh( + a: __m128h, + b: __m128h, + src: __m128h, + k: __mmask8, + imm8: i32, + sae: i32, + ) -> __m128h; + + #[link_name = "llvm.x86.avx512fp16.mask.scalef.ph.128"] + fn vscalefph_128(a: __m128h, b: __m128h, src: __m128h, k: __mmask8) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.scalef.ph.256"] + fn vscalefph_256(a: __m256h, b: __m256h, src: __m256h, k: __mmask16) -> __m256h; + #[link_name = "llvm.x86.avx512fp16.mask.scalef.ph.512"] + fn vscalefph_512(a: __m512h, b: __m512h, src: __m512h, k: __mmask32, rounding: i32) -> __m512h; + #[link_name = "llvm.x86.avx512fp16.mask.scalef.sh"] + fn vscalefsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; + + #[link_name = "llvm.x86.avx512fp16.mask.reduce.ph.128"] + fn vreduceph_128(a: __m128h, imm8: i32, src: __m128h, k: __mmask8) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.reduce.ph.256"] + fn vreduceph_256(a: __m256h, imm8: i32, src: __m256h, k: __mmask16) -> __m256h; + #[link_name = "llvm.x86.avx512fp16.mask.reduce.ph.512"] + fn vreduceph_512(a: __m512h, imm8: i32, src: __m512h, k: __mmask32, sae: i32) -> __m512h; + #[link_name = "llvm.x86.avx512fp16.mask.reduce.sh"] + fn vreducesh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, imm8: i32, sae: i32) + -> __m128h; + + #[link_name = "llvm.x86.avx512fp16.mask.fpclass.sh"] + fn vfpclasssh(a: __m128h, imm8: i32, k: __mmask8) -> __mmask8; + + #[link_name = "llvm.x86.avx512.sitofp.round.v8f16.v8i16"] + fn vcvtw2ph_128(a: i16x8, rounding: i32) -> __m128h; + #[link_name = "llvm.x86.avx512.sitofp.round.v16f16.v16i16"] + fn vcvtw2ph_256(a: i16x16, rounding: i32) -> __m256h; + #[link_name = "llvm.x86.avx512.sitofp.round.v32f16.v32i16"] + fn vcvtw2ph_512(a: i16x32, rounding: i32) -> __m512h; + #[link_name = "llvm.x86.avx512.uitofp.round.v8f16.v8u16"] + fn vcvtuw2ph_128(a: u16x8, rounding: i32) -> __m128h; + #[link_name = "llvm.x86.avx512.uitofp.round.v16f16.v16u16"] + fn vcvtuw2ph_256(a: u16x16, rounding: i32) -> __m256h; + #[link_name = "llvm.x86.avx512.uitofp.round.v32f16.v32u16"] + fn vcvtuw2ph_512(a: u16x32, rounding: i32) -> __m512h; + + #[link_name = "llvm.x86.avx512fp16.mask.vcvtdq2ph.128"] + fn vcvtdq2ph_128(a: i32x4, src: __m128h, k: __mmask8) -> __m128h; + #[link_name = "llvm.x86.avx512.sitofp.round.v8f16.v8i32"] + fn vcvtdq2ph_256(a: i32x8, rounding: i32) -> __m128h; + #[link_name = "llvm.x86.avx512.sitofp.round.v16f16.v16i32"] + fn vcvtdq2ph_512(a: i32x16, rounding: i32) -> __m256h; + #[link_name = "llvm.x86.avx512fp16.vcvtsi2sh"] + fn vcvtsi2sh(a: __m128h, b: i32, rounding: i32) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.vcvtudq2ph.128"] + fn vcvtudq2ph_128(a: u32x4, src: __m128h, k: __mmask8) -> __m128h; + #[link_name = "llvm.x86.avx512.uitofp.round.v8f16.v8u32"] + fn vcvtudq2ph_256(a: u32x8, rounding: i32) -> __m128h; + #[link_name = "llvm.x86.avx512.uitofp.round.v16f16.v16u32"] + fn vcvtudq2ph_512(a: u32x16, rounding: i32) -> __m256h; + #[link_name = "llvm.x86.avx512fp16.vcvtusi2sh"] + fn vcvtusi2sh(a: __m128h, b: u32, rounding: i32) -> __m128h; + + #[link_name = "llvm.x86.avx512fp16.mask.vcvtqq2ph.128"] + fn vcvtqq2ph_128(a: i64x2, src: __m128h, k: __mmask8) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.vcvtqq2ph.256"] + fn vcvtqq2ph_256(a: i64x4, src: __m128h, k: __mmask8) -> __m128h; + 
#[link_name = "llvm.x86.avx512.sitofp.round.v8f16.v8i64"] + fn vcvtqq2ph_512(a: i64x8, rounding: i32) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.vcvtuqq2ph.128"] + fn vcvtuqq2ph_128(a: u64x2, src: __m128h, k: __mmask8) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.vcvtuqq2ph.256"] + fn vcvtuqq2ph_256(a: u64x4, src: __m128h, k: __mmask8) -> __m128h; + #[link_name = "llvm.x86.avx512.uitofp.round.v8f16.v8u64"] + fn vcvtuqq2ph_512(a: u64x8, rounding: i32) -> __m128h; + + #[link_name = "llvm.x86.avx512fp16.mask.vcvtps2phx.128"] + fn vcvtps2phx_128(a: __m128, src: __m128h, k: __mmask8) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.vcvtps2phx.256"] + fn vcvtps2phx_256(a: __m256, src: __m128h, k: __mmask8) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.vcvtps2phx.512"] + fn vcvtps2phx_512(a: __m512, src: __m256h, k: __mmask16, rounding: i32) -> __m256h; + #[link_name = "llvm.x86.avx512fp16.mask.vcvtss2sh.round"] + fn vcvtss2sh(a: __m128h, b: __m128, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; + + #[link_name = "llvm.x86.avx512fp16.mask.vcvtpd2ph.128"] + fn vcvtpd2ph_128(a: __m128d, src: __m128h, k: __mmask8) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.vcvtpd2ph.256"] + fn vcvtpd2ph_256(a: __m256d, src: __m128h, k: __mmask8) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.vcvtpd2ph.512"] + fn vcvtpd2ph_512(a: __m512d, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.vcvtsd2sh.round"] + fn vcvtsd2sh(a: __m128h, b: __m128d, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; + +} + +#[cfg(test)] +mod tests { + use crate::core_arch::x86::*; + use crate::mem::transmute; + use crate::ptr::{addr_of, addr_of_mut}; + use stdarch_test::simd_test; + + #[target_feature(enable = "avx512fp16")] + unsafe fn _mm_set1_pch(re: f16, im: f16) -> __m128h { + _mm_setr_ph(re, im, re, im, re, im, re, im) + } - #[link_name = "llvm.x86.avx512fp16.mask.add.sh.round"] - fn vaddsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; - #[link_name = "llvm.x86.avx512fp16.mask.sub.sh.round"] - fn vsubsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; - #[link_name = "llvm.x86.avx512fp16.mask.mul.sh.round"] - fn vmulsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; - #[link_name = "llvm.x86.avx512fp16.mask.div.sh.round"] - fn vdivsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; + #[target_feature(enable = "avx512fp16")] + unsafe fn _mm256_set1_pch(re: f16, im: f16) -> __m256h { + _mm256_setr_ph( + re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, + ) + } - #[link_name = "llvm.x86.avx512fp16.mask.vfmul.cph.128"] - fn vfmulcph_128(a: __m128, b: __m128, src: __m128, k: __mmask8) -> __m128; - #[link_name = "llvm.x86.avx512fp16.mask.vfmul.cph.256"] - fn vfmulcph_256(a: __m256, b: __m256, src: __m256, k: __mmask8) -> __m256; - #[link_name = "llvm.x86.avx512fp16.mask.vfmul.cph.512"] - fn vfmulcph_512(a: __m512, b: __m512, src: __m512, k: __mmask16, rounding: i32) -> __m512; - #[link_name = "llvm.x86.avx512fp16.mask.vfmul.csh"] - fn vfmulcsh(a: __m128, b: __m128, src: __m128, k: __mmask8, rounding: i32) -> __m128; + #[target_feature(enable = "avx512fp16")] + unsafe fn _mm512_set1_pch(re: f16, im: f16) -> __m512h { + _mm512_setr_ph( + re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, + re, im, re, im, re, im, re, im, re, im, + ) + } - #[link_name = 
"llvm.x86.avx512fp16.mask.vfcmul.cph.128"] - fn vfcmulcph_128(a: __m128, b: __m128, src: __m128, k: __mmask8) -> __m128; - #[link_name = "llvm.x86.avx512fp16.mask.vfcmul.cph.256"] - fn vfcmulcph_256(a: __m256, b: __m256, src: __m256, k: __mmask8) -> __m256; - #[link_name = "llvm.x86.avx512fp16.mask.vfcmul.cph.512"] - fn vfcmulcph_512(a: __m512, b: __m512, src: __m512, k: __mmask16, rounding: i32) -> __m512; - #[link_name = "llvm.x86.avx512fp16.mask.vfcmul.csh"] - fn vfcmulcsh(a: __m128, b: __m128, src: __m128, k: __mmask8, rounding: i32) -> __m128; + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_set_ph() { + let r = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let e = _mm_setr_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_set_ph() { + let r = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let e = _mm256_setr_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_set_ph() { + let r = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let e = _mm512_setr_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_set_sh() { + let r = _mm_set_sh(1.0); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_set1_ph() { + let r = _mm_set1_ph(1.0); + let e = _mm_set_ph(1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_set1_ph() { + let r = _mm256_set1_ph(1.0); + let e = _mm256_set_ph( + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_set1_ph() { + let r = _mm512_set1_ph(1.0); + let e = _mm512_set_ph( + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_setr_ph() { + let r = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let e = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_setr_ph() { + let r = _mm256_setr_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let e = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_setr_ph() { + let r = _mm512_setr_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let e = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 
25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_setzero_ph() { + let r = _mm_setzero_ph(); + let e = _mm_set1_ph(0.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_setzero_ph() { + let r = _mm256_setzero_ph(); + let e = _mm256_set1_ph(0.0); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_setzero_ph() { + let r = _mm512_setzero_ph(); + let e = _mm512_set1_ph(0.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_castsi128_ph() { + let a = _mm_set1_epi16(0x3c00); + let r = _mm_castsi128_ph(a); + let e = _mm_set1_ph(1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castsi256_ph() { + let a = _mm256_set1_epi16(0x3c00); + let r = _mm256_castsi256_ph(a); + let e = _mm256_set1_ph(1.0); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castsi512_ph() { + let a = _mm512_set1_epi16(0x3c00); + let r = _mm512_castsi512_ph(a); + let e = _mm512_set1_ph(1.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_castph_si128() { + let a = _mm_set1_ph(1.0); + let r = _mm_castph_si128(a); + let e = _mm_set1_epi16(0x3c00); + assert_eq_m128i(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castph_si256() { + let a = _mm256_set1_ph(1.0); + let r = _mm256_castph_si256(a); + let e = _mm256_set1_epi16(0x3c00); + assert_eq_m256i(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph_si512() { + let a = _mm512_set1_ph(1.0); + let r = _mm512_castph_si512(a); + let e = _mm512_set1_epi16(0x3c00); + assert_eq_m512i(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_castps_ph() { + let a = _mm_castsi128_ps(_mm_set1_epi16(0x3c00)); + let r = _mm_castps_ph(a); + let e = _mm_set1_ph(1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castps_ph() { + let a = _mm256_castsi256_ps(_mm256_set1_epi16(0x3c00)); + let r = _mm256_castps_ph(a); + let e = _mm256_set1_ph(1.0); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castps_ph() { + let a = _mm512_castsi512_ps(_mm512_set1_epi16(0x3c00)); + let r = _mm512_castps_ph(a); + let e = _mm512_set1_ph(1.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_castph_ps() { + let a = _mm_castsi128_ph(_mm_set1_epi32(0x3f800000)); + let r = _mm_castph_ps(a); + let e = _mm_set1_ps(1.0); + assert_eq_m128(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castph_ps() { + let a = _mm256_castsi256_ph(_mm256_set1_epi32(0x3f800000)); + let r = _mm256_castph_ps(a); + let e = _mm256_set1_ps(1.0); + assert_eq_m256(r, e); + } - #[link_name = "llvm.x86.avx512fp16.mask.vfmadd.cph.128"] - fn vfmaddcph_mask3_128(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128; - #[link_name = "llvm.x86.avx512fp16.maskz.vfmadd.cph.128"] - fn vfmaddcph_maskz_128(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128; - #[link_name = "llvm.x86.avx512fp16.mask.vfmadd.cph.256"] - fn vfmaddcph_mask3_256(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256; - #[link_name = "llvm.x86.avx512fp16.maskz.vfmadd.cph.256"] - fn vfmaddcph_maskz_256(a: 
__m256, b: __m256, c: __m256, k: __mmask8) -> __m256; - #[link_name = "llvm.x86.avx512fp16.mask.vfmadd.cph.512"] - fn vfmaddcph_mask3_512(a: __m512, b: __m512, c: __m512, k: __mmask16, rounding: i32) -> __m512; - #[link_name = "llvm.x86.avx512fp16.maskz.vfmadd.cph.512"] - fn vfmaddcph_maskz_512(a: __m512, b: __m512, c: __m512, k: __mmask16, rounding: i32) -> __m512; - #[link_name = "llvm.x86.avx512fp16.mask.vfmadd.csh"] - fn vfmaddcsh_mask(a: __m128, b: __m128, c: __m128, k: __mmask8, rounding: i32) -> __m128; - #[link_name = "llvm.x86.avx512fp16.maskz.vfmadd.csh"] - fn vfmaddcsh_maskz(a: __m128, b: __m128, c: __m128, k: __mmask8, rounding: i32) -> __m128; + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph_ps() { + let a = _mm512_castsi512_ph(_mm512_set1_epi32(0x3f800000)); + let r = _mm512_castph_ps(a); + let e = _mm512_set1_ps(1.0); + assert_eq_m512(r, e); + } - #[link_name = "llvm.x86.avx512fp16.mask.vfcmadd.cph.128"] - fn vfcmaddcph_mask3_128(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128; - #[link_name = "llvm.x86.avx512fp16.maskz.vfcmadd.cph.128"] - fn vfcmaddcph_maskz_128(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128; - #[link_name = "llvm.x86.avx512fp16.mask.vfcmadd.cph.256"] - fn vfcmaddcph_mask3_256(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256; - #[link_name = "llvm.x86.avx512fp16.maskz.vfcmadd.cph.256"] - fn vfcmaddcph_maskz_256(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256; - #[link_name = "llvm.x86.avx512fp16.mask.vfcmadd.cph.512"] - fn vfcmaddcph_mask3_512(a: __m512, b: __m512, c: __m512, k: __mmask16, rounding: i32) - -> __m512; - #[link_name = "llvm.x86.avx512fp16.maskz.vfcmadd.cph.512"] - fn vfcmaddcph_maskz_512(a: __m512, b: __m512, c: __m512, k: __mmask16, rounding: i32) - -> __m512; - #[link_name = "llvm.x86.avx512fp16.mask.vfcmadd.csh"] - fn vfcmaddcsh_mask(a: __m128, b: __m128, c: __m128, k: __mmask8, rounding: i32) -> __m128; - #[link_name = "llvm.x86.avx512fp16.maskz.vfcmadd.csh"] - fn vfcmaddcsh_maskz(a: __m128, b: __m128, c: __m128, k: __mmask8, rounding: i32) -> __m128; + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_castpd_ph() { + let a = _mm_castsi128_pd(_mm_set1_epi16(0x3c00)); + let r = _mm_castpd_ph(a); + let e = _mm_set1_ph(1.0); + assert_eq_m128h(r, e); + } - #[link_name = "llvm.x86.avx512fp16.vfmadd.ph.512"] - fn vfmaddph_512(a: __m512h, b: __m512h, c: __m512h, rounding: i32) -> __m512h; - #[link_name = "llvm.fma.f16"] - fn fmaf16(a: f16, b: f16, c: f16) -> f16; // TODO: use `crate::intrinsics::fmaf16` when it's available - #[link_name = "llvm.x86.avx512fp16.vfmadd.f16"] - fn vfmaddsh(a: f16, b: f16, c: f16, rounding: i32) -> f16; + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castpd_ph() { + let a = _mm256_castsi256_pd(_mm256_set1_epi16(0x3c00)); + let r = _mm256_castpd_ph(a); + let e = _mm256_set1_ph(1.0); + assert_eq_m256h(r, e); + } - #[link_name = "llvm.x86.avx512fp16.vfmaddsub.ph.128"] - fn vfmaddsubph_128(a: __m128h, b: __m128h, c: __m128h) -> __m128h; - #[link_name = "llvm.x86.avx512fp16.vfmaddsub.ph.256"] - fn vfmaddsubph_256(a: __m256h, b: __m256h, c: __m256h) -> __m256h; - #[link_name = "llvm.x86.avx512fp16.vfmaddsub.ph.512"] - fn vfmaddsubph_512(a: __m512h, b: __m512h, c: __m512h, rounding: i32) -> __m512h; + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castpd_ph() { + let a = _mm512_castsi512_pd(_mm512_set1_epi16(0x3c00)); + let r = _mm512_castpd_ph(a); + let e = _mm512_set1_ph(1.0); + assert_eq_m512h(r, e); + } - #[link_name = 
"llvm.x86.avx512fp16.mask.rcp.ph.128"] - fn vrcpph_128(a: __m128h, src: __m128h, k: __mmask8) -> __m128h; - #[link_name = "llvm.x86.avx512fp16.mask.rcp.ph.256"] - fn vrcpph_256(a: __m256h, src: __m256h, k: __mmask16) -> __m256h; - #[link_name = "llvm.x86.avx512fp16.mask.rcp.ph.512"] - fn vrcpph_512(a: __m512h, src: __m512h, k: __mmask32) -> __m512h; - #[link_name = "llvm.x86.avx512fp16.mask.rcp.sh"] - fn vrcpsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8) -> __m128h; + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_castph_pd() { + let a = _mm_castsi128_ph(_mm_set1_epi64x(0x3ff0000000000000)); + let r = _mm_castph_pd(a); + let e = _mm_set1_pd(1.0); + assert_eq_m128d(r, e); + } - #[link_name = "llvm.x86.avx512fp16.mask.rsqrt.ph.128"] - fn vrsqrtph_128(a: __m128h, src: __m128h, k: __mmask8) -> __m128h; - #[link_name = "llvm.x86.avx512fp16.mask.rsqrt.ph.256"] - fn vrsqrtph_256(a: __m256h, src: __m256h, k: __mmask16) -> __m256h; - #[link_name = "llvm.x86.avx512fp16.mask.rsqrt.ph.512"] - fn vrsqrtph_512(a: __m512h, src: __m512h, k: __mmask32) -> __m512h; - #[link_name = "llvm.x86.avx512fp16.mask.rsqrt.sh"] - fn vrsqrtsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8) -> __m128h; + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castph_pd() { + let a = _mm256_castsi256_ph(_mm256_set1_epi64x(0x3ff0000000000000)); + let r = _mm256_castph_pd(a); + let e = _mm256_set1_pd(1.0); + assert_eq_m256d(r, e); + } - #[link_name = "llvm.x86.avx512fp16.sqrt.ph.512"] - fn vsqrtph_512(a: __m512h, rounding: i32) -> __m512h; - #[link_name = "llvm.x86.avx512fp16.mask.sqrt.sh"] - fn vsqrtsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph_pd() { + let a = _mm512_castsi512_ph(_mm512_set1_epi64(0x3ff0000000000000)); + let r = _mm512_castph_pd(a); + let e = _mm512_set1_pd(1.0); + assert_eq_m512d(r, e); + } - #[link_name = "llvm.x86.avx512fp16.max.ph.128"] - fn vmaxph_128(a: __m128h, b: __m128h) -> __m128h; - #[link_name = "llvm.x86.avx512fp16.max.ph.256"] - fn vmaxph_256(a: __m256h, b: __m256h) -> __m256h; - #[link_name = "llvm.x86.avx512fp16.max.ph.512"] - fn vmaxph_512(a: __m512h, b: __m512h, sae: i32) -> __m512h; - #[link_name = "llvm.x86.avx512fp16.mask.max.sh.round"] - fn vmaxsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, sae: i32) -> __m128h; + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castph256_ph128() { + let a = _mm256_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., + ); + let r = _mm256_castph256_ph128(a); + let e = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + assert_eq_m128h(r, e); + } - #[link_name = "llvm.x86.avx512fp16.min.ph.128"] - fn vminph_128(a: __m128h, b: __m128h) -> __m128h; - #[link_name = "llvm.x86.avx512fp16.min.ph.256"] - fn vminph_256(a: __m256h, b: __m256h) -> __m256h; - #[link_name = "llvm.x86.avx512fp16.min.ph.512"] - fn vminph_512(a: __m512h, b: __m512h, sae: i32) -> __m512h; - #[link_name = "llvm.x86.avx512fp16.mask.min.sh.round"] - fn vminsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, sae: i32) -> __m128h; + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph512_ph128() { + let a = _mm512_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., + ); + let r = _mm512_castph512_ph128(a); + let e = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + assert_eq_m128h(r, e); + } - 
#[link_name = "llvm.x86.avx512fp16.mask.getexp.ph.128"] - fn vgetexpph_128(a: __m128h, src: __m128h, k: __mmask8) -> __m128h; - #[link_name = "llvm.x86.avx512fp16.mask.getexp.ph.256"] - fn vgetexpph_256(a: __m256h, src: __m256h, k: __mmask16) -> __m256h; - #[link_name = "llvm.x86.avx512fp16.mask.getexp.ph.512"] - fn vgetexpph_512(a: __m512h, src: __m512h, k: __mmask32, sae: i32) -> __m512h; - #[link_name = "llvm.x86.avx512fp16.mask.getexp.sh"] - fn vgetexpsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, sae: i32) -> __m128h; + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph512_ph256() { + let a = _mm512_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., + ); + let r = _mm512_castph512_ph256(a); + let e = _mm256_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., + ); + assert_eq_m256h(r, e); + } - #[link_name = "llvm.x86.avx512fp16.mask.getmant.ph.128"] - fn vgetmantph_128(a: __m128h, imm8: i32, src: __m128h, k: __mmask8) -> __m128h; - #[link_name = "llvm.x86.avx512fp16.mask.getmant.ph.256"] - fn vgetmantph_256(a: __m256h, imm8: i32, src: __m256h, k: __mmask16) -> __m256h; - #[link_name = "llvm.x86.avx512fp16.mask.getmant.ph.512"] - fn vgetmantph_512(a: __m512h, imm8: i32, src: __m512h, k: __mmask32, sae: i32) -> __m512h; - #[link_name = "llvm.x86.avx512fp16.mask.getmant.sh"] - fn vgetmantsh( - a: __m128h, - b: __m128h, - imm8: i32, - src: __m128h, - k: __mmask8, - sae: i32, - ) -> __m128h; + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castph128_ph256() { + let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + let r = _mm256_castph128_ph256(a); + assert_eq_m128h(_mm256_castph256_ph128(r), a); + } - #[link_name = "llvm.x86.avx512fp16.mask.rndscale.ph.128"] - fn vrndscaleph_128(a: __m128h, imm8: i32, src: __m128h, k: __mmask8) -> __m128h; - #[link_name = "llvm.x86.avx512fp16.mask.rndscale.ph.256"] - fn vrndscaleph_256(a: __m256h, imm8: i32, src: __m256h, k: __mmask16) -> __m256h; - #[link_name = "llvm.x86.avx512fp16.mask.rndscale.ph.512"] - fn vrndscaleph_512(a: __m512h, imm8: i32, src: __m512h, k: __mmask32, sae: i32) -> __m512h; - #[link_name = "llvm.x86.avx512fp16.mask.rndscale.sh"] - fn vrndscalesh( - a: __m128h, - b: __m128h, - src: __m128h, - k: __mmask8, - imm8: i32, - sae: i32, - ) -> __m128h; + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph128_ph512() { + let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + let r = _mm512_castph128_ph512(a); + assert_eq_m128h(_mm512_castph512_ph128(r), a); + } - #[link_name = "llvm.x86.avx512fp16.mask.scalef.ph.128"] - fn vscalefph_128(a: __m128h, b: __m128h, src: __m128h, k: __mmask8) -> __m128h; - #[link_name = "llvm.x86.avx512fp16.mask.scalef.ph.256"] - fn vscalefph_256(a: __m256h, b: __m256h, src: __m256h, k: __mmask16) -> __m256h; - #[link_name = "llvm.x86.avx512fp16.mask.scalef.ph.512"] - fn vscalefph_512(a: __m512h, b: __m512h, src: __m512h, k: __mmask32, rounding: i32) -> __m512h; - #[link_name = "llvm.x86.avx512fp16.mask.scalef.sh"] - fn vscalefsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph256_ph512() { + let a = _mm256_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., + ); + let r = _mm512_castph256_ph512(a); + assert_eq_m256h(_mm512_castph512_ph256(r), a); + } - #[link_name = 
"llvm.x86.avx512fp16.mask.reduce.ph.128"] - fn vreduceph_128(a: __m128h, imm8: i32, src: __m128h, k: __mmask8) -> __m128h; - #[link_name = "llvm.x86.avx512fp16.mask.reduce.ph.256"] - fn vreduceph_256(a: __m256h, imm8: i32, src: __m256h, k: __mmask16) -> __m256h; - #[link_name = "llvm.x86.avx512fp16.mask.reduce.ph.512"] - fn vreduceph_512(a: __m512h, imm8: i32, src: __m512h, k: __mmask32, sae: i32) -> __m512h; - #[link_name = "llvm.x86.avx512fp16.mask.reduce.sh"] - fn vreducesh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, imm8: i32, sae: i32) - -> __m128h; + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_zextph128_ph256() { + let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + let r = _mm256_zextph128_ph256(a); + let e = _mm256_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 0., 0., 0., 0., 0., 0., 0., 0., + ); + assert_eq_m256h(r, e); + } - #[link_name = "llvm.x86.avx512fp16.mask.fpclass.sh"] - fn vfpclasssh(a: __m128h, imm8: i32, k: __mmask8) -> __mmask8; -} + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_zextph128_ph512() { + let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + let r = _mm512_zextph128_ph512(a); + let e = _mm512_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + ); + assert_eq_m512h(r, e); + } -#[cfg(test)] -mod tests { - use crate::core_arch::x86::*; - use crate::mem::transmute; - use crate::ptr::{addr_of, addr_of_mut}; - use stdarch_test::simd_test; + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_zextph256_ph512() { + let a = _mm256_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., + ); + let r = _mm512_zextph256_ph512(a); + let e = _mm512_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + ); + assert_eq_m512h(r, e); + } - #[target_feature(enable = "avx512fp16")] - unsafe fn _mm_set1_pch(re: f16, im: f16) -> __m128h { - _mm_setr_ph(re, im, re, im, re, im, re, im) + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_cmp_ph_mask() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(1.0, 2.0, 3.0, 4.0, -5.0, -6.0, -7.0, -8.0); + let r = _mm_cmp_ph_mask::<_CMP_EQ_OQ>(a, b); + assert_eq!(r, 0b11110000); } - #[target_feature(enable = "avx512fp16")] - unsafe fn _mm256_set1_pch(re: f16, im: f16) -> __m256h { - _mm256_setr_ph( - re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, - ) + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_cmp_ph_mask() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(1.0, 2.0, 3.0, 4.0, -5.0, -6.0, -7.0, -8.0); + let r = _mm_mask_cmp_ph_mask::<_CMP_EQ_OQ>(0b01010101, a, b); + assert_eq!(r, 0b01010000); } - #[target_feature(enable = "avx512fp16")] - unsafe fn _mm512_set1_pch(re: f16, im: f16) -> __m512h { - _mm512_setr_ph( - re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, - re, im, re, im, re, im, re, im, re, im, - ) + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_cmp_ph_mask() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, -5.0, -6.0, -7.0, -8.0, 9.0, 10.0, 11.0, 12.0, -13.0, -14.0, -15.0, + -16.0, + ); + let r = _mm256_cmp_ph_mask::<_CMP_EQ_OQ>(a, b); + assert_eq!(r, 0b1111000011110000); } - 
#[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_set_ph() { - let r = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let e = _mm_setr_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - assert_eq_m128h(r, e); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_cmp_ph_mask() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, -5.0, -6.0, -7.0, -8.0, 9.0, 10.0, 11.0, 12.0, -13.0, -14.0, -15.0, + -16.0, + ); + let r = _mm256_mask_cmp_ph_mask::<_CMP_EQ_OQ>(0b0101010101010101, a, b); + assert_eq!(r, 0b0101000001010000); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_set_ph() { - let r = _mm256_set_ph( + unsafe fn test_mm512_cmp_ph_mask() { + let a = _mm512_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, ); - let e = _mm256_setr_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + let b = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, -5.0, -6.0, -7.0, -8.0, 9.0, 10.0, 11.0, 12.0, -13.0, -14.0, -15.0, + -16.0, 17.0, 18.0, 19.0, 20.0, -21.0, -22.0, -23.0, -24.0, 25.0, 26.0, 27.0, 28.0, + -29.0, -30.0, -31.0, -32.0, ); - assert_eq_m256h(r, e); + let r = _mm512_cmp_ph_mask::<_CMP_EQ_OQ>(a, b); + assert_eq!(r, 0b11110000111100001111000011110000); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_set_ph() { - let r = _mm512_set_ph( + unsafe fn test_mm512_mask_cmp_ph_mask() { + let a = _mm512_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, ); - let e = _mm512_setr_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, + let b = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, -5.0, -6.0, -7.0, -8.0, 9.0, 10.0, 11.0, 12.0, -13.0, -14.0, -15.0, + -16.0, 17.0, 18.0, 19.0, 20.0, -21.0, -22.0, -23.0, -24.0, 25.0, 26.0, 27.0, 28.0, + -29.0, -30.0, -31.0, -32.0, ); - assert_eq_m512h(r, e); + let r = _mm512_mask_cmp_ph_mask::<_CMP_EQ_OQ>(0b01010101010101010101010101010101, a, b); + assert_eq!(r, 0b01010000010100000101000001010000); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_set_sh() { - let r = _mm_set_sh(1.0); - let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0); - assert_eq_m128h(r, e); + unsafe fn test_mm_cmp_round_sh_mask() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_cmp_round_sh_mask::<_CMP_EQ_OQ, _MM_FROUND_NO_EXC>(a, b); + assert_eq!(r, 1); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_set1_ph() { - let r = _mm_set1_ph(1.0); - let e = _mm_set_ph(1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0); - assert_eq_m128h(r, e); + unsafe fn test_mm_mask_cmp_round_sh_mask() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_mask_cmp_round_sh_mask::<_CMP_EQ_OQ, _MM_FROUND_NO_EXC>(0, a, b); + assert_eq!(r, 0); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_set1_ph() { - let r = _mm256_set1_ph(1.0); - let e = _mm256_set_ph( - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - ); - assert_eq_m256h(r, e); + unsafe fn test_mm_cmp_sh_mask() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + 
let r = _mm_cmp_sh_mask::<_CMP_EQ_OQ>(a, b); + assert_eq!(r, 1); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_set1_ph() { - let r = _mm512_set1_ph(1.0); - let e = _mm512_set_ph( - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - ); - assert_eq_m512h(r, e); + unsafe fn test_mm_mask_cmp_sh_mask() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_mask_cmp_sh_mask::<_CMP_EQ_OQ>(0, a, b); + assert_eq!(r, 0); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_setr_ph() { - let r = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let e = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - assert_eq_m128h(r, e); + unsafe fn test_mm_comi_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_comi_round_sh::<_CMP_EQ_OQ, _MM_FROUND_NO_EXC>(a, b); + assert_eq!(r, 1); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_setr_ph() { - let r = _mm256_setr_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let e = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, - ); - assert_eq_m256h(r, e); + unsafe fn test_mm_comi_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_comi_sh::<_CMP_EQ_OQ>(a, b); + assert_eq!(r, 1); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_setr_ph() { - let r = _mm512_setr_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let e = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - assert_eq_m512h(r, e); + unsafe fn test_mm_comieq_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_comieq_sh(a, b); + assert_eq!(r, 1); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_setzero_ph() { - let r = _mm_setzero_ph(); - let e = _mm_set1_ph(0.0); - assert_eq_m128h(r, e); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comige_sh() { + let a = _mm_set_sh(2.0); + let b = _mm_set_sh(1.0); + let r = _mm_comige_sh(a, b); + assert_eq!(r, 1); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_setzero_ph() { - let r = _mm256_setzero_ph(); - let e = _mm256_set1_ph(0.0); - assert_eq_m256h(r, e); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comigt_sh() { + let a = _mm_set_sh(2.0); + let b = _mm_set_sh(1.0); + let r = _mm_comigt_sh(a, b); + assert_eq!(r, 1); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_setzero_ph() { - let r = _mm512_setzero_ph(); - let e = _mm512_set1_ph(0.0); - assert_eq_m512h(r, e); + unsafe fn test_mm_comile_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_comile_sh(a, b); + assert_eq!(r, 1); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_castsi128_ph() { - let a = _mm_set1_epi16(0x3c00); - let r = _mm_castsi128_ph(a); - let e = _mm_set1_ph(1.0); - assert_eq_m128h(r, e); + unsafe fn test_mm_comilt_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_comilt_sh(a, b); + assert_eq!(r, 1); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castsi256_ph() { - let a = 
_mm256_set1_epi16(0x3c00); - let r = _mm256_castsi256_ph(a); - let e = _mm256_set1_ph(1.0); - assert_eq_m256h(r, e); + unsafe fn test_mm_comineq_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_comineq_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_ucomieq_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_ucomieq_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_ucomige_sh() { + let a = _mm_set_sh(2.0); + let b = _mm_set_sh(1.0); + let r = _mm_ucomige_sh(a, b); + assert_eq!(r, 1); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castsi512_ph() { - let a = _mm512_set1_epi16(0x3c00); - let r = _mm512_castsi512_ph(a); - let e = _mm512_set1_ph(1.0); - assert_eq_m512h(r, e); + unsafe fn test_mm_ucomigt_sh() { + let a = _mm_set_sh(2.0); + let b = _mm_set_sh(1.0); + let r = _mm_ucomigt_sh(a, b); + assert_eq!(r, 1); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_castph_si128() { - let a = _mm_set1_ph(1.0); - let r = _mm_castph_si128(a); - let e = _mm_set1_epi16(0x3c00); - assert_eq_m128i(r, e); + unsafe fn test_mm_ucomile_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_ucomile_sh(a, b); + assert_eq!(r, 1); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castph_si256() { - let a = _mm256_set1_ph(1.0); - let r = _mm256_castph_si256(a); - let e = _mm256_set1_epi16(0x3c00); - assert_eq_m256i(r, e); + unsafe fn test_mm_ucomilt_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_ucomilt_sh(a, b); + assert_eq!(r, 1); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castph_si512() { - let a = _mm512_set1_ph(1.0); - let r = _mm512_castph_si512(a); - let e = _mm512_set1_epi16(0x3c00); - assert_eq_m512i(r, e); + unsafe fn test_mm_ucomineq_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_ucomineq_sh(a, b); + assert_eq!(r, 1); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_castps_ph() { - let a = _mm_castsi128_ps(_mm_set1_epi16(0x3c00)); - let r = _mm_castps_ph(a); - let e = _mm_set1_ph(1.0); - assert_eq_m128h(r, e); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_load_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_load_ph(addr_of!(a).cast()); + assert_eq_m128h(a, b); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castps_ph() { - let a = _mm256_castsi256_ps(_mm256_set1_epi16(0x3c00)); - let r = _mm256_castps_ph(a); - let e = _mm256_set1_ph(1.0); - assert_eq_m256h(r, e); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_load_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_load_ph(addr_of!(a).cast()); + assert_eq_m256h(a, b); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castps_ph() { - let a = _mm512_castsi512_ps(_mm512_set1_epi16(0x3c00)); - let r = _mm512_castps_ph(a); - let e = _mm512_set1_ph(1.0); - assert_eq_m512h(r, e); + unsafe fn test_mm512_load_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_load_ph(addr_of!(a).cast()); + assert_eq_m512h(a, b); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_castph_ps() { - let a = 
_mm_castsi128_ph(_mm_set1_epi32(0x3f800000)); - let r = _mm_castph_ps(a); - let e = _mm_set1_ps(1.0); - assert_eq_m128(r, e); + unsafe fn test_mm_load_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_load_sh(addr_of!(a).cast()); + assert_eq_m128h(a, b); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castph_ps() { - let a = _mm256_castsi256_ph(_mm256_set1_epi32(0x3f800000)); - let r = _mm256_castph_ps(a); - let e = _mm256_set1_ps(1.0); - assert_eq_m256(r, e); + unsafe fn test_mm_mask_load_sh() { + let a = _mm_set_sh(1.0); + let src = _mm_set_sh(2.); + let b = _mm_mask_load_sh(src, 1, addr_of!(a).cast()); + assert_eq_m128h(a, b); + let b = _mm_mask_load_sh(src, 0, addr_of!(a).cast()); + assert_eq_m128h(src, b); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castph_ps() { - let a = _mm512_castsi512_ph(_mm512_set1_epi32(0x3f800000)); - let r = _mm512_castph_ps(a); - let e = _mm512_set1_ps(1.0); - assert_eq_m512(r, e); + unsafe fn test_mm_maskz_load_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_maskz_load_sh(1, addr_of!(a).cast()); + assert_eq_m128h(a, b); + let b = _mm_maskz_load_sh(0, addr_of!(a).cast()); + assert_eq_m128h(_mm_setzero_ph(), b); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_castpd_ph() { - let a = _mm_castsi128_pd(_mm_set1_epi16(0x3c00)); - let r = _mm_castpd_ph(a); - let e = _mm_set1_ph(1.0); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_loadu_ph() { + let array = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]; + let r = _mm_loadu_ph(array.as_ptr()); + let e = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castpd_ph() { - let a = _mm256_castsi256_pd(_mm256_set1_epi16(0x3c00)); - let r = _mm256_castpd_ph(a); - let e = _mm256_set1_ph(1.0); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_loadu_ph() { + let array = [ + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ]; + let r = _mm256_loadu_ph(array.as_ptr()); + let e = _mm256_setr_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castpd_ph() { - let a = _mm512_castsi512_pd(_mm512_set1_epi16(0x3c00)); - let r = _mm512_castpd_ph(a); - let e = _mm512_set1_ph(1.0); + unsafe fn test_mm512_loadu_ph() { + let array = [ + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ]; + let r = _mm512_loadu_ph(array.as_ptr()); + let e = _mm512_setr_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_castph_pd() { - let a = _mm_castsi128_ph(_mm_set1_epi64x(0x3ff0000000000000)); - let r = _mm_castph_pd(a); - let e = _mm_set1_pd(1.0); - assert_eq_m128d(r, e); + unsafe fn test_mm_move_sh() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_sh(9.0); + let r = _mm_move_sh(a, b); + let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 9.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castph_pd() { - let a = _mm256_castsi256_ph(_mm256_set1_epi64x(0x3ff0000000000000)); - let r = 
_mm256_castph_pd(a); - let e = _mm256_set1_pd(1.0); - assert_eq_m256d(r, e); + unsafe fn test_mm_mask_move_sh() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_sh(9.0); + let src = _mm_set_sh(10.0); + let r = _mm_mask_move_sh(src, 0, a, b); + let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 10.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castph_pd() { - let a = _mm512_castsi512_ph(_mm512_set1_epi64(0x3ff0000000000000)); - let r = _mm512_castph_pd(a); - let e = _mm512_set1_pd(1.0); - assert_eq_m512d(r, e); + unsafe fn test_mm_maskz_move_sh() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_sh(9.0); + let r = _mm_maskz_move_sh(0, a, b); + let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 0.0); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castph256_ph128() { - let a = _mm256_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., - ); - let r = _mm256_castph256_ph128(a); - let e = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); - assert_eq_m128h(r, e); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_store_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let mut b = _mm_setzero_ph(); + _mm_store_ph(addr_of_mut!(b).cast(), a); + assert_eq_m128h(a, b); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castph512_ph128() { - let a = _mm512_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., - 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_store_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - let r = _mm512_castph512_ph128(a); - let e = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); - assert_eq_m128h(r, e); + let mut b = _mm256_setzero_ph(); + _mm256_store_ph(addr_of_mut!(b).cast(), a); + assert_eq_m256h(a, b); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castph512_ph256() { - let a = _mm512_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., - 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., - ); - let r = _mm512_castph512_ph256(a); - let e = _mm256_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., + unsafe fn test_mm512_store_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, ); - assert_eq_m256h(r, e); + let mut b = _mm512_setzero_ph(); + _mm512_store_ph(addr_of_mut!(b).cast(), a); + assert_eq_m512h(a, b); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castph128_ph256() { - let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); - let r = _mm256_castph128_ph256(a); - assert_eq_m128h(_mm256_castph256_ph128(r), a); + unsafe fn test_mm_store_sh() { + let a = _mm_set_sh(1.0); + let mut b = _mm_setzero_ph(); + _mm_store_sh(addr_of_mut!(b).cast(), a); + assert_eq_m128h(a, b); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castph128_ph512() { - let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); - let r = _mm512_castph128_ph512(a); - assert_eq_m128h(_mm512_castph512_ph128(r), a); + unsafe fn test_mm_mask_store_sh() { + let a = _mm_set_sh(1.0); + let mut b = 
_mm_setzero_ph(); + _mm_mask_store_sh(addr_of_mut!(b).cast(), 0, a); + assert_eq_m128h(_mm_setzero_ph(), b); + _mm_mask_store_sh(addr_of_mut!(b).cast(), 1, a); + assert_eq_m128h(a, b); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_storeu_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let mut array = [0.0; 8]; + _mm_storeu_ph(array.as_mut_ptr(), a); + assert_eq_m128h(a, _mm_loadu_ph(array.as_ptr())); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castph256_ph512() { - let a = _mm256_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_storeu_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - let r = _mm512_castph256_ph512(a); - assert_eq_m256h(_mm512_castph512_ph256(r), a); + let mut array = [0.0; 16]; + _mm256_storeu_ph(array.as_mut_ptr(), a); + assert_eq_m256h(a, _mm256_loadu_ph(array.as_ptr())); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_zextph128_ph256() { - let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); - let r = _mm256_zextph128_ph256(a); - let e = _mm256_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 0., 0., 0., 0., 0., 0., 0., 0., + unsafe fn test_mm512_storeu_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, ); - assert_eq_m256h(r, e); + let mut array = [0.0; 32]; + _mm512_storeu_ph(array.as_mut_ptr(), a); + assert_eq_m512h(a, _mm512_loadu_ph(array.as_ptr())); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_zextph128_ph512() { - let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); - let r = _mm512_zextph128_ph512(a); - let e = _mm512_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., - 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., - ); - assert_eq_m512h(r, e); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_add_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let r = _mm_add_ph(a, b); + let e = _mm_set1_ph(9.0); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_zextph256_ph512() { - let a = _mm256_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., - ); - let r = _mm512_zextph256_ph512(a); - let e = _mm512_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 0., 0., 0., 0., - 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., - ); - assert_eq_m512h(r, e); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_add_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm_mask_add_ph(src, 0b01010101, a, b); + let e = _mm_set_ph(10., 9., 12., 9., 14., 9., 16., 9.); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_cmp_ph_mask() { + unsafe fn test_mm_maskz_add_ph() { let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(1.0, 2.0, 3.0, 4.0, -5.0, -6.0, -7.0, -8.0); - let r = _mm_cmp_ph_mask::<_CMP_EQ_OQ>(a, b); - assert_eq!(r, 0b11110000); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 
4.0, 3.0, 2.0, 1.0); + let r = _mm_maskz_add_ph(0b01010101, a, b); + let e = _mm_set_ph(0., 9., 0., 9., 0., 9., 0., 9.); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_cmp_ph_mask() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(1.0, 2.0, 3.0, 4.0, -5.0, -6.0, -7.0, -8.0); - let r = _mm_mask_cmp_ph_mask::<_CMP_EQ_OQ>(0b01010101, a, b); - assert_eq!(r, 0b01010000); + unsafe fn test_mm256_add_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let r = _mm256_add_ph(a, b); + let e = _mm256_set1_ph(17.0); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_cmp_ph_mask() { + unsafe fn test_mm256_mask_add_ph() { let a = _mm256_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); let b = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, -5.0, -6.0, -7.0, -8.0, 9.0, 10.0, 11.0, 12.0, -13.0, -14.0, -15.0, - -16.0, + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, ); - let r = _mm256_cmp_ph_mask::<_CMP_EQ_OQ>(a, b); - assert_eq!(r, 0b1111000011110000); + let src = _mm256_set_ph( + 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., + ); + let r = _mm256_mask_add_ph(src, 0b0101010101010101, a, b); + let e = _mm256_set_ph( + 18., 17., 20., 17., 22., 17., 24., 17., 26., 17., 28., 17., 30., 17., 32., 17., + ); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_cmp_ph_mask() { + unsafe fn test_mm256_maskz_add_ph() { let a = _mm256_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); let b = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, -5.0, -6.0, -7.0, -8.0, 9.0, 10.0, 11.0, 12.0, -13.0, -14.0, -15.0, - -16.0, + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, ); - let r = _mm256_mask_cmp_ph_mask::<_CMP_EQ_OQ>(0b0101010101010101, a, b); - assert_eq!(r, 0b0101000001010000); + let r = _mm256_maskz_add_ph(0b0101010101010101, a, b); + let e = _mm256_set_ph( + 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., + ); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_cmp_ph_mask() { + unsafe fn test_mm512_add_ph() { let a = _mm512_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, ); let b = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, -5.0, -6.0, -7.0, -8.0, 9.0, 10.0, 11.0, 12.0, -13.0, -14.0, -15.0, - -16.0, 17.0, 18.0, 19.0, 20.0, -21.0, -22.0, -23.0, -24.0, 25.0, 26.0, 27.0, 28.0, - -29.0, -30.0, -31.0, -32.0, + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, ); - let r = _mm512_cmp_ph_mask::<_CMP_EQ_OQ>(a, b); - assert_eq!(r, 0b11110000111100001111000011110000); + let r = _mm512_add_ph(a, b); + let e = _mm512_set1_ph(33.0); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_cmp_ph_mask() { + unsafe fn test_mm512_mask_add_ph() { let a = _mm512_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 
12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, ); let b = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, -5.0, -6.0, -7.0, -8.0, 9.0, 10.0, 11.0, 12.0, -13.0, -14.0, -15.0, - -16.0, 17.0, 18.0, 19.0, 20.0, -21.0, -22.0, -23.0, -24.0, 25.0, 26.0, 27.0, 28.0, - -29.0, -30.0, -31.0, -32.0, + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, ); - let r = _mm512_mask_cmp_ph_mask::<_CMP_EQ_OQ>(0b01010101010101010101010101010101, a, b); - assert_eq!(r, 0b01010000010100000101000001010000); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_cmp_round_sh_mask() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = _mm_cmp_round_sh_mask::<_CMP_EQ_OQ, _MM_FROUND_NO_EXC>(a, b); - assert_eq!(r, 1); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_cmp_round_sh_mask() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = _mm_mask_cmp_round_sh_mask::<_CMP_EQ_OQ, _MM_FROUND_NO_EXC>(0, a, b); - assert_eq!(r, 0); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_cmp_sh_mask() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = _mm_cmp_sh_mask::<_CMP_EQ_OQ>(a, b); - assert_eq!(r, 1); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_cmp_sh_mask() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = _mm_mask_cmp_sh_mask::<_CMP_EQ_OQ>(0, a, b); - assert_eq!(r, 0); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comi_round_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = _mm_comi_round_sh::<_CMP_EQ_OQ, _MM_FROUND_NO_EXC>(a, b); - assert_eq!(r, 1); + let src = _mm512_set_ph( + 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., + 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + ); + let r = _mm512_mask_add_ph(src, 0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 34., 33., 36., 33., 38., 33., 40., 33., 42., 33., 44., 33., 46., 33., 48., 33., 50., + 33., 52., 33., 54., 33., 56., 33., 58., 33., 60., 33., 62., 33., 64., 33., + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comi_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = _mm_comi_sh::<_CMP_EQ_OQ>(a, b); - assert_eq!(r, 1); + unsafe fn test_mm512_maskz_add_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_maskz_add_ph(0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., + 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comieq_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = _mm_comieq_sh(a, b); - assert_eq!(r, 1); + unsafe fn test_mm512_add_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 
14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_add_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set1_ph(33.0); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comige_sh() { - let a = _mm_set_sh(2.0); - let b = _mm_set_sh(1.0); - let r = _mm_comige_sh(a, b); - assert_eq!(r, 1); + unsafe fn test_mm512_mask_add_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let src = _mm512_set_ph( + 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., + 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + ); + let r = _mm512_mask_add_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 34., 33., 36., 33., 38., 33., 40., 33., 42., 33., 44., 33., 46., 33., 48., 33., 50., + 33., 52., 33., 54., 33., 56., 33., 58., 33., 60., 33., 62., 33., 64., 33., + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comigt_sh() { - let a = _mm_set_sh(2.0); - let b = _mm_set_sh(1.0); - let r = _mm_comigt_sh(a, b); - assert_eq!(r, 1); + unsafe fn test_mm512_maskz_add_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_maskz_add_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., + 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comile_sh() { + unsafe fn test_mm_add_round_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); - let r = _mm_comile_sh(a, b); - assert_eq!(r, 1); + let r = _mm_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_set_sh(3.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comilt_sh() { + unsafe fn test_mm_mask_add_round_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); - let r = _mm_comilt_sh(a, b); - assert_eq!(r, 1); + let src = _mm_set_sh(4.0); + let r = _mm_mask_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, + ); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC 
}>( + src, 1, a, b, + ); + let e = _mm_set_sh(3.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comineq_sh() { + unsafe fn test_mm_maskz_add_round_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); - let r = _mm_comineq_sh(a, b); - assert_eq!(r, 1); + let r = + _mm_maskz_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = + _mm_maskz_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_set_sh(3.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_ucomieq_sh() { + unsafe fn test_mm_add_sh() { let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = _mm_ucomieq_sh(a, b); - assert_eq!(r, 1); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_ucomige_sh() { - let a = _mm_set_sh(2.0); - let b = _mm_set_sh(1.0); - let r = _mm_ucomige_sh(a, b); - assert_eq!(r, 1); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_ucomigt_sh() { - let a = _mm_set_sh(2.0); - let b = _mm_set_sh(1.0); - let r = _mm_ucomigt_sh(a, b); - assert_eq!(r, 1); + let b = _mm_set_sh(2.0); + let r = _mm_add_sh(a, b); + let e = _mm_set_sh(3.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_ucomile_sh() { + unsafe fn test_mm_mask_add_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); - let r = _mm_ucomile_sh(a, b); - assert_eq!(r, 1); + let src = _mm_set_sh(4.0); + let r = _mm_mask_add_sh(src, 0, a, b); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_add_sh(src, 1, a, b); + let e = _mm_set_sh(3.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_ucomilt_sh() { + unsafe fn test_mm_maskz_add_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); - let r = _mm_ucomilt_sh(a, b); - assert_eq!(r, 1); + let r = _mm_maskz_add_sh(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = _mm_maskz_add_sh(1, a, b); + let e = _mm_set_sh(3.0); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_ucomineq_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_ucomineq_sh(a, b); - assert_eq!(r, 1); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_sub_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let r = _mm_sub_ph(a, b); + let e = _mm_set_ph(-7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_load_ph() { + unsafe fn test_mm_mask_sub_ph() { let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_load_ph(addr_of!(a).cast()); - assert_eq_m128h(a, b); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm_mask_sub_ph(src, 0b01010101, a, b); + let e = _mm_set_ph(10., -5., 12., -1., 14., 3., 16., 7.); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_load_ph() { + unsafe fn test_mm_maskz_sub_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let r = _mm_maskz_sub_ph(0b01010101, a, b); + let e = _mm_set_ph(0., -5., 0., -1., 0., 3., 0., 7.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn 
test_mm256_sub_ph() { let a = _mm256_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - let b = _mm256_load_ph(addr_of!(a).cast()); - assert_eq_m256h(a, b); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let r = _mm256_sub_ph(a, b); + let e = _mm256_set_ph( + -15.0, -13.0, -11.0, -9.0, -7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, + 15.0, + ); + assert_eq_m256h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_load_ph() { - let a = _mm512_set_ph( + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_sub_ph() { + let a = _mm256_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, ); - let b = _mm512_load_ph(addr_of!(a).cast()); - assert_eq_m512h(a, b); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let src = _mm256_set_ph( + 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., + ); + let r = _mm256_mask_sub_ph(src, 0b0101010101010101, a, b); + let e = _mm256_set_ph( + 18., -13., 20., -9., 22., -5., 24., -1., 26., 3., 28., 7., 30., 11., 32., 15., + ); + assert_eq_m256h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_load_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_load_sh(addr_of!(a).cast()); - assert_eq_m128h(a, b); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_sub_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let r = _mm256_maskz_sub_ph(0b0101010101010101, a, b); + let e = _mm256_set_ph( + 0., -13., 0., -9., 0., -5., 0., -1., 0., 3., 0., 7., 0., 11., 0., 15., + ); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_load_sh() { - let a = _mm_set_sh(1.0); - let src = _mm_set_sh(2.); - let b = _mm_mask_load_sh(src, 1, addr_of!(a).cast()); - assert_eq_m128h(a, b); - let b = _mm_mask_load_sh(src, 0, addr_of!(a).cast()); - assert_eq_m128h(src, b); + unsafe fn test_mm512_sub_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_sub_ph(a, b); + let e = _mm512_set_ph( + -31.0, -29.0, -27.0, -25.0, -23.0, -21.0, -19.0, -17.0, -15.0, -13.0, -11.0, -9.0, + -7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, + 23.0, 25.0, 27.0, 29.0, 31.0, + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_load_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_maskz_load_sh(1, addr_of!(a).cast()); - assert_eq_m128h(a, b); - let b = _mm_maskz_load_sh(0, addr_of!(a).cast()); - assert_eq_m128h(_mm_setzero_ph(), b); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_loadu_ph() { - let array = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 
8.0]; - let r = _mm_loadu_ph(array.as_ptr()); - let e = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_loadu_ph() { - let array = [ - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ]; - let r = _mm256_loadu_ph(array.as_ptr()); - let e = _mm256_setr_ph( + unsafe fn test_mm512_mask_sub_ph() { + let a = _mm512_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let src = _mm512_set_ph( + 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., + 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + ); + let r = _mm512_mask_sub_ph(src, 0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 34., -29., 36., -25., 38., -21., 40., -17., 42., -13., 44., -9., 46., -5., 48., -1., + 50., 3., 52., 7., 54., 11., 56., 15., 58., 19., 60., 23., 62., 27., 64., 31., ); - assert_eq_m256h(r, e); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_loadu_ph() { - let array = [ - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ]; - let r = _mm512_loadu_ph(array.as_ptr()); - let e = _mm512_setr_ph( + unsafe fn test_mm512_maskz_sub_ph() { + let a = _mm512_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_maskz_sub_ph(0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 0., -29., 0., -25., 0., -21., 0., -17., 0., -13., 0., -9., 0., -5., 0., -1., 0., 3., + 0., 7., 0., 11., 0., 15., 0., 19., 0., 23., 0., 27., 0., 31., + ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_move_sh() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_sh(9.0); - let r = _mm_move_sh(a, b); - let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 9.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_move_sh() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_sh(9.0); - let src = _mm_set_sh(10.0); - let r = _mm_mask_move_sh(src, 0, a, b); - let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 10.0); - assert_eq_m128h(r, e); + unsafe fn test_mm512_sub_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 
4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_sub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set_ph( + -31.0, -29.0, -27.0, -25.0, -23.0, -21.0, -19.0, -17.0, -15.0, -13.0, -11.0, -9.0, + -7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, + 23.0, 25.0, 27.0, 29.0, 31.0, + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_move_sh() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_sh(9.0); - let r = _mm_maskz_move_sh(0, a, b); - let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 0.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_store_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let mut b = _mm_setzero_ph(); - _mm_store_ph(addr_of_mut!(b).cast(), a); - assert_eq_m128h(a, b); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_store_ph() { - let a = _mm256_set_ph( + unsafe fn test_mm512_mask_sub_round_ph() { + let a = _mm512_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, ); - let mut b = _mm256_setzero_ph(); - _mm256_store_ph(addr_of_mut!(b).cast(), a); - assert_eq_m256h(a, b); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let src = _mm512_set_ph( + 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., + 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + ); + let r = _mm512_mask_sub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 34., -29., 36., -25., 38., -21., 40., -17., 42., -13., 44., -9., 46., -5., 48., -1., + 50., 3., 52., 7., 54., 11., 56., 15., 58., 19., 60., 23., 62., 27., 64., 31., + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_store_ph() { + unsafe fn test_mm512_maskz_sub_round_ph() { let a = _mm512_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, ); - let mut b = _mm512_setzero_ph(); - _mm512_store_ph(addr_of_mut!(b).cast(), a); - assert_eq_m512h(a, b); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_maskz_sub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 0., -29., 0., -25., 0., -21., 0., -17., 0., -13., 0., -9., 0., -5., 0., -1., 0., 3., + 0., 7., 0., 11., 0., 15., 0., 19., 0., 23., 0., 27., 0., 31., + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_store_sh() { + unsafe fn test_mm_sub_round_sh() { let a = _mm_set_sh(1.0); - let mut b = _mm_setzero_ph(); - _mm_store_sh(addr_of_mut!(b).cast(), a); - assert_eq_m128h(a, b); + let b = _mm_set_sh(2.0); + let r = _mm_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = 
_mm_set_sh(-1.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_store_sh() { + unsafe fn test_mm_mask_sub_round_sh() { let a = _mm_set_sh(1.0); - let mut b = _mm_setzero_ph(); - _mm_mask_store_sh(addr_of_mut!(b).cast(), 0, a); - assert_eq_m128h(_mm_setzero_ph(), b); - _mm_mask_store_sh(addr_of_mut!(b).cast(), 1, a); - assert_eq_m128h(a, b); + let b = _mm_set_sh(2.0); + let src = _mm_set_sh(4.0); + let r = _mm_mask_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, + ); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 1, a, b, + ); + let e = _mm_set_sh(-1.0); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_storeu_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let mut array = [0.0; 8]; - _mm_storeu_ph(array.as_mut_ptr(), a); - assert_eq_m128h(a, _mm_loadu_ph(array.as_ptr())); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_sub_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = + _mm_maskz_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = + _mm_maskz_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_set_sh(-1.0); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_storeu_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let mut array = [0.0; 16]; - _mm256_storeu_ph(array.as_mut_ptr(), a); - assert_eq_m256h(a, _mm256_loadu_ph(array.as_ptr())); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_sub_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_sub_sh(a, b); + let e = _mm_set_sh(-1.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_storeu_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let mut array = [0.0; 32]; - _mm512_storeu_ph(array.as_mut_ptr(), a); - assert_eq_m512h(a, _mm512_loadu_ph(array.as_ptr())); + unsafe fn test_mm_mask_sub_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let src = _mm_set_sh(4.0); + let r = _mm_mask_sub_sh(src, 0, a, b); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_sub_sh(src, 1, a, b); + let e = _mm_set_sh(-1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_sub_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_maskz_sub_sh(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = _mm_maskz_sub_sh(1, a, b); + let e = _mm_set_sh(-1.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_add_ph() { + unsafe fn test_mm_mul_ph() { let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - let r = _mm_add_ph(a, b); - let e = _mm_set1_ph(9.0); + let r = _mm_mul_ph(a, b); + let e = _mm_set_ph(8.0, 14.0, 18.0, 20.0, 20.0, 18.0, 14.0, 8.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_add_ph() { + unsafe fn 
test_mm_mask_mul_ph() { let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let r = _mm_mask_add_ph(src, 0b01010101, a, b); - let e = _mm_set_ph(10., 9., 12., 9., 14., 9., 16., 9.); + let r = _mm_mask_mul_ph(src, 0b01010101, a, b); + let e = _mm_set_ph(10., 14., 12., 20., 14., 18., 16., 8.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_add_ph() { + unsafe fn test_mm_maskz_mul_ph() { let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - let r = _mm_maskz_add_ph(0b01010101, a, b); - let e = _mm_set_ph(0., 9., 0., 9., 0., 9., 0., 9.); + let r = _mm_maskz_mul_ph(0b01010101, a, b); + let e = _mm_set_ph(0., 14., 0., 20., 0., 18., 0., 8.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_add_ph() { + unsafe fn test_mm256_mul_ph() { let a = _mm256_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); let b = _mm256_set_ph( 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, ); - let r = _mm256_add_ph(a, b); - let e = _mm256_set1_ph(17.0); + let r = _mm256_mul_ph(a, b); + let e = _mm256_set_ph( + 16.0, 30.0, 42.0, 52.0, 60.0, 66.0, 70.0, 72.0, 72.0, 70.0, 66.0, 60.0, 52.0, 42.0, + 30.0, 16.0, + ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_add_ph() { + unsafe fn test_mm256_mask_mul_ph() { let a = _mm256_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); @@ -12319,30 +14758,30 @@ mod tests { let src = _mm256_set_ph( 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., ); - let r = _mm256_mask_add_ph(src, 0b0101010101010101, a, b); + let r = _mm256_mask_mul_ph(src, 0b0101010101010101, a, b); let e = _mm256_set_ph( - 18., 17., 20., 17., 22., 17., 24., 17., 26., 17., 28., 17., 30., 17., 32., 17., + 18., 30., 20., 52., 22., 66., 24., 72., 26., 70., 28., 60., 30., 42., 32., 16., ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_add_ph() { + unsafe fn test_mm256_maskz_mul_ph() { let a = _mm256_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); let b = _mm256_set_ph( 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, ); - let r = _mm256_maskz_add_ph(0b0101010101010101, a, b); + let r = _mm256_maskz_mul_ph(0b0101010101010101, a, b); let e = _mm256_set_ph( - 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., + 0., 30., 0., 52., 0., 66., 0., 72., 0., 70., 0., 60., 0., 42., 0., 16., ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_add_ph() { + unsafe fn test_mm512_mul_ph() { let a = _mm512_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, @@ -12353,13 +14792,17 @@ mod tests { 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, ); - let r = _mm512_add_ph(a, b); - let e = _mm512_set1_ph(33.0); + let r = _mm512_mul_ph(a, b); + let e = _mm512_set_ph( + 32.0, 62.0, 90.0, 116.0, 140.0, 162.0, 182.0, 200.0, 216.0, 230.0, 242.0, 252.0, 260.0, + 266.0, 270.0, 
272.0, 272.0, 270.0, 266.0, 260.0, 252.0, 242.0, 230.0, 216.0, 200.0, + 182.0, 162.0, 140.0, 116.0, 90.0, 62.0, 32.0, + ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_add_ph() { + unsafe fn test_mm512_mask_mul_ph() { let a = _mm512_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, @@ -12374,16 +14817,16 @@ mod tests { 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., ); - let r = _mm512_mask_add_ph(src, 0b01010101010101010101010101010101, a, b); + let r = _mm512_mask_mul_ph(src, 0b01010101010101010101010101010101, a, b); let e = _mm512_set_ph( - 34., 33., 36., 33., 38., 33., 40., 33., 42., 33., 44., 33., 46., 33., 48., 33., 50., - 33., 52., 33., 54., 33., 56., 33., 58., 33., 60., 33., 62., 33., 64., 33., + 34., 62., 36., 116., 38., 162., 40., 200., 42., 230., 44., 252., 46., 266., 48., 272., + 50., 270., 52., 260., 54., 242., 56., 216., 58., 182., 60., 140., 62., 90., 64., 32., ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_add_ph() { + unsafe fn test_mm512_maskz_mul_ph() { let a = _mm512_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, @@ -12394,16 +14837,16 @@ mod tests { 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, ); - let r = _mm512_maskz_add_ph(0b01010101010101010101010101010101, a, b); + let r = _mm512_maskz_mul_ph(0b01010101010101010101010101010101, a, b); let e = _mm512_set_ph( - 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., - 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., + 0., 62., 0., 116., 0., 162., 0., 200., 0., 230., 0., 252., 0., 266., 0., 272., 0., + 270., 0., 260., 0., 242., 0., 216., 0., 182., 0., 140., 0., 90., 0., 32., ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_add_round_ph() { + unsafe fn test_mm512_mul_round_ph() { let a = _mm512_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, @@ -12414,13 +14857,17 @@ mod tests { 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, ); - let r = _mm512_add_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm512_set1_ph(33.0); + let r = _mm512_mul_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set_ph( + 32.0, 62.0, 90.0, 116.0, 140.0, 162.0, 182.0, 200.0, 216.0, 230.0, 242.0, 252.0, 260.0, + 266.0, 270.0, 272.0, 272.0, 270.0, 266.0, 260.0, 252.0, 242.0, 230.0, 216.0, 200.0, + 182.0, 162.0, 140.0, 116.0, 90.0, 62.0, 32.0, + ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_add_round_ph() { + unsafe fn test_mm512_mask_mul_round_ph() { let a = _mm512_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, @@ -12435,21 +14882,21 @@ mod tests { 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., 51., 52., 
53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., ); - let r = _mm512_mask_add_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm512_mask_mul_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( src, 0b01010101010101010101010101010101, a, b, ); let e = _mm512_set_ph( - 34., 33., 36., 33., 38., 33., 40., 33., 42., 33., 44., 33., 46., 33., 48., 33., 50., - 33., 52., 33., 54., 33., 56., 33., 58., 33., 60., 33., 62., 33., 64., 33., + 34., 62., 36., 116., 38., 162., 40., 200., 42., 230., 44., 252., 46., 266., 48., 272., + 50., 270., 52., 260., 54., 242., 56., 216., 58., 182., 60., 140., 62., 90., 64., 32., ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_add_round_ph() { + unsafe fn test_mm512_maskz_mul_round_ph() { let a = _mm512_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, @@ -12460,939 +14907,983 @@ mod tests { 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, ); - let r = _mm512_maskz_add_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm512_maskz_mul_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 0., 62., 0., 116., 0., 162., 0., 200., 0., 230., 0., 252., 0., 266., 0., 272., 0., + 270., 0., 260., 0., 242., 0., 216., 0., 182., 0., 140., 0., 90., 0., 32., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mul_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_set_sh(2.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_mul_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let src = _mm_set_sh(4.0); + let r = _mm_mask_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, + ); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 1, a, b, + ); + let e = _mm_set_sh(2.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_mul_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = + _mm_maskz_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = + _mm_maskz_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_set_sh(2.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mul_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_mul_sh(a, b); + let e = _mm_set_sh(2.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_mul_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let src = _mm_set_sh(4.0); + let r = _mm_mask_mul_sh(src, 0, a, b); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_mul_sh(src, 1, a, b); + let e = _mm_set_sh(2.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_mul_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_maskz_mul_sh(0, a, b); + let e = 
_mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = _mm_maskz_mul_sh(1, a, b); + let e = _mm_set_sh(2.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_div_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let r = _mm_div_ph(a, b); + let e = _mm_set1_ph(0.5); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_div_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let src = _mm_set_ph(4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0); + let r = _mm_mask_div_ph(src, 0b01010101, a, b); + let e = _mm_set_ph(4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_div_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let r = _mm_maskz_div_ph(0b01010101, a, b); + let e = _mm_set_ph(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_div_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let r = _mm256_div_ph(a, b); + let e = _mm256_set1_ph(0.5); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_div_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let src = _mm256_set_ph( + 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, + 19.0, + ); + let r = _mm256_mask_div_ph(src, 0b0101010101010101, a, b); + let e = _mm256_set_ph( + 4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5, 12.0, 0.5, 14.0, 0.5, 16.0, 0.5, 18.0, 0.5, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_div_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let r = _mm256_maskz_div_ph(0b0101010101010101, a, b); + let e = _mm256_set_ph( + 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_div_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let r = _mm512_div_ph(a, b); + let e = _mm512_set1_ph(0.5); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_div_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let src = _mm512_set_ph( + 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, + 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, + 33.0, 34.0, 35.0, + ); + let r = _mm512_mask_div_ph(src, 0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5, 12.0, 0.5, 14.0, 0.5, 16.0, 0.5, 18.0, 0.5, + 20.0, 0.5, 22.0, 0.5, 24.0, 0.5, 26.0, 0.5, 28.0, 0.5, 30.0, 0.5, 32.0, 0.5, 34.0, 0.5, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_div_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let r = _mm512_maskz_div_ph(0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, + 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_div_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let r = 
_mm512_div_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set1_ph(0.5); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_div_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let src = _mm512_set_ph( + 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, + 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, + 33.0, 34.0, 35.0, + ); + let r = _mm512_mask_div_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5, 12.0, 0.5, 14.0, 0.5, 16.0, 0.5, 18.0, 0.5, + 20.0, 0.5, 22.0, 0.5, 24.0, 0.5, 26.0, 0.5, 28.0, 0.5, 30.0, 0.5, 32.0, 0.5, 34.0, 0.5, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_div_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let r = _mm512_maskz_div_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( 0b01010101010101010101010101010101, a, b, ); let e = _mm512_set_ph( - 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., - 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., + 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, + 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_add_round_sh() { + unsafe fn test_mm_div_round_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); - let r = _mm_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm_set_sh(3.0); + let r = _mm_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_set_sh(0.5); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_add_round_sh() { + unsafe fn test_mm_mask_div_round_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); let src = _mm_set_sh(4.0); - let r = _mm_mask_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm_mask_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( src, 0, a, b, ); let e = _mm_set_sh(4.0); assert_eq_m128h(r, e); - let r = _mm_mask_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm_mask_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( src, 1, a, b, ); - let e = _mm_set_sh(3.0); + let e = _mm_set_sh(0.5); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_add_round_sh() { + unsafe fn test_mm_maskz_div_round_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); let r = - _mm_maskz_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + _mm_maskz_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); let e = _mm_set_sh(0.0); assert_eq_m128h(r, e); let r = - _mm_maskz_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); - let e = _mm_set_sh(3.0); + _mm_maskz_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_set_sh(0.5); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_add_sh() { + unsafe fn test_mm_div_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); - let r = _mm_add_sh(a, b); - let e = _mm_set_sh(3.0); + let r = _mm_div_sh(a, b); 
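+        // `_mm_div_sh` divides only the lowest f16 lane (1.0 / 2.0 = 0.5) and copies
+        // the upper 7 lanes from `a`, so the full vectors compare equal here.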
+ let e = _mm_set_sh(0.5); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_add_sh() { + unsafe fn test_mm_mask_div_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); let src = _mm_set_sh(4.0); - let r = _mm_mask_add_sh(src, 0, a, b); + let r = _mm_mask_div_sh(src, 0, a, b); let e = _mm_set_sh(4.0); assert_eq_m128h(r, e); - let r = _mm_mask_add_sh(src, 1, a, b); - let e = _mm_set_sh(3.0); + let r = _mm_mask_div_sh(src, 1, a, b); + let e = _mm_set_sh(0.5); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_add_sh() { + unsafe fn test_mm_maskz_div_sh() { let a = _mm_set_sh(1.0); let b = _mm_set_sh(2.0); - let r = _mm_maskz_add_sh(0, a, b); + let r = _mm_maskz_div_sh(0, a, b); let e = _mm_set_sh(0.0); assert_eq_m128h(r, e); - let r = _mm_maskz_add_sh(1, a, b); - let e = _mm_set_sh(3.0); + let r = _mm_maskz_div_sh(1, a, b); + let e = _mm_set_sh(0.5); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_sub_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - let r = _mm_sub_ph(a, b); - let e = _mm_set_ph(-7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0); + unsafe fn test_mm_mul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 1.0); + let r = _mm_mul_pch(a, b); + let e = _mm_set1_pch(-1.0, 0.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_sub_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let r = _mm_mask_sub_ph(src, 0b01010101, a, b); - let e = _mm_set_ph(10., -5., 12., -1., 14., 3., 16., 7.); + unsafe fn test_mm_mask_mul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 1.0); + let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0); + let r = _mm_mask_mul_pch(src, 0b0101, a, b); + let e = _mm_setr_ph(-1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_sub_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - let r = _mm_maskz_sub_ph(0b01010101, a, b); - let e = _mm_set_ph(0., -5., 0., -1., 0., 3., 0., 7.); + unsafe fn test_mm_maskz_mul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 1.0); + let r = _mm_maskz_mul_pch(0b0101, a, b); + let e = _mm_setr_ph(-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_sub_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let b = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, - ); - let r = _mm256_sub_ph(a, b); - let e = _mm256_set_ph( - -15.0, -13.0, -11.0, -9.0, -7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, - 15.0, - ); + unsafe fn test_mm256_mul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 1.0); + let r = _mm256_mul_pch(a, b); + let e = _mm256_set1_pch(-1.0, 0.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_sub_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 
16.0, - ); - let b = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, - ); - let src = _mm256_set_ph( - 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., + unsafe fn test_mm256_mask_mul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 1.0); + let src = _mm256_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, ); - let r = _mm256_mask_sub_ph(src, 0b0101010101010101, a, b); - let e = _mm256_set_ph( - 18., -13., 20., -9., 22., -5., 24., -1., 26., 3., 28., 7., 30., 11., 32., 15., + let r = _mm256_mask_mul_pch(src, 0b01010101, a, b); + let e = _mm256_setr_ph( + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_sub_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let b = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, - ); - let r = _mm256_maskz_sub_ph(0b0101010101010101, a, b); - let e = _mm256_set_ph( - 0., -13., 0., -9., 0., -5., 0., -1., 0., 3., 0., 7., 0., 11., 0., 15., + unsafe fn test_mm256_maskz_mul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 1.0); + let r = _mm256_maskz_mul_pch(0b01010101, a, b); + let e = _mm256_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_sub_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let r = _mm512_sub_ph(a, b); - let e = _mm512_set_ph( - -31.0, -29.0, -27.0, -25.0, -23.0, -21.0, -19.0, -17.0, -15.0, -13.0, -11.0, -9.0, - -7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, - 23.0, 25.0, 27.0, 29.0, 31.0, - ); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_sub_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let src = _mm512_set_ph( - 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., - 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., - ); - let r = _mm512_mask_sub_ph(src, 0b01010101010101010101010101010101, a, b); - let e = _mm512_set_ph( - 34., -29., 36., -25., 38., -21., 40., -17., 42., -13., 44., -9., 46., -5., 48., -1., - 50., 3., 52., 7., 54., 11., 56., 15., 58., 19., 60., 23., 62., 27., 64., 31., - ); + unsafe fn test_mm512_mul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let r = _mm512_mul_pch(a, b); + 
let e = _mm512_set1_pch(-1.0, 0.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_sub_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, + unsafe fn test_mm512_mask_mul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let src = _mm512_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, + 32.0, 33.0, ); - let r = _mm512_maskz_sub_ph(0b01010101010101010101010101010101, a, b); - let e = _mm512_set_ph( - 0., -29., 0., -25., 0., -21., 0., -17., 0., -13., 0., -9., 0., -5., 0., -1., 0., 3., - 0., 7., 0., 11., 0., 15., 0., 19., 0., 23., 0., 27., 0., 31., + let r = _mm512_mask_mul_pch(src, 0b0101010101010101, a, b); + let e = _mm512_setr_ph( + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, + 33.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_sub_round_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let r = _mm512_sub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm512_set_ph( - -31.0, -29.0, -27.0, -25.0, -23.0, -21.0, -19.0, -17.0, -15.0, -13.0, -11.0, -9.0, - -7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, - 23.0, 25.0, 27.0, 29.0, 31.0, + unsafe fn test_mm512_maskz_mul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let r = _mm512_maskz_mul_pch(0b0101010101010101, a, b); + let e = _mm512_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_sub_round_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let src = _mm512_set_ph( - 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., - 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + unsafe fn test_mm512_mul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let r = _mm512_mul_round_pch::<{ 
_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set1_pch(-1.0, 0.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_mul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let src = _mm512_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, + 32.0, 33.0, ); - let r = _mm512_mask_sub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm512_mask_mul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( src, - 0b01010101010101010101010101010101, + 0b0101010101010101, a, b, ); - let e = _mm512_set_ph( - 34., -29., 36., -25., 38., -21., 40., -17., 42., -13., 44., -9., 46., -5., 48., -1., - 50., 3., 52., 7., 54., 11., 56., 15., 58., 19., 60., 23., 62., 27., 64., 31., + let e = _mm512_setr_ph( + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, + 33.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_sub_round_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let r = _mm512_maskz_sub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b01010101010101010101010101010101, + unsafe fn test_mm512_maskz_mul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let r = _mm512_maskz_mul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b0101010101010101, a, b, ); - let e = _mm512_set_ph( - 0., -29., 0., -25., 0., -21., 0., -17., 0., -13., 0., -9., 0., -5., 0., -1., 0., 3., - 0., 7., 0., 11., 0., 15., 0., 19., 0., 23., 0., 27., 0., 31., + let e = _mm512_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_sub_round_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm_set_sh(-1.0); + unsafe fn test_mm_mul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let r = _mm_mul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_sub_round_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let src = _mm_set_sh(4.0); - let r = _mm_mask_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm_mask_mul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 
21.0); + let r = _mm_mask_mul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( src, 0, a, b, ); - let e = _mm_set_sh(4.0); - assert_eq_m128h(r, e); - let r = _mm_mask_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 1, a, b, - ); - let e = _mm_set_sh(-1.0); + let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_sub_round_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = - _mm_maskz_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); - let e = _mm_set_sh(0.0); - assert_eq_m128h(r, e); + unsafe fn test_mm_maskz_mul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); let r = - _mm_maskz_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); - let e = _mm_set_sh(-1.0); + _mm_maskz_mul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_sub_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_sub_sh(a, b); - let e = _mm_set_sh(-1.0); + unsafe fn test_mm_mul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let r = _mm_mul_sch(a, b); + let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_sub_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let src = _mm_set_sh(4.0); - let r = _mm_mask_sub_sh(src, 0, a, b); - let e = _mm_set_sh(4.0); - assert_eq_m128h(r, e); - let r = _mm_mask_sub_sh(src, 1, a, b); - let e = _mm_set_sh(-1.0); + unsafe fn test_mm_mask_mul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); + let r = _mm_mask_mul_sch(src, 0, a, b); + let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_sub_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_maskz_sub_sh(0, a, b); - let e = _mm_set_sh(0.0); - assert_eq_m128h(r, e); - let r = _mm_maskz_sub_sh(1, a, b); - let e = _mm_set_sh(-1.0); + unsafe fn test_mm_maskz_mul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let r = _mm_maskz_mul_sch(0, a, b); + let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mul_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - let r = _mm_mul_ph(a, b); - let e = _mm_set_ph(8.0, 14.0, 18.0, 20.0, 20.0, 18.0, 14.0, 8.0); + unsafe fn test_mm_fmul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 1.0); + let r = _mm_fmul_pch(a, b); + let e = _mm_set1_pch(-1.0, 0.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_mul_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 
1.0); - let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let r = _mm_mask_mul_ph(src, 0b01010101, a, b); - let e = _mm_set_ph(10., 14., 12., 20., 14., 18., 16., 8.); + unsafe fn test_mm_mask_fmul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 1.0); + let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0); + let r = _mm_mask_fmul_pch(src, 0b0101, a, b); + let e = _mm_setr_ph(-1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_mul_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - let r = _mm_maskz_mul_ph(0b01010101, a, b); - let e = _mm_set_ph(0., 14., 0., 20., 0., 18., 0., 8.); + unsafe fn test_mm_maskz_fmul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 1.0); + let r = _mm_maskz_fmul_pch(0b0101, a, b); + let e = _mm_setr_ph(-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mul_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let b = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, - ); - let r = _mm256_mul_ph(a, b); - let e = _mm256_set_ph( - 16.0, 30.0, 42.0, 52.0, 60.0, 66.0, 70.0, 72.0, 72.0, 70.0, 66.0, 60.0, 52.0, 42.0, - 30.0, 16.0, - ); + unsafe fn test_mm256_fmul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 1.0); + let r = _mm256_fmul_pch(a, b); + let e = _mm256_set1_pch(-1.0, 0.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_mul_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let b = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, - ); - let src = _mm256_set_ph( - 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., + unsafe fn test_mm256_mask_fmul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 1.0); + let src = _mm256_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, ); - let r = _mm256_mask_mul_ph(src, 0b0101010101010101, a, b); - let e = _mm256_set_ph( - 18., 30., 20., 52., 22., 66., 24., 72., 26., 70., 28., 60., 30., 42., 32., 16., + let r = _mm256_mask_fmul_pch(src, 0b01010101, a, b); + let e = _mm256_setr_ph( + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_mul_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let b = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, - ); - let r = _mm256_maskz_mul_ph(0b0101010101010101, a, b); - let e = _mm256_set_ph( - 0., 30., 0., 52., 0., 66., 0., 72., 0., 70., 0., 60., 0., 42., 0., 16., + unsafe fn test_mm256_maskz_fmul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 1.0); + let r = _mm256_maskz_fmul_pch(0b01010101, a, b); + let e = _mm256_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, ); assert_eq_m256h(r, e); } - 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mul_ph() {
-        let a = _mm512_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
-            31.0, 32.0,
-        );
-        let b = _mm512_set_ph(
-            32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0,
-            18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0,
-            3.0, 2.0, 1.0,
-        );
-        let r = _mm512_mul_ph(a, b);
-        let e = _mm512_set_ph(
-            32.0, 62.0, 90.0, 116.0, 140.0, 162.0, 182.0, 200.0, 216.0, 230.0, 242.0, 252.0, 260.0,
-            266.0, 270.0, 272.0, 272.0, 270.0, 266.0, 260.0, 252.0, 242.0, 230.0, 216.0, 200.0,
-            182.0, 162.0, 140.0, 116.0, 90.0, 62.0, 32.0,
-        );
-        assert_eq_m512h(r, e);
-    }
-
-    #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask_mul_ph() {
-        let a = _mm512_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
-            31.0, 32.0,
-        );
-        let b = _mm512_set_ph(
-            32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0,
-            18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0,
-            3.0, 2.0, 1.0,
-        );
-        let src = _mm512_set_ph(
-            34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50.,
-            51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65.,
-        );
-        let r = _mm512_mask_mul_ph(src, 0b01010101010101010101010101010101, a, b);
-        let e = _mm512_set_ph(
-            34., 62., 36., 116., 38., 162., 40., 200., 42., 230., 44., 252., 46., 266., 48., 272.,
-            50., 270., 52., 260., 54., 242., 56., 216., 58., 182., 60., 140., 62., 90., 64., 32.,
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm512_fmul_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, 1.0);
+        let r = _mm512_fmul_pch(a, b);
+        let e = _mm512_set1_pch(-1.0, 0.0);
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_maskz_mul_ph() {
-        let a = _mm512_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
-            31.0, 32.0,
-        );
-        let b = _mm512_set_ph(
-            32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0,
-            18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0,
-            3.0, 2.0, 1.0,
+    unsafe fn test_mm512_mask_fmul_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, 1.0);
+        let src = _mm512_setr_ph(
+            2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0,
+            18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0,
+            32.0, 33.0,
         );
-        let r = _mm512_maskz_mul_ph(0b01010101010101010101010101010101, a, b);
-        let e = _mm512_set_ph(
-            0., 62., 0., 116., 0., 162., 0., 200., 0., 230., 0., 252., 0., 266., 0., 272., 0.,
-            270., 0., 260., 0., 242., 0., 216., 0., 182., 0., 140., 0., 90., 0., 32.,
+        let r = _mm512_mask_fmul_pch(src, 0b0101010101010101, a, b);
+        let e = _mm512_setr_ph(
+            -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0,
+            -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0,
+            33.0,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mul_round_ph() {
-        let a = _mm512_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
-            31.0, 32.0,
-        );
-        let b = _mm512_set_ph(
-            32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0,
-            18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0,
-            3.0, 2.0, 1.0,
-        );
-        let r = _mm512_mul_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
-        let e = _mm512_set_ph(
-            32.0, 62.0, 90.0, 116.0, 140.0, 162.0, 182.0, 200.0, 216.0, 230.0, 242.0, 252.0, 260.0,
-            266.0, 270.0, 272.0, 272.0, 270.0, 266.0, 260.0, 252.0, 242.0, 230.0, 216.0, 200.0,
-            182.0, 162.0, 140.0, 116.0, 90.0, 62.0, 32.0,
+    unsafe fn test_mm512_maskz_fmul_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, 1.0);
+        let r = _mm512_maskz_fmul_pch(0b0101010101010101, a, b);
+        let e = _mm512_setr_ph(
+            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
+            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask_mul_round_ph() {
-        let a = _mm512_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
-            31.0, 32.0,
-        );
-        let b = _mm512_set_ph(
-            32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0,
-            18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0,
-            3.0, 2.0, 1.0,
-        );
-        let src = _mm512_set_ph(
-            34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50.,
-            51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65.,
+    unsafe fn test_mm512_fmul_round_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, 1.0);
+        let r = _mm512_fmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
+        let e = _mm512_set1_pch(-1.0, 0.0);
+        assert_eq_m512h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm512_mask_fmul_round_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, 1.0);
+        let src = _mm512_setr_ph(
+            2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0,
+            18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0,
+            32.0, 33.0,
         );
-        let r = _mm512_mask_mul_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+        let r = _mm512_mask_fmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             src,
-            0b01010101010101010101010101010101,
+            0b0101010101010101,
             a,
             b,
         );
-        let e = _mm512_set_ph(
-            34., 62., 36., 116., 38., 162., 40., 200., 42., 230., 44., 252., 46., 266., 48., 272.,
-            50., 270., 52., 260., 54., 242., 56., 216., 58., 182., 60., 140., 62., 90., 64., 32.,
+        let e = _mm512_setr_ph(
+            -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0,
+            -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0,
+            33.0,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_maskz_mul_round_ph() {
-        let a = _mm512_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
-            31.0, 32.0,
-        );
-        let b = _mm512_set_ph(
-            32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0,
-            18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0,
-            3.0, 2.0, 1.0,
-        );
-        let r = _mm512_maskz_mul_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            0b01010101010101010101010101010101,
+    unsafe fn test_mm512_maskz_fmul_round_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, 1.0);
+        let r = _mm512_maskz_fmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            0b0101010101010101,
             a,
             b,
         );
-        let e = _mm512_set_ph(
-            0., 62., 0., 116., 0., 162., 0., 200., 0., 230., 0., 252., 0., 266., 0., 272., 0.,
-            270., 0., 260., 0., 242., 0., 216., 0., 182., 0., 140., 0., 90., 0., 32.,
+        let e = _mm512_setr_ph(
+            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
+            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mul_round_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let r = _mm_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
-        let e = _mm_set_sh(2.0);
+    unsafe fn test_mm_fmul_round_sch() {
+        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
+        let r = _mm_fmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
+        let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask_mul_round_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let src = _mm_set_sh(4.0);
-        let r = _mm_mask_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+    unsafe fn test_mm_mask_fmul_round_sch() {
+        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
+        let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0);
+        let r = _mm_mask_fmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             src, 0, a, b,
         );
-        let e = _mm_set_sh(4.0);
-        assert_eq_m128h(r, e);
-        let r = _mm_mask_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            src, 1, a, b,
-        );
-        let e = _mm_set_sh(2.0);
+        let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_maskz_mul_round_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let r =
-            _mm_maskz_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b);
-        let e = _mm_set_sh(0.0);
-        assert_eq_m128h(r, e);
+    unsafe fn test_mm_maskz_fmul_round_sch() {
+        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
         let r =
-            _mm_maskz_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b);
-        let e = _mm_set_sh(2.0);
+            _mm_maskz_fmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b);
+        let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mul_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let r = _mm_mul_sh(a, b);
-        let e = _mm_set_sh(2.0);
+    unsafe fn test_mm_fmul_sch() {
+        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
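+        // Sanity note: the scalar complex form multiplies only the low (real, imag)
+        // pair, (0 + 1i) * (0 + 1i) = -1 + 0i; elements 2..7 pass through from a.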
+        let r = _mm_fmul_sch(a, b);
+        let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask_mul_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let src = _mm_set_sh(4.0);
-        let r = _mm_mask_mul_sh(src, 0, a, b);
-        let e = _mm_set_sh(4.0);
-        assert_eq_m128h(r, e);
-        let r = _mm_mask_mul_sh(src, 1, a, b);
-        let e = _mm_set_sh(2.0);
+    unsafe fn test_mm_mask_fmul_sch() {
+        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
+        let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0);
+        let r = _mm_mask_fmul_sch(src, 0, a, b);
+        let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_maskz_mul_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let r = _mm_maskz_mul_sh(0, a, b);
-        let e = _mm_set_sh(0.0);
-        assert_eq_m128h(r, e);
-        let r = _mm_maskz_mul_sh(1, a, b);
-        let e = _mm_set_sh(2.0);
+    unsafe fn test_mm_maskz_fmul_sch() {
+        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
+        let r = _mm_maskz_fmul_sch(0, a, b);
+        let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_div_ph() {
-        let a = _mm_set1_ph(1.0);
-        let b = _mm_set1_ph(2.0);
-        let r = _mm_div_ph(a, b);
-        let e = _mm_set1_ph(0.5);
+    unsafe fn test_mm_cmul_pch() {
+        let a = _mm_set1_pch(0.0, 1.0);
+        let b = _mm_set1_pch(0.0, -1.0);
+        let r = _mm_cmul_pch(a, b);
+        let e = _mm_set1_pch(-1.0, 0.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_mask_div_ph() {
-        let a = _mm_set1_ph(1.0);
-        let b = _mm_set1_ph(2.0);
-        let src = _mm_set_ph(4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0);
-        let r = _mm_mask_div_ph(src, 0b01010101, a, b);
-        let e = _mm_set_ph(4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5);
+    unsafe fn test_mm_mask_cmul_pch() {
+        let a = _mm_set1_pch(0.0, 1.0);
+        let b = _mm_set1_pch(0.0, -1.0);
+        let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0);
+        let r = _mm_mask_cmul_pch(src, 0b0101, a, b);
+        let e = _mm_setr_ph(-1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_maskz_div_ph() {
-        let a = _mm_set1_ph(1.0);
-        let b = _mm_set1_ph(2.0);
-        let r = _mm_maskz_div_ph(0b01010101, a, b);
-        let e = _mm_set_ph(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5);
+    unsafe fn test_mm_maskz_cmul_pch() {
+        let a = _mm_set1_pch(0.0, 1.0);
+        let b = _mm_set1_pch(0.0, -1.0);
+        let r = _mm_maskz_cmul_pch(0b0101, a, b);
+        let e = _mm_setr_ph(-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_div_ph() {
-        let a = _mm256_set1_ph(1.0);
-        let b = _mm256_set1_ph(2.0);
-        let r = _mm256_div_ph(a, b);
-        let e = _mm256_set1_ph(0.5);
+    unsafe fn test_mm256_cmul_pch() {
+        let a = _mm256_set1_pch(0.0, 1.0);
+        let b = _mm256_set1_pch(0.0, -1.0);
+        let r = _mm256_cmul_pch(a, b);
+        let e = _mm256_set1_pch(-1.0, 0.0);
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_mask_div_ph() {
-        let a = _mm256_set1_ph(1.0);
-        let b = _mm256_set1_ph(2.0);
-        let src = _mm256_set_ph(
-            4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0,
-            19.0,
+    unsafe fn test_mm256_mask_cmul_pch() {
+        let a = _mm256_set1_pch(0.0, 1.0);
+        let b = _mm256_set1_pch(0.0, -1.0);
+        let src = _mm256_setr_ph(
+            2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0,
         );
-        let r = _mm256_mask_div_ph(src, 0b0101010101010101, a, b);
-        let e = _mm256_set_ph(
-            4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5, 12.0, 0.5, 14.0, 0.5, 16.0, 0.5, 18.0, 0.5,
+        let r = _mm256_mask_cmul_pch(src, 0b01010101, a, b);
+        let e = _mm256_setr_ph(
+            -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0,
         );
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_maskz_div_ph() {
-        let a = _mm256_set1_ph(1.0);
-        let b = _mm256_set1_ph(2.0);
-        let r = _mm256_maskz_div_ph(0b0101010101010101, a, b);
-        let e = _mm256_set_ph(
-            0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5,
+    unsafe fn test_mm256_maskz_cmul_pch() {
+        let a = _mm256_set1_pch(0.0, 1.0);
+        let b = _mm256_set1_pch(0.0, -1.0);
+        let r = _mm256_maskz_cmul_pch(0b01010101, a, b);
+        let e = _mm256_setr_ph(
+            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
         );
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_div_ph() {
-        let a = _mm512_set1_ph(1.0);
-        let b = _mm512_set1_ph(2.0);
-        let r = _mm512_div_ph(a, b);
-        let e = _mm512_set1_ph(0.5);
+    unsafe fn test_mm512_cmul_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, -1.0);
+        let r = _mm512_cmul_pch(a, b);
+        let e = _mm512_set1_pch(-1.0, 0.0);
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask_div_ph() {
-        let a = _mm512_set1_ph(1.0);
-        let b = _mm512_set1_ph(2.0);
-        let src = _mm512_set_ph(
-            4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0,
-            19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0,
-            33.0, 34.0, 35.0,
+    unsafe fn test_mm512_mask_cmul_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, -1.0);
+        let src = _mm512_setr_ph(
+            2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0,
+            18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0,
+            32.0, 33.0,
         );
-        let r = _mm512_mask_div_ph(src, 0b01010101010101010101010101010101, a, b);
-        let e = _mm512_set_ph(
-            4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5, 12.0, 0.5, 14.0, 0.5, 16.0, 0.5, 18.0, 0.5,
-            20.0, 0.5, 22.0, 0.5, 24.0, 0.5, 26.0, 0.5, 28.0, 0.5, 30.0, 0.5, 32.0, 0.5, 34.0, 0.5,
+        let r = _mm512_mask_cmul_pch(src, 0b0101010101010101, a, b);
+        let e = _mm512_setr_ph(
+            -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0,
+            -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0,
+            33.0,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_maskz_div_ph() {
-        let a = _mm512_set1_ph(1.0);
-        let b = _mm512_set1_ph(2.0);
-        let r = _mm512_maskz_div_ph(0b01010101010101010101010101010101, a, b);
-        let e = _mm512_set_ph(
-            0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0,
-            0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5,
+    unsafe fn test_mm512_maskz_cmul_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, -1.0);
+        let r = _mm512_maskz_cmul_pch(0b0101010101010101, a, b);
+        let e = _mm512_setr_ph(
+            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
+            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_div_round_ph() {
-        let a = _mm512_set1_ph(1.0);
-        let b = _mm512_set1_ph(2.0);
-        let r = _mm512_div_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
-        let e = _mm512_set1_ph(0.5);
+    unsafe fn test_mm512_cmul_round_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, -1.0);
+        let r = _mm512_cmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
+        let e = _mm512_set1_pch(-1.0, 0.0);
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask_div_round_ph() {
-        let a = _mm512_set1_ph(1.0);
-        let b = _mm512_set1_ph(2.0);
-        let src = _mm512_set_ph(
-            4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0,
-            19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0,
-            33.0, 34.0, 35.0,
+    unsafe fn test_mm512_mask_cmul_round_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, -1.0);
+        let src = _mm512_setr_ph(
+            2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0,
+            18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0,
+            32.0, 33.0,
         );
-        let r = _mm512_mask_div_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+        let r = _mm512_mask_cmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             src,
-            0b01010101010101010101010101010101,
+            0b0101010101010101,
             a,
             b,
         );
-        let e = _mm512_set_ph(
-            4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5, 12.0, 0.5, 14.0, 0.5, 16.0, 0.5, 18.0, 0.5,
-            20.0, 0.5, 22.0, 0.5, 24.0, 0.5, 26.0, 0.5, 28.0, 0.5, 30.0, 0.5, 32.0, 0.5, 34.0, 0.5,
+        let e = _mm512_setr_ph(
+            -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0,
+            -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0,
+            33.0,
        );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_maskz_div_round_ph() {
-        let a = _mm512_set1_ph(1.0);
-        let b = _mm512_set1_ph(2.0);
-        let r = _mm512_maskz_div_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            0b01010101010101010101010101010101,
+    unsafe fn test_mm512_maskz_cmul_round_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, -1.0);
+        let r = _mm512_maskz_cmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            0b0101010101010101,
             a,
             b,
         );
-        let e = _mm512_set_ph(
-            0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0,
-            0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5,
+        let e = _mm512_setr_ph(
+            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
+            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_div_round_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let r = _mm_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
-        let e = _mm_set_sh(0.5);
+    unsafe fn test_mm_cmul_sch() {
+        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0);
+        let r = _mm_cmul_sch(a, b);
+        let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }
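+
+    // The cmul/fcmul tests above multiply by the complex conjugate of b:
+    // with a = 0 + 1i and b = 0 - 1i, a * conj(b) = (0 + 1i) * (0 + 1i) = -1 + 0i.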
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask_div_round_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let src = _mm_set_sh(4.0);
-        let r = _mm_mask_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            src, 0, a, b,
-        );
-        let e = _mm_set_sh(4.0);
-        assert_eq_m128h(r, e);
-        let r = _mm_mask_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            src, 1, a, b,
-        );
-        let e = _mm_set_sh(0.5);
+    unsafe fn test_mm_mask_cmul_sch() {
+        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0);
+        let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0);
+        let r = _mm_mask_cmul_sch(src, 0, a, b);
+        let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_maskz_div_round_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let r =
-            _mm_maskz_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b);
-        let e = _mm_set_sh(0.0);
-        assert_eq_m128h(r, e);
-        let r =
-            _mm_maskz_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b);
-        let e = _mm_set_sh(0.5);
+    unsafe fn test_mm_maskz_cmul_sch() {
+        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0);
+        let r = _mm_maskz_cmul_sch(0, a, b);
+        let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_div_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let r = _mm_div_sh(a, b);
-        let e = _mm_set_sh(0.5);
+    unsafe fn test_mm_cmul_round_sch() {
+        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0);
+        let r = _mm_cmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
+        let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask_div_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let src = _mm_set_sh(4.0);
-        let r = _mm_mask_div_sh(src, 0, a, b);
-        let e = _mm_set_sh(4.0);
-        assert_eq_m128h(r, e);
-        let r = _mm_mask_div_sh(src, 1, a, b);
-        let e = _mm_set_sh(0.5);
+    unsafe fn test_mm_mask_cmul_round_sch() {
+        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0);
+        let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0);
+        let r = _mm_mask_cmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            src, 0, a, b,
+        );
+        let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_maskz_div_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let r = _mm_maskz_div_sh(0, a, b);
-        let e = _mm_set_sh(0.0);
-        assert_eq_m128h(r, e);
-        let r = _mm_maskz_div_sh(1, a, b);
-        let e = _mm_set_sh(0.5);
+    unsafe fn test_mm_maskz_cmul_round_sch() {
+        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0);
+        let r =
+            _mm_maskz_cmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b);
+        let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_mul_pch() {
+    unsafe fn test_mm_fcmul_pch() {
         let a = _mm_set1_pch(0.0, 1.0);
-        let b = _mm_set1_pch(0.0, 1.0);
-        let r = _mm_mul_pch(a, b);
+        let b = _mm_set1_pch(0.0, -1.0);
+        let r = _mm_fcmul_pch(a, b);
         let e = _mm_set1_pch(-1.0, 0.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_mask_mul_pch() {
+    unsafe fn test_mm_mask_fcmul_pch() {
         let a = _mm_set1_pch(0.0, 1.0);
-        let b = _mm_set1_pch(0.0, 1.0);
+        let b = _mm_set1_pch(0.0, -1.0);
         let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0);
-        let r = _mm_mask_mul_pch(src, 0b0101, a, b);
+        let r = _mm_mask_fcmul_pch(src, 0b0101, a, b);
         let e = _mm_setr_ph(-1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_maskz_mul_pch() {
+    unsafe fn test_mm_maskz_fcmul_pch() {
         let a = _mm_set1_pch(0.0, 1.0);
-        let b = _mm_set1_pch(0.0, 1.0);
-        let r = _mm_maskz_mul_pch(0b0101, a, b);
+        let b = _mm_set1_pch(0.0, -1.0);
+        let r = _mm_maskz_fcmul_pch(0b0101, a, b);
         let e = _mm_setr_ph(-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_mul_pch() {
+    unsafe fn test_mm256_fcmul_pch() {
         let a = _mm256_set1_pch(0.0, 1.0);
-        let b = _mm256_set1_pch(0.0, 1.0);
-        let r = _mm256_mul_pch(a, b);
+        let b = _mm256_set1_pch(0.0, -1.0);
+        let r = _mm256_fcmul_pch(a, b);
         let e = _mm256_set1_pch(-1.0, 0.0);
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_mask_mul_pch() {
+    unsafe fn test_mm256_mask_fcmul_pch() {
         let a = _mm256_set1_pch(0.0, 1.0);
-        let b = _mm256_set1_pch(0.0, 1.0);
+        let b = _mm256_set1_pch(0.0, -1.0);
         let src = _mm256_setr_ph(
             2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0,
         );
-        let r = _mm256_mask_mul_pch(src, 0b01010101, a, b);
+        let r = _mm256_mask_fcmul_pch(src, 0b01010101, a, b);
         let e = _mm256_setr_ph(
             -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0,
         );
@@ -13400,10 +15891,10 @@ mod tests {
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_maskz_mul_pch() {
+    unsafe fn test_mm256_maskz_fcmul_pch() {
         let a = _mm256_set1_pch(0.0, 1.0);
-        let b = _mm256_set1_pch(0.0, 1.0);
-        let r = _mm256_maskz_mul_pch(0b01010101, a, b);
+        let b = _mm256_set1_pch(0.0, -1.0);
+        let r = _mm256_maskz_fcmul_pch(0b01010101, a, b);
         let e = _mm256_setr_ph(
             -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
         );
@@ -13411,24 +15902,24 @@ mod tests {
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mul_pch() {
+    unsafe fn test_mm512_fcmul_pch() {
         let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, 1.0);
-        let r = _mm512_mul_pch(a, b);
+        let b = _mm512_set1_pch(0.0, -1.0);
+        let r = _mm512_fcmul_pch(a, b);
         let e = _mm512_set1_pch(-1.0, 0.0);
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask_mul_pch() {
+    unsafe fn test_mm512_mask_fcmul_pch() {
         let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, -1.0);
         let src = _mm512_setr_ph(
             2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0,
             18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0,
             32.0, 33.0,
         );
-        let r = _mm512_mask_mul_pch(src, 0b0101010101010101, a, b);
+        let r = _mm512_mask_fcmul_pch(src, 0b0101010101010101, a, b);
         let e = _mm512_setr_ph(
             -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0,
             -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0,
@@ -13438,10 +15929,10 @@ mod tests {
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_maskz_mul_pch() {
+    unsafe fn test_mm512_maskz_fcmul_pch() {
         let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, 1.0);
-        let r = _mm512_maskz_mul_pch(0b0101010101010101, a, b);
+        let b = _mm512_set1_pch(0.0, -1.0);
+        let r = _mm512_maskz_fcmul_pch(0b0101010101010101, a, b);
         let e = _mm512_setr_ph(
             -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
             -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
@@ -13450,24 +15941,24 @@ mod tests {
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mul_round_pch() {
-        let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, 1.0);
-        let r = _mm512_mul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
+    unsafe fn test_mm512_fcmul_round_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, -1.0);
+        let r = _mm512_fcmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
         let e = _mm512_set1_pch(-1.0, 0.0);
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask_mul_round_pch() {
+    unsafe fn test_mm512_mask_fcmul_round_pch() {
         let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, -1.0);
         let src = _mm512_setr_ph(
             2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0,
             18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0,
             32.0, 33.0,
         );
-        let r = _mm512_mask_mul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+        let r = _mm512_mask_fcmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             src,
             0b0101010101010101,
             a,
@@ -13482,10 +15973,10 @@ mod tests {
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_maskz_mul_round_pch() {
+    unsafe fn test_mm512_maskz_fcmul_round_pch() {
         let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, 1.0);
-        let r = _mm512_maskz_mul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+        let b = _mm512_set1_pch(0.0, -1.0);
+        let r = _mm512_maskz_fcmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             0b0101010101010101,
             a,
             b,
@@ -13498,5386 +15989,5955 @@ mod tests {
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mul_round_sch() {
+    unsafe fn test_mm_fcmul_sch() {
         let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
-        let r = _mm_mul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
+        let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0);
+        let r = _mm_fcmul_sch(a, b);
         let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask_mul_round_sch() {
+    unsafe fn test_mm_mask_fcmul_sch() {
         let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
+        let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0);
         let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0);
-        let r = _mm_mask_mul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            src, 0, a, b,
-        );
+        let r = _mm_mask_fcmul_sch(src, 0, a, b);
         let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_maskz_mul_round_sch() {
+    unsafe fn test_mm_maskz_fcmul_sch() {
         let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
-        let r =
-            _mm_maskz_mul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b);
+        let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0);
+        let r = _mm_maskz_fcmul_sch(0, a, b);
         let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mul_sch() {
+    unsafe fn test_mm_fcmul_round_sch() {
         let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
-        let r = _mm_mul_sch(a, b);
+        let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0);
+        let r = _mm_fcmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
         let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask_mul_sch() {
+    unsafe fn test_mm_mask_fcmul_round_sch() {
         let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
+        let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0);
         let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0);
-        let r = _mm_mask_mul_sch(src, 0, a, b);
+        let r = _mm_mask_fcmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            src, 0, a, b,
+        );
         let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_maskz_mul_sch() {
+    unsafe fn test_mm_maskz_fcmul_round_sch() {
         let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
-        let r = _mm_maskz_mul_sch(0, a, b);
+        let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0);
+        let r =
+            _mm_maskz_fcmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b);
        let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_fmul_pch() {
+    unsafe fn test_mm_abs_ph() {
+        let a = _mm_set_ph(-1.0, 0.0, 1.0, -2.0, 3.0, -4.0, 5.0, -6.0);
+        let r = _mm_abs_ph(a);
+        let e = _mm_set_ph(1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0);
+        assert_eq_m128h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm256_abs_ph() {
+        let a = _mm256_set_ph(
+            -1.0, 0.0, 1.0, -2.0, 3.0, -4.0, 5.0, -6.0, 7.0, -8.0, 9.0, -10.0, 11.0, -12.0, 13.0,
+            -14.0,
+        );
+        let r = _mm256_abs_ph(a);
+        let e = _mm256_set_ph(
+            1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0,
+        );
+        assert_eq_m256h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm512_abs_ph() {
+        let a = _mm512_set_ph(
+            -1.0, 0.0, 1.0, -2.0, 3.0, -4.0, 5.0, -6.0, 7.0, -8.0, 9.0, -10.0, 11.0, -12.0, 13.0,
+            -14.0, 15.0, -16.0, 17.0, -18.0, 19.0, -20.0, 21.0, -22.0, 23.0, -24.0, 25.0, -26.0,
+            27.0, -28.0, 29.0, -30.0,
+        );
+        let r = _mm512_abs_ph(a);
+        let e = _mm512_set_ph(
+            1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0,
+            15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0,
+            29.0, 30.0,
+        );
+        assert_eq_m512h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm_conj_pch() {
         let a = _mm_set1_pch(0.0, 1.0);
-        let b = _mm_set1_pch(0.0, 1.0);
-        let r = _mm_fmul_pch(a, b);
-        let e = _mm_set1_pch(-1.0, 0.0);
+        let r = _mm_conj_pch(a);
+        let e = _mm_set1_pch(0.0, -1.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_mask_fmul_pch() {
+    unsafe fn test_mm_mask_conj_pch() {
         let a = _mm_set1_pch(0.0, 1.0);
-        let b = _mm_set1_pch(0.0, 1.0);
         let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0);
-        let r = _mm_mask_fmul_pch(src, 0b0101, a, b);
-        let e = _mm_setr_ph(-1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0);
+        let r = _mm_mask_conj_pch(src, 0b0101, a);
+        let e = _mm_setr_ph(0.0, -1.0, 4.0, 5.0, 0.0, -1.0, 8.0, 9.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_maskz_fmul_pch() {
+    unsafe fn test_mm_maskz_conj_pch() {
         let a = _mm_set1_pch(0.0, 1.0);
-        let b = _mm_set1_pch(0.0, 1.0);
-        let r = _mm_maskz_fmul_pch(0b0101, a, b);
-        let e = _mm_setr_ph(-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0);
+        let r = _mm_maskz_conj_pch(0b0101, a);
+        let e = _mm_setr_ph(0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_fmul_pch() {
+    unsafe fn test_mm256_conj_pch() {
         let a = _mm256_set1_pch(0.0, 1.0);
-        let b = _mm256_set1_pch(0.0, 1.0);
-        let r = _mm256_fmul_pch(a, b);
-        let e = _mm256_set1_pch(-1.0, 0.0);
+        let r = _mm256_conj_pch(a);
+        let e = _mm256_set1_pch(0.0, -1.0);
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_mask_fmul_pch() {
+    unsafe fn test_mm256_mask_conj_pch() {
         let a = _mm256_set1_pch(0.0, 1.0);
-        let b = _mm256_set1_pch(0.0, 1.0);
         let src = _mm256_setr_ph(
             2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0,
         );
-        let r = _mm256_mask_fmul_pch(src, 0b01010101, a, b);
+        let r = _mm256_mask_conj_pch(src, 0b01010101, a);
         let e = _mm256_setr_ph(
-            -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0,
+            0.0, -1.0, 4.0, 5.0, 0.0, -1.0, 8.0, 9.0, 0.0, -1.0, 12.0, 13.0, 0.0, -1.0, 16.0, 17.0,
         );
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_maskz_fmul_pch() {
+    unsafe fn test_mm256_maskz_conj_pch() {
         let a = _mm256_set1_pch(0.0, 1.0);
-        let b = _mm256_set1_pch(0.0, 1.0);
-        let r = _mm256_maskz_fmul_pch(0b01010101, a, b);
+        let r = _mm256_maskz_conj_pch(0b01010101, a);
+        let e = _mm256_setr_ph(
+            0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0,
+        );
+        assert_eq_m256h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm512_conj_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let r = _mm512_conj_pch(a);
+        let e = _mm512_set1_pch(0.0, -1.0);
+        assert_eq_m512h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm512_mask_conj_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let src = _mm512_setr_ph(
+            2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0,
+            18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0,
+            32.0, 33.0,
+        );
+        let r = _mm512_mask_conj_pch(src, 0b0101010101010101, a);
+        let e = _mm512_setr_ph(
+            0.0, -1.0, 4.0, 5.0, 0.0, -1.0, 8.0, 9.0, 0.0, -1.0, 12.0, 13.0, 0.0, -1.0, 16.0, 17.0,
+            0.0, -1.0, 20.0, 21.0, 0.0, -1.0, 24.0, 25.0, 0.0, -1.0, 28.0, 29.0, 0.0, -1.0, 32.0,
+            33.0,
+        );
+        assert_eq_m512h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm512_maskz_conj_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let r = _mm512_maskz_conj_pch(0b0101010101010101, a);
+        let e = _mm512_setr_ph(
+            0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0,
+            0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0,
+        );
+        assert_eq_m512h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm_fmadd_pch() {
+        let a = _mm_set1_pch(0.0, 1.0);
+        let b = _mm_set1_pch(0.0, 2.0);
+        let c = _mm_set1_pch(0.0, 3.0);
+        let r = _mm_fmadd_pch(a, b, c);
+        let e = _mm_set1_pch(-2.0, 3.0);
+        assert_eq_m128h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm_mask_fmadd_pch() {
+        let a = _mm_set1_pch(0.0, 1.0);
+        let b = _mm_set1_pch(0.0, 2.0);
+        let c = _mm_set1_pch(0.0, 3.0);
+        let r = _mm_mask_fmadd_pch(a, 0b0101, b, c);
+        let e = _mm_setr_ph(-2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0);
+        assert_eq_m128h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm_mask3_fmadd_pch() {
+        let a = _mm_set1_pch(0.0, 1.0);
+        let b = _mm_set1_pch(0.0, 2.0);
+        let c = _mm_set1_pch(0.0, 3.0);
+        let r = _mm_mask3_fmadd_pch(a, b, c, 0b0101);
+        let e = _mm_setr_ph(-2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0);
+        assert_eq_m128h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm_maskz_fmadd_pch() {
+        let a = _mm_set1_pch(0.0, 1.0);
+        let b = _mm_set1_pch(0.0, 2.0);
+        let c = _mm_set1_pch(0.0, 3.0);
+        let r = _mm_maskz_fmadd_pch(0b0101, a, b, c);
+        let e = _mm_setr_ph(-2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0);
+        assert_eq_m128h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm256_fmadd_pch() {
+        let a = _mm256_set1_pch(0.0, 1.0);
+        let b = _mm256_set1_pch(0.0, 2.0);
+        let c = _mm256_set1_pch(0.0, 3.0);
+        let r = _mm256_fmadd_pch(a, b, c);
+        let e = _mm256_set1_pch(-2.0, 3.0);
+        assert_eq_m256h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm256_mask_fmadd_pch() {
+        let a = _mm256_set1_pch(0.0, 1.0);
+        let b = _mm256_set1_pch(0.0, 2.0);
+        let c = _mm256_set1_pch(0.0, 3.0);
+        let r = _mm256_mask_fmadd_pch(a, 0b01010101, b, c);
+        let e = _mm256_setr_ph(
+            -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0,
+        );
+        assert_eq_m256h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm256_mask3_fmadd_pch() {
+        let a = _mm256_set1_pch(0.0, 1.0);
+        let b = _mm256_set1_pch(0.0, 2.0);
+        let c = _mm256_set1_pch(0.0, 3.0);
+        let r = _mm256_mask3_fmadd_pch(a, b, c, 0b01010101);
+        let e = _mm256_setr_ph(
+            -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0,
+        );
+        assert_eq_m256h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm256_maskz_fmadd_pch() {
+        let a = _mm256_set1_pch(0.0, 1.0);
+        let b = _mm256_set1_pch(0.0, 2.0);
+        let c = _mm256_set1_pch(0.0, 3.0);
+        let r = _mm256_maskz_fmadd_pch(0b01010101, a, b, c);
         let e = _mm256_setr_ph(
-            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
+            -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0,
         );
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_fmul_pch() {
+    unsafe fn test_mm512_fmadd_pch() {
         let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, 1.0);
-        let r = _mm512_fmul_pch(a, b);
-        let e = _mm512_set1_pch(-1.0, 0.0);
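+        // Complex fused multiply-add per pair: (0 + 1i) * (0 + 2i) + (0 + 3i) = -2 + 3i.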
+        let b = _mm512_set1_pch(0.0, 2.0);
+        let c = _mm512_set1_pch(0.0, 3.0);
+        let r = _mm512_fmadd_pch(a, b, c);
+        let e = _mm512_set1_pch(-2.0, 3.0);
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask_fmul_pch() {
+    unsafe fn test_mm512_mask_fmadd_pch() {
         let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, 1.0);
-        let src = _mm512_setr_ph(
-            2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0,
-            18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0,
-            32.0, 33.0,
+        let b = _mm512_set1_pch(0.0, 2.0);
+        let c = _mm512_set1_pch(0.0, 3.0);
+        let r = _mm512_mask_fmadd_pch(a, 0b0101010101010101, b, c);
+        let e = _mm512_setr_ph(
+            -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0,
+            -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0,
         );
-        let r = _mm512_mask_fmul_pch(src, 0b0101010101010101, a, b);
+        assert_eq_m512h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm512_mask3_fmadd_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, 2.0);
+        let c = _mm512_set1_pch(0.0, 3.0);
+        let r = _mm512_mask3_fmadd_pch(a, b, c, 0b0101010101010101);
         let e = _mm512_setr_ph(
-            -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0,
-            -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0,
-            33.0,
+            -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0,
+            -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_maskz_fmul_pch() {
+    unsafe fn test_mm512_maskz_fmadd_pch() {
         let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, 1.0);
-        let r = _mm512_maskz_fmul_pch(0b0101010101010101, a, b);
+        let b = _mm512_set1_pch(0.0, 2.0);
+        let c = _mm512_set1_pch(0.0, 3.0);
+        let r = _mm512_maskz_fmadd_pch(0b0101010101010101, a, b, c);
         let e = _mm512_setr_ph(
-            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
-            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
+            -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0,
+            -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_fmul_round_pch() {
+    unsafe fn test_mm512_fmadd_round_pch() {
         let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, 1.0);
-        let r = _mm512_fmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
-        let e = _mm512_set1_pch(-1.0, 0.0);
+        let b = _mm512_set1_pch(0.0, 2.0);
+        let c = _mm512_set1_pch(0.0, 3.0);
+        let r =
+            _mm512_fmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c);
+        let e = _mm512_set1_pch(-2.0, 3.0);
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask_fmul_round_pch() {
+    unsafe fn test_mm512_mask_fmadd_round_pch() {
         let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, 1.0);
-        let src = _mm512_setr_ph(
-            2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0,
-            18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0,
-            32.0, 33.0,
-        );
-        let r = _mm512_mask_fmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            src,
+        let b = _mm512_set1_pch(0.0, 2.0);
+        let c = _mm512_set1_pch(0.0, 3.0);
+        let r = _mm512_mask_fmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            a,
             0b0101010101010101,
+            b,
+            c,
+        );
+        let e = _mm512_setr_ph(
+            -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0,
+            -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0,
+        );
+        assert_eq_m512h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm512_mask3_fmadd_round_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, 2.0);
+        let c = _mm512_set1_pch(0.0, 3.0);
+        let r = _mm512_mask3_fmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             a,
             b,
+            c,
+            0b0101010101010101,
         );
         let e = _mm512_setr_ph(
-            -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0,
-            -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0,
-            33.0,
+            -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0,
+            -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_maskz_fmul_round_pch() {
+    unsafe fn test_mm512_maskz_fmadd_round_pch() {
         let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, 1.0);
-        let r = _mm512_maskz_fmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+        let b = _mm512_set1_pch(0.0, 2.0);
+        let c = _mm512_set1_pch(0.0, 3.0);
+        let r = _mm512_maskz_fmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             0b0101010101010101,
             a,
             b,
+            c,
         );
         let e = _mm512_setr_ph(
-            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
-            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
+            -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0,
+            -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_fmul_round_sch() {
+    unsafe fn test_mm_fmadd_sch() {
         let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
-        let r = _mm_fmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
-        let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
+        let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        let r = _mm_fmadd_sch(a, b, c);
+        let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask_fmul_round_sch() {
+    unsafe fn test_mm_mask_fmadd_sch() {
         let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
-        let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0);
-        let r = _mm_mask_fmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            src, 0, a, b,
-        );
-        let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
+        let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        let r = _mm_mask_fmadd_sch(a, 0, b, c);
+        let e = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        assert_eq_m128h(r, e);
+        let r = _mm_mask_fmadd_sch(a, 1, b, c);
+        let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_maskz_fmul_round_sch() {
+    unsafe fn test_mm_mask3_fmadd_sch() {
         let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
-        let r =
-            _mm_maskz_fmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b);
+        let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
+        let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        let r = _mm_mask3_fmadd_sch(a, b, c, 0);
+        let e = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        assert_eq_m128h(r, e);
+        let r = _mm_mask3_fmadd_sch(a, b, c, 1);
+        let e = _mm_setr_ph(-2.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        assert_eq_m128h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm_maskz_fmadd_sch() {
+        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
+        let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        let r = _mm_maskz_fmadd_sch(0, a, b, c);
         let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
+        let r = _mm_maskz_fmadd_sch(1, a, b, c);
+        let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_fmul_sch() {
+    unsafe fn test_mm_fmadd_round_sch() {
         let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
-        let r = _mm_fmul_sch(a, b);
-        let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
+        let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        let r = _mm_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c);
+        let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask_fmul_sch() {
+    unsafe fn test_mm_mask_fmadd_round_sch() {
         let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
-        let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0);
-        let r = _mm_mask_fmul_sch(src, 0, a, b);
-        let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
+        let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        let r = _mm_mask_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            a, 0, b, c,
+        );
+        let e = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        assert_eq_m128h(r, e);
+        let r = _mm_mask_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            a, 1, b, c,
+        );
+        let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_maskz_fmul_sch() {
+    unsafe fn test_mm_mask3_fmadd_round_sch() {
         let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
-        let r = _mm_maskz_fmul_sch(0, a, b);
+        let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
+        let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        let r = _mm_mask3_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            a, b, c, 0,
+        );
+        let e = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        assert_eq_m128h(r, e);
+        let r = _mm_mask3_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            a, b, c, 1,
+        );
+        let e = _mm_setr_ph(-2.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        assert_eq_m128h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm_maskz_fmadd_round_sch() {
+        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
+        let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        let r = _mm_maskz_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            0, a, b, c,
+        );
         let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
+        let r = _mm_maskz_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            1, a, b, c,
+        );
+        let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_cmul_pch() {
+    unsafe fn test_mm_fcmadd_pch() {
         let a = _mm_set1_pch(0.0, 1.0);
-        let b = _mm_set1_pch(0.0, -1.0);
-        let r = _mm_cmul_pch(a, b);
-        let e = _mm_set1_pch(-1.0, 0.0);
+        let b = _mm_set1_pch(0.0, 2.0);
+        let c = _mm_set1_pch(0.0, 3.0);
+        let r = _mm_fcmadd_pch(a, b, c);
+        let e = _mm_set1_pch(2.0, 3.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_mask_cmul_pch() {
+    unsafe fn test_mm_mask_fcmadd_pch() {
         let a = _mm_set1_pch(0.0, 1.0);
-        let b = _mm_set1_pch(0.0, -1.0);
-        let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0);
-        let r = _mm_mask_cmul_pch(src, 0b0101, a, b);
-        let e = _mm_setr_ph(-1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0);
+        let b = _mm_set1_pch(0.0, 2.0);
+        let c = _mm_set1_pch(0.0, 3.0);
+        let r = _mm_mask_fcmadd_pch(a, 0b0101, b, c);
+        let e = _mm_setr_ph(2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_maskz_cmul_pch() {
+    unsafe fn test_mm_mask3_fcmadd_pch() {
         let a = _mm_set1_pch(0.0, 1.0);
-        let b = _mm_set1_pch(0.0, -1.0);
-        let r = _mm_maskz_cmul_pch(0b0101, a, b);
-        let e = _mm_setr_ph(-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0);
+        let b = _mm_set1_pch(0.0, 2.0);
+        let c = _mm_set1_pch(0.0, 3.0);
+        let r = _mm_mask3_fcmadd_pch(a, b, c, 0b0101);
+        let e = _mm_setr_ph(2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0);
+        assert_eq_m128h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm_maskz_fcmadd_pch() {
+        let a = _mm_set1_pch(0.0, 1.0);
+        let b = _mm_set1_pch(0.0, 2.0);
+        let c = _mm_set1_pch(0.0, 3.0);
+        let r = _mm_maskz_fcmadd_pch(0b0101, a, b, c);
+        let e = _mm_setr_ph(2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_cmul_pch() {
+    unsafe fn test_mm256_fcmadd_pch() {
+        let a = _mm256_set1_pch(0.0, 1.0);
+        let b = _mm256_set1_pch(0.0, 2.0);
+        let c = _mm256_set1_pch(0.0, 3.0);
+        let r = _mm256_fcmadd_pch(a, b, c);
+        let e = _mm256_set1_pch(2.0, 3.0);
+        assert_eq_m256h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm256_mask_fcmadd_pch() {
         let a = _mm256_set1_pch(0.0, 1.0);
-        let b = _mm256_set1_pch(0.0, -1.0);
-        let r = _mm256_cmul_pch(a, b);
-        let e = _mm256_set1_pch(-1.0, 0.0);
+        let b = _mm256_set1_pch(0.0, 2.0);
+        let c = _mm256_set1_pch(0.0, 3.0);
+        let r = _mm256_mask_fcmadd_pch(a, 0b01010101, b, c);
+        let e = _mm256_setr_ph(
+            2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0,
+        );
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_mask_cmul_pch() {
+    unsafe fn test_mm256_mask3_fcmadd_pch() {
         let a = _mm256_set1_pch(0.0, 1.0);
-        let b = _mm256_set1_pch(0.0, -1.0);
-        let src = _mm256_setr_ph(
-            2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0,
-        );
-        let r = _mm256_mask_cmul_pch(src, 0b01010101, a, b);
+        let b = _mm256_set1_pch(0.0, 2.0);
+        let c = _mm256_set1_pch(0.0, 3.0);
+        let r = _mm256_mask3_fcmadd_pch(a, b, c, 0b01010101);
         let e = _mm256_setr_ph(
-            -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0,
+            2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0,
         );
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_maskz_cmul_pch() {
+    unsafe fn test_mm256_maskz_fcmadd_pch() {
         let a = _mm256_set1_pch(0.0, 1.0);
-        let b = _mm256_set1_pch(0.0, -1.0);
-        let r = _mm256_maskz_cmul_pch(0b01010101, a, b);
+        let b = _mm256_set1_pch(0.0, 2.0);
+        let c = _mm256_set1_pch(0.0, 3.0);
+        let r = _mm256_maskz_fcmadd_pch(0b01010101, a, b, c);
         let e = _mm256_setr_ph(
-            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
+            2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0,
         );
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_cmul_pch() {
+    unsafe fn test_mm512_fcmadd_pch() {
         let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, -1.0);
-        let r = _mm512_cmul_pch(a, b);
-        let e = _mm512_set1_pch(-1.0, 0.0);
+        let b = _mm512_set1_pch(0.0, 2.0);
+        let c = _mm512_set1_pch(0.0, 3.0);
+        let r = _mm512_fcmadd_pch(a, b, c);
+        let e = _mm512_set1_pch(2.0, 3.0);
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask_cmul_pch() {
+    unsafe fn test_mm512_mask_fcmadd_pch() {
         let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, -1.0);
-        let src = _mm512_setr_ph(
-            2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0,
-            18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0,
-            32.0, 33.0,
+        let b = _mm512_set1_pch(0.0, 2.0);
+        let c = _mm512_set1_pch(0.0, 3.0);
+        let r = _mm512_mask_fcmadd_pch(a, 0b0101010101010101, b, c);
+        let e = _mm512_setr_ph(
+            2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0,
+            3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0,
         );
-        let r = _mm512_mask_cmul_pch(src, 0b0101010101010101, a, b);
+        assert_eq_m512h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm512_mask3_fcmadd_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, 2.0);
+        let c = _mm512_set1_pch(0.0, 3.0);
+        let r = _mm512_mask3_fcmadd_pch(a, b, c, 0b0101010101010101);
         let e = _mm512_setr_ph(
-            -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0,
-            -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0,
-            33.0,
+            2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0,
+            3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_maskz_cmul_pch() {
+    unsafe fn test_mm512_maskz_fcmadd_pch() {
         let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, -1.0);
-        let r = _mm512_maskz_cmul_pch(0b0101010101010101, a, b);
+        let b = _mm512_set1_pch(0.0, 2.0);
+        let c = _mm512_set1_pch(0.0, 3.0);
+        let r = _mm512_maskz_fcmadd_pch(0b0101010101010101, a, b, c);
         let e = _mm512_setr_ph(
-            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
-            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
+            2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0,
+            3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_cmul_round_pch() {
+    unsafe fn test_mm512_fcmadd_round_pch() {
         let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, -1.0);
-        let r = _mm512_cmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
-        let e = _mm512_set1_pch(-1.0, 0.0);
+        let b = _mm512_set1_pch(0.0, 2.0);
+        let c = _mm512_set1_pch(0.0, 3.0);
+        let r =
+            _mm512_fcmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c);
+        let e = _mm512_set1_pch(2.0, 3.0);
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask_cmul_round_pch() {
+    unsafe fn test_mm512_mask_fcmadd_round_pch() {
         let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, -1.0);
-        let src = _mm512_setr_ph(
-            2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0,
-            18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0,
-            32.0, 33.0,
-        );
-        let r = _mm512_mask_cmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            src,
+        let b = _mm512_set1_pch(0.0, 2.0);
+        let c = _mm512_set1_pch(0.0, 3.0);
+        let r = _mm512_mask_fcmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            a,
             0b0101010101010101,
+            b,
+            c,
+        );
+        let e = _mm512_setr_ph(
+            2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0,
+            3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0,
+        );
+        assert_eq_m512h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm512_mask3_fcmadd_round_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, 2.0);
+        let c = _mm512_set1_pch(0.0, 3.0);
+        let r = _mm512_mask3_fcmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             a,
             b,
+            c,
+            0b0101010101010101,
         );
         let e = _mm512_setr_ph(
-            -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0,
-            -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0,
-            33.0,
+            2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0,
+            3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_maskz_cmul_round_pch() {
+    unsafe fn test_mm512_maskz_fcmadd_round_pch() {
         let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, -1.0);
-        let r = _mm512_maskz_cmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+        let b = _mm512_set1_pch(0.0, 2.0);
+        let c = _mm512_set1_pch(0.0, 3.0);
+        let r = _mm512_maskz_fcmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
            0b0101010101010101,
             a,
             b,
+            c,
         );
         let e = _mm512_setr_ph(
-            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
-            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
+            2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0,
+            3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_cmul_sch() {
+    unsafe fn test_mm_fcmadd_sch() {
         let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0);
-        let r = _mm_cmul_sch(a, b);
-        let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
+        let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        let r = _mm_fcmadd_sch(a, b, c);
+        let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask_cmul_sch() {
+    unsafe fn test_mm_mask_fcmadd_sch() {
         let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0);
-        let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0);
-        let r = _mm_mask_cmul_sch(src, 0, a, b);
-        let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
+        let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        let r = _mm_mask_fcmadd_sch(a, 0, b, c);
+        let e = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        assert_eq_m128h(r, e);
+        let r = _mm_mask_fcmadd_sch(a, 1, b, c);
+        let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_maskz_cmul_sch() {
+    unsafe fn test_mm_mask3_fcmadd_sch() {
         let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0);
-        let r = _mm_maskz_cmul_sch(0, a, b);
+        let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
+        let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        let r = _mm_mask3_fcmadd_sch(a, b, c, 0);
+        let e = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        assert_eq_m128h(r, e);
+        let r = _mm_mask3_fcmadd_sch(a, b, c, 1);
+        let e = _mm_setr_ph(2.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        assert_eq_m128h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm_maskz_fcmadd_sch() {
+        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
+        let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        let r = _mm_maskz_fcmadd_sch(0, a, b, c);
         let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
+        let r = _mm_maskz_fcmadd_sch(1, a, b, c);
+        let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_cmul_round_sch() {
+    unsafe fn test_mm_fcmadd_round_sch() {
         let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0);
-        let r = _mm_cmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
-        let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
+        let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        let r = _mm_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c);
+        let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask_cmul_round_sch() {
+    unsafe fn test_mm_mask_fcmadd_round_sch() {
         let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0);
-        let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0);
-        let r = _mm_mask_cmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            src, 0, a, b,
+        let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
+        let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        let r = _mm_mask_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            a, 0, b, c,
         );
-        let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let e = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        assert_eq_m128h(r, e);
+        let r = _mm_mask_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            a, 1, b, c,
+        );
+        let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_maskz_cmul_round_sch() {
+    unsafe fn test_mm_mask3_fcmadd_round_sch() {
         let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0);
-        let r =
-            _mm_maskz_cmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b);
+        let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
+        let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        let r = _mm_mask3_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            a, b, c, 0,
+        );
+        let e = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        assert_eq_m128h(r, e);
+        let r = _mm_mask3_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            a, b, c, 1,
+        );
+        let e = _mm_setr_ph(2.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        assert_eq_m128h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm_maskz_fcmadd_round_sch() {
+        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
+        let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
+        let r = _mm_maskz_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            0, a, b, c,
+        );
         let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
         assert_eq_m128h(r, e);
+        let r = _mm_maskz_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            1, a, b, c,
+        );
+        let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
+        assert_eq_m128h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm_fmadd_ph() {
+        let a = _mm_set1_ph(1.0);
+        let b = _mm_set1_ph(2.0);
+        let c = _mm_set1_ph(3.0);
+        let r = _mm_fmadd_ph(a, b, c);
+        let e = _mm_set1_ph(5.0);
+        assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_fcmul_pch() {
-        let a = _mm_set1_pch(0.0, 1.0);
-        let b = _mm_set1_pch(0.0, -1.0);
-        let r = _mm_fcmul_pch(a, b);
-        let e = _mm_set1_pch(-1.0, 0.0);
+    unsafe fn test_mm_mask_fmadd_ph() {
+        let a = _mm_set1_ph(1.0);
+        let b = _mm_set1_ph(2.0);
+        let c = _mm_set1_ph(3.0);
+        let r = _mm_mask_fmadd_ph(a, 0b01010101, b, c);
+        let e = _mm_set_ph(1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_mask_fcmul_pch() {
-        let a = _mm_set1_pch(0.0, 1.0);
-        let b = _mm_set1_pch(0.0, -1.0);
-        let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0);
-        let r = _mm_mask_fcmul_pch(src, 0b0101, a, b);
-        let e = _mm_setr_ph(-1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0);
+    unsafe fn test_mm_mask3_fmadd_ph() {
+        let a = _mm_set1_ph(1.0);
+        let b = _mm_set1_ph(2.0);
+        let c = _mm_set1_ph(3.0);
+        let r = _mm_mask3_fmadd_ph(a, b, c, 0b01010101);
+        let e = _mm_set_ph(3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_maskz_fcmul_pch() {
-        let a = _mm_set1_pch(0.0, 1.0);
-        let b = _mm_set1_pch(0.0, -1.0);
-        let r = _mm_maskz_fcmul_pch(0b0101, a, b);
-        let e = _mm_setr_ph(-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0);
+    unsafe fn test_mm_maskz_fmadd_ph() {
+        let a = _mm_set1_ph(1.0);
+        let b = _mm_set1_ph(2.0);
+        let c = _mm_set1_ph(3.0);
+        let r = _mm_maskz_fmadd_ph(0b01010101, a, b, c);
+        let e = _mm_set_ph(0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_fcmul_pch() {
-        let a = _mm256_set1_pch(0.0, 1.0);
-        let b = _mm256_set1_pch(0.0, -1.0);
-        let r = _mm256_fcmul_pch(a, b);
-        let e = _mm256_set1_pch(-1.0, 0.0);
+    unsafe fn test_mm256_fmadd_ph() {
+        let a = _mm256_set1_ph(1.0);
+        let b = _mm256_set1_ph(2.0);
+        let c = _mm256_set1_ph(3.0);
+        let r = _mm256_fmadd_ph(a, b, c);
+        let e = _mm256_set1_ph(5.0);
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_mask_fcmul_pch() {
-        let a = _mm256_set1_pch(0.0, 1.0);
-        let b = _mm256_set1_pch(0.0, -1.0);
-        let src = _mm256_setr_ph(
-            2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0,
+    unsafe fn test_mm256_mask_fmadd_ph() {
+        let a = _mm256_set1_ph(1.0);
+        let b = _mm256_set1_ph(2.0);
+        let c = _mm256_set1_ph(3.0);
+        let r = _mm256_mask_fmadd_ph(a, 0b0101010101010101, b, c);
+        let e = _mm256_set_ph(
+            1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0,
         );
-        let r = _mm256_mask_fcmul_pch(src, 0b01010101, a, b);
-        let e = _mm256_setr_ph(
-            -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0,
+        assert_eq_m256h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm256_mask3_fmadd_ph() {
+        let a = _mm256_set1_ph(1.0);
+        let b = _mm256_set1_ph(2.0);
+        let c = _mm256_set1_ph(3.0);
+        let r = _mm256_mask3_fmadd_ph(a, b, c, 0b0101010101010101);
+        let e = _mm256_set_ph(
+            3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0,
         );
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_maskz_fcmul_pch() {
-        let a = _mm256_set1_pch(0.0, 1.0);
-        let b = _mm256_set1_pch(0.0, -1.0);
-        let r = _mm256_maskz_fcmul_pch(0b01010101, a, b);
-        let e = _mm256_setr_ph(
-            -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0,
+    unsafe fn test_mm256_maskz_fmadd_ph() {
+        let a = _mm256_set1_ph(1.0);
+        let b = _mm256_set1_ph(2.0);
+        let c = _mm256_set1_ph(3.0);
+        let r = _mm256_maskz_fmadd_ph(0b0101010101010101, a, b, c);
+        let e = _mm256_set_ph(
+            0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0,
         );
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_fcmul_pch() {
-        let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, -1.0);
-        let r = _mm512_fcmul_pch(a, b);
-        let e = _mm512_set1_pch(-1.0, 0.0);
+    unsafe fn test_mm512_fmadd_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_fmadd_ph(a, b, c);
+        let e =
_mm512_set1_ph(5.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fcmul_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, -1.0); - let src = _mm512_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, - 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, - 32.0, 33.0, - ); - let r = _mm512_mask_fcmul_pch(src, 0b0101010101010101, a, b); - let e = _mm512_setr_ph( - -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, - -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, - 33.0, + unsafe fn test_mm512_mask_fmadd_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask_fmadd_ph(a, 0b01010101010101010101010101010101, b, c); + let e = _mm512_set_ph( + 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, + 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fcmul_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, -1.0); - let r = _mm512_maskz_fcmul_pch(0b0101010101010101, a, b); - let e = _mm512_setr_ph( - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + unsafe fn test_mm512_mask3_fmadd_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask3_fmadd_ph(a, b, c, 0b01010101010101010101010101010101); + let e = _mm512_set_ph( + 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, + 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fcmul_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, -1.0); - let r = _mm512_fcmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm512_set1_pch(-1.0, 0.0); + unsafe fn test_mm512_maskz_fmadd_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_maskz_fmadd_ph(0b01010101010101010101010101010101, a, b, c); + let e = _mm512_set_ph( + 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, + 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, + ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fcmul_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, -1.0); - let src = _mm512_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, - 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, - 32.0, 33.0, - ); - let r = _mm512_mask_fcmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, - 0b0101010101010101, - a, - b, - ); - let e = _mm512_setr_ph( - -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, - -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, - 33.0, - ); + unsafe fn test_mm512_fmadd_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); 
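// NOTE: a minimal scalar model of the four real FMA flavours these tests
// exercise (illustrative sketch only, not part of the patch; `f32` stands in
// for the still-unstable `f16`):
fn fmadd(a: f32, b: f32, c: f32) -> f32 {
    a * b + c // with a=1, b=2, c=3 this is 5.0, the expected lane value below
}
fn fmsub(a: f32, b: f32, c: f32) -> f32 {
    a * b - c // 1*2 - 3 == -1.0
}
fn fnmadd(a: f32, b: f32, c: f32) -> f32 {
    -(a * b) + c // -(1*2) + 3 == 1.0
}
fn fnmsub(a: f32, b: f32, c: f32) -> f32 {
    -(a * b) - c // -(1*2) - 3 == -5.0
}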
+ let r = _mm512_fmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm512_set1_ph(5.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fcmul_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, -1.0); - let r = _mm512_maskz_fcmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b0101010101010101, + unsafe fn test_mm512_mask_fmadd_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask_fmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, + 0b01010101010101010101010101010101, b, + c, ); - let e = _mm512_setr_ph( - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, - ); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fcmul_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); - let r = _mm_fcmul_sch(a, b); - let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fcmul_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); - let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); - let r = _mm_mask_fcmul_sch(src, 0, a, b); - let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fcmul_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); - let r = _mm_maskz_fcmul_sch(0, a, b); - let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fcmul_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); - let r = _mm_fcmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fcmul_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); - let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); - let r = _mm_mask_fcmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 0, a, b, - ); - let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fcmul_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); - let r = - _mm_maskz_fcmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); - let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_abs_ph() { - let a = _mm_set_ph(-1.0, 0.0, 1.0, -2.0, 3.0, -4.0, 5.0, -6.0); - let r = _mm_abs_ph(a); - let e = _mm_set_ph(1.0, 0.0, 
1.0, 2.0, 3.0, 4.0, 5.0, 6.0); - assert_eq_m128h(r, e); + let e = _mm512_set_ph( + 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, + 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, + ); + assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_abs_ph() { - let a = _mm256_set_ph( - -1.0, 0.0, 1.0, -2.0, 3.0, -4.0, 5.0, -6.0, 7.0, -8.0, 9.0, -10.0, 11.0, -12.0, 13.0, - -14.0, + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask3_fmadd_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask3_fmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, + b, + c, + 0b01010101010101010101010101010101, ); - let r = _mm256_abs_ph(a); - let e = _mm256_set_ph( - 1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, + let e = _mm512_set_ph( + 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, + 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, ); - assert_eq_m256h(r, e); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_abs_ph() { - let a = _mm512_set_ph( - -1.0, 0.0, 1.0, -2.0, 3.0, -4.0, 5.0, -6.0, 7.0, -8.0, 9.0, -10.0, 11.0, -12.0, 13.0, - -14.0, 15.0, -16.0, 17.0, -18.0, 19.0, -20.0, 21.0, -22.0, 23.0, -24.0, 25.0, -26.0, - 27.0, -28.0, 29.0, -30.0, + unsafe fn test_mm512_maskz_fmadd_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_maskz_fmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101010101010101010101010101, + a, + b, + c, ); - let r = _mm512_abs_ph(a); let e = _mm512_set_ph( - 1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, - 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, - 29.0, 30.0, + 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, + 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, ); assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_conj_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let r = _mm_conj_pch(a); - let e = _mm_set1_pch(0.0, -1.0); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_fmadd_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_fmadd_sh(a, b, c); + let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_conj_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0); - let r = _mm_mask_conj_pch(src, 0b0101, a); - let e = _mm_setr_ph(0.0, -1.0, 4.0, 5.0, 0.0, -1.0, 8.0, 9.0); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_fmadd_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_fmadd_sh(a, 0, b, c); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_conj_pch() { - let a = 
_mm_set1_pch(0.0, 1.0); - let r = _mm_maskz_conj_pch(0b0101, a); - let e = _mm_setr_ph(0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0); + let r = _mm_mask_fmadd_sh(a, 1, b, c); + let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_conj_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let r = _mm256_conj_pch(a); - let e = _mm256_set1_pch(0.0, -1.0); - assert_eq_m256h(r, e); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask3_fmadd_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask3_fmadd_sh(a, b, c, 0); + let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + assert_eq_m128h(r, e); + let r = _mm_mask3_fmadd_sh(a, b, c, 1); + let e = _mm_setr_ph(5.0, 30., 31., 32., 33., 34., 35., 36.); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_conj_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let src = _mm256_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, - ); - let r = _mm256_mask_conj_pch(src, 0b01010101, a); - let e = _mm256_setr_ph( - 0.0, -1.0, 4.0, 5.0, 0.0, -1.0, 8.0, 9.0, 0.0, -1.0, 12.0, 13.0, 0.0, -1.0, 16.0, 17.0, - ); - assert_eq_m256h(r, e); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_fmadd_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_maskz_fmadd_sh(0, a, b, c); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + let r = _mm_maskz_fmadd_sh(1, a, b, c); + let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_conj_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let r = _mm256_maskz_conj_pch(0b01010101, a); - let e = _mm256_setr_ph( - 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, - ); - assert_eq_m256h(r, e); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_fmadd_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_conj_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let r = _mm512_conj_pch(a); - let e = _mm512_set1_pch(0.0, -1.0); - assert_eq_m512h(r, e); + unsafe fn test_mm_mask_fmadd_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, 0, b, c, + ); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + let r = _mm_mask_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, 1, b, c, + ); + let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.); + 
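// NOTE: sketch of the scalar `_sh` convention checked in the tests around
// here (plain arrays stand in for `__m128h`; an assumption for illustration,
// not patch content): only lane 0 is computed, lanes 1..8 pass through from
// `a`, and the masked forms fall back to `a` (`mask_`), `c` (`mask3_`), or
// zero (`maskz_`) when bit 0 of the mask is clear.
fn fmadd_sh_model(a: [f32; 8], b: [f32; 8], c: [f32; 8], k: u8) -> [f32; 8] {
    let mut r = a; // upper lanes are copied from the first source
    r[0] = if k & 1 != 0 { a[0] * b[0] + c[0] } else { a[0] };
    r
}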
+        assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask_conj_pch() {
-        let a = _mm512_set1_pch(0.0, 1.0);
-        let src = _mm512_setr_ph(
-            2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0,
-            18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0,
-            32.0, 33.0,
+    unsafe fn test_mm_mask3_fmadd_round_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_mask3_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            a, b, c, 0,
         );
-        let r = _mm512_mask_conj_pch(src, 0b0101010101010101, a);
-        let e = _mm512_setr_ph(
-            0.0, -1.0, 4.0, 5.0, 0.0, -1.0, 8.0, 9.0, 0.0, -1.0, 12.0, 13.0, 0.0, -1.0, 16.0, 17.0,
-            0.0, -1.0, 20.0, 21.0, 0.0, -1.0, 24.0, 25.0, 0.0, -1.0, 28.0, 29.0, 0.0, -1.0, 32.0,
-            33.0,
+        let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        assert_eq_m128h(r, e);
+        let r = _mm_mask3_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            a, b, c, 1,
         );
-        assert_eq_m512h(r, e);
+        let e = _mm_setr_ph(5.0, 30., 31., 32., 33., 34., 35., 36.);
+        assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_maskz_conj_pch() {
-        let a = _mm512_set1_pch(0.0, 1.0);
-        let r = _mm512_maskz_conj_pch(0b0101010101010101, a);
-        let e = _mm512_setr_ph(
-            0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0,
-            0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0,
+    unsafe fn test_mm_maskz_fmadd_round_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_maskz_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             0, a, b, c,
         );
-        assert_eq_m512h(r, e);
+        let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.);
+        assert_eq_m128h(r, e);
+        let r = _mm_maskz_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            1, a, b, c,
+        );
+        let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.);
+        assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_fmadd_pch() {
-        let a = _mm_set1_pch(0.0, 1.0);
-        let b = _mm_set1_pch(0.0, 2.0);
-        let c = _mm_set1_pch(0.0, 3.0);
-        let r = _mm_fmadd_pch(a, b, c);
-        let e = _mm_set1_pch(-2.0, 3.0);
+    unsafe fn test_mm_fmsub_ph() {
+        let a = _mm_set1_ph(1.0);
+        let b = _mm_set1_ph(2.0);
+        let c = _mm_set1_ph(3.0);
+        let r = _mm_fmsub_ph(a, b, c);
+        let e = _mm_set1_ph(-1.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_mask_fmadd_pch() {
-        let a = _mm_set1_pch(0.0, 1.0);
-        let b = _mm_set1_pch(0.0, 2.0);
-        let c = _mm_set1_pch(0.0, 3.0);
-        let r = _mm_mask_fmadd_pch(a, 0b0101, b, c);
-        let e = _mm_setr_ph(-2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0);
+    unsafe fn test_mm_mask_fmsub_ph() {
+        let a = _mm_set1_ph(1.0);
+        let b = _mm_set1_ph(2.0);
+        let c = _mm_set1_ph(3.0);
+        let r = _mm_mask_fmsub_ph(a, 0b01010101, b, c);
+        let e = _mm_set_ph(1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_mask3_fmadd_pch() {
-        let a = _mm_set1_pch(0.0, 1.0);
-        let b = _mm_set1_pch(0.0, 2.0);
-        let c = _mm_set1_pch(0.0, 3.0);
-        let r = _mm_mask3_fmadd_pch(a, b, c, 0b0101);
-        let e = _mm_setr_ph(-2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0);
+    unsafe fn test_mm_mask3_fmsub_ph() {
+        let a = _mm_set1_ph(1.0);
+        let b = _mm_set1_ph(2.0);
+        let c = _mm_set1_ph(3.0);
+        let r = _mm_mask3_fmsub_ph(a, b, c, 0b01010101);
+        let e = _mm_set_ph(3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_maskz_fmadd_pch() {
-        let a = _mm_set1_pch(0.0, 1.0);
-        let b = _mm_set1_pch(0.0, 2.0);
-        let c = _mm_set1_pch(0.0, 3.0);
-        let r = _mm_maskz_fmadd_pch(0b0101, a, b, c);
-        let e = _mm_setr_ph(-2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0);
+    unsafe fn test_mm_maskz_fmsub_ph() {
+        let a = _mm_set1_ph(1.0);
+        let b = _mm_set1_ph(2.0);
+        let c = _mm_set1_ph(3.0);
+        let r = _mm_maskz_fmsub_ph(0b01010101, a, b, c);
+        let e = _mm_set_ph(0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0);
         assert_eq_m128h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_fmadd_pch() {
-        let a = _mm256_set1_pch(0.0, 1.0);
-        let b = _mm256_set1_pch(0.0, 2.0);
-        let c = _mm256_set1_pch(0.0, 3.0);
-        let r = _mm256_fmadd_pch(a, b, c);
-        let e = _mm256_set1_pch(-2.0, 3.0);
+    unsafe fn test_mm256_fmsub_ph() {
+        let a = _mm256_set1_ph(1.0);
+        let b = _mm256_set1_ph(2.0);
+        let c = _mm256_set1_ph(3.0);
+        let r = _mm256_fmsub_ph(a, b, c);
+        let e = _mm256_set1_ph(-1.0);
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_mask_fmadd_pch() {
-        let a = _mm256_set1_pch(0.0, 1.0);
-        let b = _mm256_set1_pch(0.0, 2.0);
-        let c = _mm256_set1_pch(0.0, 3.0);
-        let r = _mm256_mask_fmadd_pch(a, 0b01010101, b, c);
-        let e = _mm256_setr_ph(
-            -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0,
+    unsafe fn test_mm256_mask_fmsub_ph() {
+        let a = _mm256_set1_ph(1.0);
+        let b = _mm256_set1_ph(2.0);
+        let c = _mm256_set1_ph(3.0);
+        let r = _mm256_mask_fmsub_ph(a, 0b0101010101010101, b, c);
+        let e = _mm256_set_ph(
+            1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0,
         );
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_mask3_fmadd_pch() {
-        let a = _mm256_set1_pch(0.0, 1.0);
-        let b = _mm256_set1_pch(0.0, 2.0);
-        let c = _mm256_set1_pch(0.0, 3.0);
-        let r = _mm256_mask3_fmadd_pch(a, b, c, 0b01010101);
-        let e = _mm256_setr_ph(
-            -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0,
+    unsafe fn test_mm256_mask3_fmsub_ph() {
+        let a = _mm256_set1_ph(1.0);
+        let b = _mm256_set1_ph(2.0);
+        let c = _mm256_set1_ph(3.0);
+        let r = _mm256_mask3_fmsub_ph(a, b, c, 0b0101010101010101);
+        let e = _mm256_set_ph(
+            3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0,
         );
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_maskz_fmadd_pch() {
-        let a = _mm256_set1_pch(0.0, 1.0);
-        let b = _mm256_set1_pch(0.0, 2.0);
-        let c = _mm256_set1_pch(0.0, 3.0);
-        let r = _mm256_maskz_fmadd_pch(0b01010101, a, b, c);
-        let e = _mm256_setr_ph(
-            -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0,
+    unsafe fn test_mm256_maskz_fmsub_ph() {
+        let a = _mm256_set1_ph(1.0);
+        let b = _mm256_set1_ph(2.0);
+        let c = _mm256_set1_ph(3.0);
+        let r = _mm256_maskz_fmsub_ph(0b0101010101010101, a, b, c);
+        let e = _mm256_set_ph(
+            0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0,
         );
         assert_eq_m256h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_fmadd_pch() {
-        let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, 2.0);
-        let c = _mm512_set1_pch(0.0, 3.0);
-        let r = _mm512_fmadd_pch(a, b, c);
-        let e = _mm512_set1_pch(-2.0, 3.0);
+    unsafe fn test_mm512_fmsub_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_fmsub_ph(a, b, c);
+        let e = _mm512_set1_ph(-1.0);
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask_fmadd_pch() {
-        let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, 2.0);
-        let c = _mm512_set1_pch(0.0, 3.0);
-        let r = _mm512_mask_fmadd_pch(a, 0b0101010101010101, b, c);
-        let e = _mm512_setr_ph(
-            -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0,
-            -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0,
+    unsafe fn test_mm512_mask_fmsub_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_mask_fmsub_ph(a, 0b01010101010101010101010101010101, b, c);
+        let e = _mm512_set_ph(
+            1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0,
+            1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask3_fmadd_pch() {
-        let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, 2.0);
-        let c = _mm512_set1_pch(0.0, 3.0);
-        let r = _mm512_mask3_fmadd_pch(a, b, c, 0b0101010101010101);
-        let e = _mm512_setr_ph(
-            -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0,
-            -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0,
+    unsafe fn test_mm512_mask3_fmsub_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_mask3_fmsub_ph(a, b, c, 0b01010101010101010101010101010101);
+        let e = _mm512_set_ph(
+            3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0,
+            3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0,
        );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_maskz_fmadd_pch() {
-        let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, 2.0);
-        let c = _mm512_set1_pch(0.0, 3.0);
-        let r = _mm512_maskz_fmadd_pch(0b0101010101010101, a, b, c);
-        let e = _mm512_setr_ph(
-            -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0,
-            -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0,
+    unsafe fn test_mm512_maskz_fmsub_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_maskz_fmsub_ph(0b01010101010101010101010101010101, a, b, c);
+        let e = _mm512_set_ph(
+            0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0,
+            0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_fmadd_round_pch() {
-        let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, 2.0);
-        let c = _mm512_set1_pch(0.0, 3.0);
-        let r =
-            _mm512_fmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c);
-        let e = _mm512_set1_pch(-2.0, 3.0);
+    unsafe fn test_mm512_fmsub_round_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
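// NOTE: why the expected vectors in the masked tests start with the
// *unselected* value: `_mm512_set_ph` lists lanes from the highest down to
// lane 0, while bit i of the mask governs lane i. A model of the merge
// (sketch; plain arrays stand in for SIMD registers, not patch content):
fn mask_merge32(src: [f32; 32], res: [f32; 32], k: u32) -> [f32; 32] {
    core::array::from_fn(|i| if k & (1 << i) != 0 { res[i] } else { src[i] })
}
// `mask_` variants merge from the first operand, `mask3_` from `c`, and
// `maskz_` substitute 0.0 for the unselected lanes.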
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_fmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c);
+        let e = _mm512_set1_ph(-1.0);
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask_fmadd_round_pch() {
-        let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, 2.0);
-        let c = _mm512_set1_pch(0.0, 3.0);
-        let r = _mm512_mask_fmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+    unsafe fn test_mm512_mask_fmsub_round_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_mask_fmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             a,
-            0b0101010101010101,
+            0b01010101010101010101010101010101,
             b,
             c,
         );
-        let e = _mm512_setr_ph(
-            -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0,
-            -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0,
+        let e = _mm512_set_ph(
+            1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0,
+            1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask3_fmadd_round_pch() {
-        let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, 2.0);
-        let c = _mm512_set1_pch(0.0, 3.0);
-        let r = _mm512_mask3_fmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+    unsafe fn test_mm512_mask3_fmsub_round_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_mask3_fmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             a,
             b,
             c,
-            0b0101010101010101,
+            0b01010101010101010101010101010101,
         );
-        let e = _mm512_setr_ph(
-            -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0,
-            -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0,
+        let e = _mm512_set_ph(
+            3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0,
+            3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_maskz_fmadd_round_pch() {
-        let a = _mm512_set1_pch(0.0, 1.0);
-        let b = _mm512_set1_pch(0.0, 2.0);
-        let c = _mm512_set1_pch(0.0, 3.0);
-        let r = _mm512_maskz_fmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            0b0101010101010101,
+    unsafe fn test_mm512_maskz_fmsub_round_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_maskz_fmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            0b01010101010101010101010101010101,
             a,
             b,
             c,
         );
-        let e = _mm512_setr_ph(
-            -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0,
-            -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0,
+        let e = _mm512_set_ph(
+            0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0,
+            0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0,
         );
         assert_eq_m512h(r, e);
     }
 
     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_fmadd_sch() {
-        let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
-        let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0);
-        let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0);
-        let r = _mm_fmadd_sch(a, b, c);
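// NOTE: the `pch`/`sch` tests removed above (and the `fcmadd` ones later in
// this file) treat each pair of f16 lanes as one complex number (re, im). A
// scalar model of the two complex FMA forms (sketch with f32 tuples standing
// in for f16 pairs; illustrative only, not part of the patch):
fn fmadd_pch(a: (f32, f32), b: (f32, f32), c: (f32, f32)) -> (f32, f32) {
    // a * b + c: with a = 0+1i, b = 0+2i, c = 0+3i this is (-2.0, 3.0)
    (a.0 * b.0 - a.1 * b.1 + c.0, a.0 * b.1 + a.1 * b.0 + c.1)
}
fn fcmadd_pch(a: (f32, f32), b: (f32, f32), c: (f32, f32)) -> (f32, f32) {
    // a * conj(b) + c: the same inputs give (2.0, 3.0); note that each mask
    // bit covers a whole complex pair, hence 16 mask bits for 32 f16 lanes
    (a.0 * b.0 + a.1 * b.1 + c.0, a.1 * b.0 - a.0 * b.1 + c.1)
}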
- let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_fmsub_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_fmsub_sh(a, b, c); + let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fmadd_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_mask_fmadd_sch(a, 0, b, c); - let e = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_mask_fmsub_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_fmsub_sh(a, 0, b, c); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_mask_fmadd_sch(a, 1, b, c); - let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let r = _mm_mask_fmsub_sh(a, 1, b, c); + let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask3_fmadd_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_mask3_fmadd_sch(a, b, c, 0); - let e = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + unsafe fn test_mm_mask3_fmsub_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask3_fmsub_sh(a, b, c, 0); + let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); assert_eq_m128h(r, e); - let r = _mm_mask3_fmadd_sch(a, b, c, 1); - let e = _mm_setr_ph(-2.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_mask3_fmsub_sh(a, b, c, 1); + let e = _mm_setr_ph(-1.0, 30., 31., 32., 33., 34., 35., 36.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fmadd_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_maskz_fmadd_sch(0, a, b, c); - let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_maskz_fmsub_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_maskz_fmsub_sh(0, a, b, c); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_maskz_fmadd_sch(1, a, b, c); - let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let r = _mm_maskz_fmsub_sh(1, a, b, c); + let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fmadd_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = 
_mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); - let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_fmsub_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fmadd_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_mask_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm_mask_fmsub_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, 0, b, c, ); - let e = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_mask_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm_mask_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, 1, b, c, ); - let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask3_fmadd_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_mask3_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm_mask3_fmsub_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask3_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, b, c, 0, ); - let e = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); assert_eq_m128h(r, e); - let r = _mm_mask3_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm_mask3_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, b, c, 1, ); - let e = _mm_setr_ph(-2.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let e = _mm_setr_ph(-1.0, 30., 31., 32., 33., 34., 35., 36.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fmadd_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_maskz_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm_maskz_fmsub_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 
21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_maskz_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( 0, a, b, c, ); - let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_maskz_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm_maskz_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( 1, a, b, c, ); - let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_fcmadd_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, 2.0); - let c = _mm_set1_pch(0.0, 3.0); - let r = _mm_fcmadd_pch(a, b, c); - let e = _mm_set1_pch(2.0, 3.0); + unsafe fn test_mm_fnmadd_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_fnmadd_ph(a, b, c); + let e = _mm_set1_ph(1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fcmadd_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, 2.0); - let c = _mm_set1_pch(0.0, 3.0); - let r = _mm_mask_fcmadd_pch(a, 0b0101, b, c); - let e = _mm_setr_ph(2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0); + unsafe fn test_mm_mask_fnmadd_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_mask_fnmadd_ph(a, 0b01010101, b, c); + let e = _mm_set_ph(1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask3_fcmadd_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, 2.0); - let c = _mm_set1_pch(0.0, 3.0); - let r = _mm_mask3_fcmadd_pch(a, b, c, 0b0101); - let e = _mm_setr_ph(2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0); + unsafe fn test_mm_mask3_fnmadd_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_mask3_fnmadd_ph(a, b, c, 0b01010101); + let e = _mm_set_ph(3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_fcmadd_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, 2.0); - let c = _mm_set1_pch(0.0, 3.0); - let r = _mm_maskz_fcmadd_pch(0b0101, a, b, c); - let e = _mm_setr_ph(2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0); + unsafe fn test_mm_maskz_fnmadd_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_maskz_fnmadd_ph(0b01010101, a, b, c); + let e = _mm_set_ph(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_fcmadd_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 2.0); - let c = _mm256_set1_pch(0.0, 3.0); - let r = _mm256_fcmadd_pch(a, b, c); - let e = _mm256_set1_pch(2.0, 3.0); + unsafe fn test_mm256_fnmadd_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_fnmadd_ph(a, b, c); + let e = _mm256_set1_ph(1.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_fcmadd_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 2.0); - let c = _mm256_set1_pch(0.0, 3.0); - let r = 
_mm256_mask_fcmadd_pch(a, 0b01010101, b, c); - let e = _mm256_setr_ph( - 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, + unsafe fn test_mm256_mask_fnmadd_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_mask_fnmadd_ph(a, 0b0101010101010101, b, c); + let e = _mm256_set_ph( + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask3_fcmadd_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 2.0); - let c = _mm256_set1_pch(0.0, 3.0); - let r = _mm256_mask3_fcmadd_pch(a, b, c, 0b01010101); - let e = _mm256_setr_ph( - 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, + unsafe fn test_mm256_mask3_fnmadd_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_mask3_fnmadd_ph(a, b, c, 0b0101010101010101); + let e = _mm256_set_ph( + 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_fcmadd_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 2.0); - let c = _mm256_set1_pch(0.0, 3.0); - let r = _mm256_maskz_fcmadd_pch(0b01010101, a, b, c); - let e = _mm256_setr_ph( - 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, + unsafe fn test_mm256_maskz_fnmadd_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_maskz_fnmadd_ph(0b0101010101010101, a, b, c); + let e = _mm256_set_ph( + 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fcmadd_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 2.0); - let c = _mm512_set1_pch(0.0, 3.0); - let r = _mm512_fcmadd_pch(a, b, c); - let e = _mm512_set1_pch(2.0, 3.0); + unsafe fn test_mm512_fnmadd_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_fnmadd_ph(a, b, c); + let e = _mm512_set1_ph(1.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fcmadd_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 2.0); - let c = _mm512_set1_pch(0.0, 3.0); - let r = _mm512_mask_fcmadd_pch(a, 0b0101010101010101, b, c); - let e = _mm512_setr_ph( - 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, - 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, + unsafe fn test_mm512_mask_fnmadd_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask_fnmadd_ph(a, 0b01010101010101010101010101010101, b, c); + let e = _mm512_set_ph( + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fcmadd_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 2.0); - let c = _mm512_set1_pch(0.0, 3.0); - let r = _mm512_mask3_fcmadd_pch(a, b, c, 0b0101010101010101); - let e = _mm512_setr_ph( - 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 
3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, - 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, + unsafe fn test_mm512_mask3_fnmadd_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask3_fnmadd_ph(a, b, c, 0b01010101010101010101010101010101); + let e = _mm512_set_ph( + 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, + 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fcmadd_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 2.0); - let c = _mm512_set1_pch(0.0, 3.0); - let r = _mm512_maskz_fcmadd_pch(0b0101010101010101, a, b, c); - let e = _mm512_setr_ph( - 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, - 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, + unsafe fn test_mm512_maskz_fnmadd_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_maskz_fnmadd_ph(0b01010101010101010101010101010101, a, b, c); + let e = _mm512_set_ph( + 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, + 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fcmadd_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 2.0); - let c = _mm512_set1_pch(0.0, 3.0); + unsafe fn test_mm512_fnmadd_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); let r = - _mm512_fcmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); - let e = _mm512_set1_pch(2.0, 3.0); + _mm512_fnmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm512_set1_ph(1.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fcmadd_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 2.0); - let c = _mm512_set1_pch(0.0, 3.0); - let r = _mm512_mask_fcmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_mask_fnmadd_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask_fnmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, - 0b0101010101010101, + 0b01010101010101010101010101010101, b, c, ); - let e = _mm512_setr_ph( - 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, - 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, + let e = _mm512_set_ph( + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fcmadd_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 2.0); - let c = _mm512_set1_pch(0.0, 3.0); - let r = _mm512_mask3_fcmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_mask3_fnmadd_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask3_fnmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | 
_MM_FROUND_NO_EXC }>( a, b, c, - 0b0101010101010101, + 0b01010101010101010101010101010101, ); - let e = _mm512_setr_ph( - 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, - 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, + let e = _mm512_set_ph( + 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, + 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, ); assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fcmadd_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 2.0); - let c = _mm512_set1_pch(0.0, 3.0); - let r = _mm512_maskz_fcmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b0101010101010101, + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_fnmadd_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_maskz_fnmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101010101010101010101010101, a, b, c, ); - let e = _mm512_setr_ph( - 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, - 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, + let e = _mm512_set_ph( + 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, + 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fcmadd_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_fcmadd_sch(a, b, c); - let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_fnmadd_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_fnmadd_sh(a, b, c); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fcmadd_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_mask_fcmadd_sch(a, 0, b, c); - let e = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_mask_fnmadd_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_fnmadd_sh(a, 0, b, c); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_mask_fcmadd_sch(a, 1, b, c); - let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let r = _mm_mask_fnmadd_sh(a, 1, b, c); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask3_fcmadd_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_mask3_fcmadd_sch(a, b, 
c, 0); - let e = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + unsafe fn test_mm_mask3_fnmadd_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask3_fnmadd_sh(a, b, c, 0); + let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); assert_eq_m128h(r, e); - let r = _mm_mask3_fcmadd_sch(a, b, c, 1); - let e = _mm_setr_ph(2.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_mask3_fnmadd_sh(a, b, c, 1); + let e = _mm_setr_ph(1.0, 30., 31., 32., 33., 34., 35., 36.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fcmadd_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_maskz_fcmadd_sch(0, a, b, c); - let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_maskz_fnmadd_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_maskz_fnmadd_sh(0, a, b, c); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_maskz_fcmadd_sch(1, a, b, c); - let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let r = _mm_maskz_fnmadd_sh(1, a, b, c); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fcmadd_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); - let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_fnmadd_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fcmadd_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_mask_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm_mask_fnmadd_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, 0, b, c, ); - let e = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_mask_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm_mask_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, 1, b, c, ); - 
let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask3_fcmadd_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_mask3_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm_mask3_fnmadd_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask3_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, b, c, 0, ); - let e = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); assert_eq_m128h(r, e); - let r = _mm_mask3_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm_mask3_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, b, c, 1, ); - let e = _mm_setr_ph(2.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let e = _mm_setr_ph(1.0, 30., 31., 32., 33., 34., 35., 36.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fcmadd_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_maskz_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm_maskz_fnmadd_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_maskz_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( 0, a, b, c, ); - let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_maskz_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm_maskz_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( 1, a, b, c, ); - let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_fmadd_ph() { + unsafe fn test_mm_fnmsub_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); - let r = _mm_fmadd_ph(a, b, c); - let e = _mm_set1_ph(5.0); + let r = _mm_fnmsub_ph(a, b, c); + let e = _mm_set1_ph(-5.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fmadd_ph() { + unsafe fn test_mm_mask_fnmsub_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); - let r = _mm_mask_fmadd_ph(a, 0b01010101, b, c); - let e = _mm_set_ph(1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0); + let r = _mm_mask_fnmsub_ph(a, 0b01010101, b, c); + let e = _mm_set_ph(1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask3_fmadd_ph() { + unsafe fn test_mm_mask3_fnmsub_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c 
= _mm_set1_ph(3.0); - let r = _mm_mask3_fmadd_ph(a, b, c, 0b01010101); - let e = _mm_set_ph(3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0); + let r = _mm_mask3_fnmsub_ph(a, b, c, 0b01010101); + let e = _mm_set_ph(3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_fmadd_ph() { + unsafe fn test_mm_maskz_fnmsub_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); - let r = _mm_maskz_fmadd_ph(0b01010101, a, b, c); - let e = _mm_set_ph(0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0); + let r = _mm_maskz_fnmsub_ph(0b01010101, a, b, c); + let e = _mm_set_ph(0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_fmadd_ph() { + unsafe fn test_mm256_fnmsub_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); - let r = _mm256_fmadd_ph(a, b, c); - let e = _mm256_set1_ph(5.0); + let r = _mm256_fnmsub_ph(a, b, c); + let e = _mm256_set1_ph(-5.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_fmadd_ph() { + unsafe fn test_mm256_mask_fnmsub_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); - let r = _mm256_mask_fmadd_ph(a, 0b0101010101010101, b, c); + let r = _mm256_mask_fnmsub_ph(a, 0b0101010101010101, b, c); let e = _mm256_set_ph( - 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, + 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask3_fmadd_ph() { + unsafe fn test_mm256_mask3_fnmsub_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); - let r = _mm256_mask3_fmadd_ph(a, b, c, 0b0101010101010101); + let r = _mm256_mask3_fnmsub_ph(a, b, c, 0b0101010101010101); let e = _mm256_set_ph( - 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, + 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_fmadd_ph() { + unsafe fn test_mm256_maskz_fnmsub_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); - let r = _mm256_maskz_fmadd_ph(0b0101010101010101, a, b, c); + let r = _mm256_maskz_fnmsub_ph(0b0101010101010101, a, b, c); let e = _mm256_set_ph( - 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, + 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmadd_ph() { + unsafe fn test_mm512_fnmsub_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); - let r = _mm512_fmadd_ph(a, b, c); - let e = _mm512_set1_ph(5.0); + let r = _mm512_fnmsub_ph(a, b, c); + let e = _mm512_set1_ph(-5.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fmadd_ph() { + unsafe fn test_mm512_mask_fnmsub_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); - let r = _mm512_mask_fmadd_ph(a, 0b01010101010101010101010101010101, b, c); + let r = _mm512_mask_fnmsub_ph(a, 0b01010101010101010101010101010101, b, c); let e = _mm512_set_ph( - 1.0, 5.0, 1.0, 5.0, 
1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, - 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, + 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, + 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fmadd_ph() { + unsafe fn test_mm512_mask3_fnmsub_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); - let r = _mm512_mask3_fmadd_ph(a, b, c, 0b01010101010101010101010101010101); + let r = _mm512_mask3_fnmsub_ph(a, b, c, 0b01010101010101010101010101010101); let e = _mm512_set_ph( - 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, - 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, + 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, + 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmadd_ph() { + unsafe fn test_mm512_maskz_fnmsub_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); - let r = _mm512_maskz_fmadd_ph(0b01010101010101010101010101010101, a, b, c); + let r = _mm512_maskz_fnmsub_ph(0b01010101010101010101010101010101, a, b, c); let e = _mm512_set_ph( - 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, - 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, + 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, + 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmadd_round_ph() { + unsafe fn test_mm512_fnmsub_round_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); - let r = _mm512_fmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); - let e = _mm512_set1_ph(5.0); + let r = + _mm512_fnmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm512_set1_ph(-5.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fmadd_round_ph() { + unsafe fn test_mm512_mask_fnmsub_round_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); - let r = _mm512_mask_fmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm512_mask_fnmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, 0b01010101010101010101010101010101, b, c, ); let e = _mm512_set_ph( - 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, - 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, + 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, + 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fmadd_round_ph() { + unsafe fn test_mm512_mask3_fnmsub_round_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); - let r = _mm512_mask3_fmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = 
_mm512_mask3_fnmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, b, c, 0b01010101010101010101010101010101, ); let e = _mm512_set_ph( - 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, - 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, + 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, + 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmadd_round_ph() { + unsafe fn test_mm512_maskz_fnmsub_round_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); - let r = _mm512_maskz_fmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm512_maskz_fnmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( 0b01010101010101010101010101010101, a, b, c, ); let e = _mm512_set_ph( - 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, - 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, + 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, + 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fmadd_sh() { + unsafe fn test_mm_fnmsub_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_fmadd_sh(a, b, c); - let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.); + let r = _mm_fnmsub_sh(a, b, c); + let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fmadd_sh() { + unsafe fn test_mm_mask_fnmsub_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_fmadd_sh(a, 0, b, c); + let r = _mm_mask_fnmsub_sh(a, 0, b, c); let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_mask_fmadd_sh(a, 1, b, c); - let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.); + let r = _mm_mask_fnmsub_sh(a, 1, b, c); + let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask3_fmadd_sh() { + unsafe fn test_mm_mask3_fnmsub_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask3_fmadd_sh(a, b, c, 0); + let r = _mm_mask3_fnmsub_sh(a, b, c, 0); let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); assert_eq_m128h(r, e); - let r = _mm_mask3_fmadd_sh(a, b, c, 1); - let e = _mm_setr_ph(5.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask3_fnmsub_sh(a, b, c, 1); + let e = _mm_setr_ph(-5.0, 30., 31., 32., 33., 34., 35., 36.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fmadd_sh() { + unsafe fn test_mm_maskz_fnmsub_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); let c = _mm_setr_ph(3.0, 30., 31., 
32., 33., 34., 35., 36.); - let r = _mm_maskz_fmadd_sh(0, a, b, c); + let r = _mm_maskz_fnmsub_sh(0, a, b, c); let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_maskz_fmadd_sh(1, a, b, c); - let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.); + let r = _mm_maskz_fnmsub_sh(1, a, b, c); + let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fmadd_round_sh() { + unsafe fn test_mm_fnmsub_round_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); - let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.); + let r = _mm_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fmadd_round_sh() { + unsafe fn test_mm_mask_fnmsub_round_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm_mask_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, 0, b, c, ); let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_mask_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm_mask_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, 1, b, c, ); - let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.); + let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask3_fmadd_round_sh() { + unsafe fn test_mm_mask3_fnmsub_round_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask3_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm_mask3_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, b, c, 0, ); let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); assert_eq_m128h(r, e); - let r = _mm_mask3_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm_mask3_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, b, c, 1, ); - let e = _mm_setr_ph(5.0, 30., 31., 32., 33., 34., 35., 36.); + let e = _mm_setr_ph(-5.0, 30., 31., 32., 33., 34., 35., 36.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fmadd_round_sh() { + unsafe fn test_mm_maskz_fnmsub_round_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_maskz_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm_maskz_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( 0, a, b, c, ); let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = 
_mm_maskz_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm_maskz_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( 1, a, b, c, ); - let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.); + let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_fmsub_ph() { + unsafe fn test_mm_fmaddsub_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); - let r = _mm_fmsub_ph(a, b, c); - let e = _mm_set1_ph(-1.0); + let r = _mm_fmaddsub_ph(a, b, c); + let e = _mm_set_ph(5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fmsub_ph() { + unsafe fn test_mm_mask_fmaddsub_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); - let r = _mm_mask_fmsub_ph(a, 0b01010101, b, c); - let e = _mm_set_ph(1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0); + let r = _mm_mask_fmaddsub_ph(a, 0b00110011, b, c); + let e = _mm_set_ph(1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask3_fmsub_ph() { + unsafe fn test_mm_mask3_fmaddsub_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); - let r = _mm_mask3_fmsub_ph(a, b, c, 0b01010101); - let e = _mm_set_ph(3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0); + let r = _mm_mask3_fmaddsub_ph(a, b, c, 0b00110011); + let e = _mm_set_ph(3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_fmsub_ph() { + unsafe fn test_mm_maskz_fmaddsub_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); - let r = _mm_maskz_fmsub_ph(0b01010101, a, b, c); - let e = _mm_set_ph(0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0); + let r = _mm_maskz_fmaddsub_ph(0b00110011, a, b, c); + let e = _mm_set_ph(0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_fmsub_ph() { + unsafe fn test_mm256_fmaddsub_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); - let r = _mm256_fmsub_ph(a, b, c); - let e = _mm256_set1_ph(-1.0); + let r = _mm256_fmaddsub_ph(a, b, c); + let e = _mm256_set_ph( + 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, + ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_fmsub_ph() { + unsafe fn test_mm256_mask_fmaddsub_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); - let r = _mm256_mask_fmsub_ph(a, 0b0101010101010101, b, c); + let r = _mm256_mask_fmaddsub_ph(a, 0b0011001100110011, b, c); let e = _mm256_set_ph( - 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, + 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask3_fmsub_ph() { + unsafe fn test_mm256_mask3_fmaddsub_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); - let r = _mm256_mask3_fmsub_ph(a, b, c, 0b0101010101010101); + let r = _mm256_mask3_fmaddsub_ph(a, b, c, 0b0011001100110011); let e = _mm256_set_ph( - 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, 
-1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, + 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_fmsub_ph() { + unsafe fn test_mm256_maskz_fmaddsub_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); - let r = _mm256_maskz_fmsub_ph(0b0101010101010101, a, b, c); + let r = _mm256_maskz_fmaddsub_ph(0b0011001100110011, a, b, c); let e = _mm256_set_ph( - 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, + 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmsub_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_fmsub_ph(a, b, c); - let e = _mm512_set1_ph(-1.0); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fmsub_ph() { + unsafe fn test_mm512_fmaddsub_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); - let r = _mm512_mask_fmsub_ph(a, 0b01010101010101010101010101010101, b, c); + let r = _mm512_fmaddsub_ph(a, b, c); let e = _mm512_set_ph( - 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, - 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, + 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, + 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fmsub_ph() { + unsafe fn test_mm512_mask_fmaddsub_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); - let r = _mm512_mask3_fmsub_ph(a, b, c, 0b01010101010101010101010101010101); + let r = _mm512_mask_fmaddsub_ph(a, 0b00110011001100110011001100110011, b, c); let e = _mm512_set_ph( - 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, - 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, + 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, + 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmsub_ph() { + unsafe fn test_mm512_mask3_fmaddsub_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); - let r = _mm512_maskz_fmsub_ph(0b01010101010101010101010101010101, a, b, c); + let r = _mm512_mask3_fmaddsub_ph(a, b, c, 0b00110011001100110011001100110011); let e = _mm512_set_ph( - 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, - 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, + 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, + 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmsub_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_fmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); - let e = 
_mm512_set1_ph(-1.0); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fmsub_round_ph() { + unsafe fn test_mm512_maskz_fmaddsub_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); - let r = _mm512_mask_fmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, - 0b01010101010101010101010101010101, - b, - c, - ); + let r = _mm512_maskz_fmaddsub_ph(0b00110011001100110011001100110011, a, b, c); let e = _mm512_set_ph( - 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, - 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, + 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, + 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fmsub_round_ph() { + unsafe fn test_mm512_fmaddsub_round_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); - let r = _mm512_mask3_fmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, - b, - c, - 0b01010101010101010101010101010101, - ); + let r = + _mm512_fmaddsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); let e = _mm512_set_ph( - 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, - 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, + 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, + 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmsub_round_ph() { + unsafe fn test_mm512_mask_fmaddsub_round_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); - let r = _mm512_maskz_fmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b01010101010101010101010101010101, + let r = _mm512_mask_fmaddsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, - b, - c, - ); - let e = _mm512_set_ph( - 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, - 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, - ); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fmsub_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_fmsub_sh(a, b, c); - let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fmsub_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_fmsub_sh(a, 0, b, c); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_mask_fmsub_sh(a, 1, b, c); - let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask3_fmsub_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = 
_mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask3_fmsub_sh(a, b, c, 0); - let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - assert_eq_m128h(r, e); - let r = _mm_mask3_fmsub_sh(a, b, c, 1); - let e = _mm_setr_ph(-1.0, 30., 31., 32., 33., 34., 35., 36.); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fmsub_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_maskz_fmsub_sh(0, a, b, c); - let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_maskz_fmsub_sh(1, a, b, c); - let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fmsub_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); - let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fmsub_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, 0, b, c, - ); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_mask_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, 1, b, c, + 0b00110011001100110011001100110011, + b, + c, ); - let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); + let e = _mm512_set_ph( + 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, + 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask3_fmsub_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask3_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, b, c, 0, + unsafe fn test_mm512_mask3_fmaddsub_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask3_fmaddsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, + b, + c, + 0b00110011001100110011001100110011, ); - let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - assert_eq_m128h(r, e); - let r = _mm_mask3_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, b, c, 1, + let e = _mm512_set_ph( + 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, + 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, ); - let e = _mm_setr_ph(-1.0, 30., 31., 32., 33., 34., 35., 36.); - assert_eq_m128h(r, e); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn 
test_mm_maskz_fmsub_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_maskz_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0, a, b, c, + unsafe fn test_mm512_maskz_fmaddsub_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_maskz_fmaddsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b00110011001100110011001100110011, + a, + b, + c, ); - let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_maskz_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 1, a, b, c, + let e = _mm512_set_ph( + 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, + 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, ); - let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_fnmadd_ph() { + unsafe fn test_mm_fmsubadd_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); - let r = _mm_fnmadd_ph(a, b, c); - let e = _mm_set1_ph(1.0); + let r = _mm_fmsubadd_ph(a, b, c); + let e = _mm_set_ph(-1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fnmadd_ph() { + unsafe fn test_mm_mask_fmsubadd_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); - let r = _mm_mask_fnmadd_ph(a, 0b01010101, b, c); - let e = _mm_set_ph(1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0); + let r = _mm_mask_fmsubadd_ph(a, 0b00110011, b, c); + let e = _mm_set_ph(1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask3_fnmadd_ph() { + unsafe fn test_mm_mask3_fmsubadd_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); - let r = _mm_mask3_fnmadd_ph(a, b, c, 0b01010101); - let e = _mm_set_ph(3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0); + let r = _mm_mask3_fmsubadd_ph(a, b, c, 0b00110011); + let e = _mm_set_ph(3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_fnmadd_ph() { + unsafe fn test_mm_maskz_fmsubadd_ph() { let a = _mm_set1_ph(1.0); let b = _mm_set1_ph(2.0); let c = _mm_set1_ph(3.0); - let r = _mm_maskz_fnmadd_ph(0b01010101, a, b, c); - let e = _mm_set_ph(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0); + let r = _mm_maskz_fmsubadd_ph(0b00110011, a, b, c); + let e = _mm_set_ph(0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_fnmadd_ph() { + unsafe fn test_mm256_fmsubadd_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); - let r = _mm256_fnmadd_ph(a, b, c); - let e = _mm256_set1_ph(1.0); + let r = _mm256_fmsubadd_ph(a, b, c); + let e = _mm256_set_ph( + -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, + ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_fnmadd_ph() { + unsafe fn test_mm256_mask_fmsubadd_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = 
_mm256_set1_ph(3.0); - let r = _mm256_mask_fnmadd_ph(a, 0b0101010101010101, b, c); + let r = _mm256_mask_fmsubadd_ph(a, 0b0011001100110011, b, c); let e = _mm256_set_ph( - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask3_fnmadd_ph() { + unsafe fn test_mm256_mask3_fmsubadd_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); - let r = _mm256_mask3_fnmadd_ph(a, b, c, 0b0101010101010101); + let r = _mm256_mask3_fmsubadd_ph(a, b, c, 0b0011001100110011); let e = _mm256_set_ph( - 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, + 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_fnmadd_ph() { + unsafe fn test_mm256_maskz_fmsubadd_ph() { let a = _mm256_set1_ph(1.0); let b = _mm256_set1_ph(2.0); let c = _mm256_set1_ph(3.0); - let r = _mm256_maskz_fnmadd_ph(0b0101010101010101, a, b, c); + let r = _mm256_maskz_fmsubadd_ph(0b0011001100110011, a, b, c); let e = _mm256_set_ph( - 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, + 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fnmadd_ph() { + unsafe fn test_mm512_fmsubadd_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); - let r = _mm512_fnmadd_ph(a, b, c); - let e = _mm512_set1_ph(1.0); + let r = _mm512_fmsubadd_ph(a, b, c); + let e = _mm512_set_ph( + -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, + -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, + ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fnmadd_ph() { + unsafe fn test_mm512_mask_fmsubadd_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); - let r = _mm512_mask_fnmadd_ph(a, 0b01010101010101010101010101010101, b, c); + let r = _mm512_mask_fmsubadd_ph(a, 0b00110011001100110011001100110011, b, c); let e = _mm512_set_ph( - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, + 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fnmadd_ph() { + unsafe fn test_mm512_mask3_fmsubadd_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); - let r = _mm512_mask3_fnmadd_ph(a, b, c, 0b01010101010101010101010101010101); + let r = _mm512_mask3_fmsubadd_ph(a, b, c, 0b00110011001100110011001100110011); let e = _mm512_set_ph( - 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, - 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, + 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, + 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, ); assert_eq_m512h(r, e); } 
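The fmaddsub/fmsubadd expected vectors in this hunk fold together three easy-to-confuse conventions: `_mm_set_ph` and its wider variants list lanes from highest to lowest (the last argument is lane 0), whereas the scalar `_sh` tests use `_mm_setr_ph` (lane 0 first); mask bit `i` always selects lane `i`; and the add/sub alternation starts at lane 0 with a subtract for `fmaddsub` and an add for `fmsubadd`. A minimal scalar sketch of the lane semantics these tests assume (using `f32` stand-ins, since `f16` arithmetic is still feature-gated; `fmaddsub_model`/`fmsubadd_model` are illustrative names, not part of the patch):

```rust
// Scalar model of the lane-wise semantics exercised by the
// fmaddsub/fmsubadd tests: for fmaddsub, lane i computes a*b - c when i
// is even and a*b + c when i is odd; fmsubadd swaps the two cases. A
// mask bit of 0 keeps the fallback lane instead (src, a, or 0.0,
// depending on the mask/mask3/maskz variant).
fn fmaddsub_model(a: f32, b: f32, c: f32, lane: usize) -> f32 {
    if lane % 2 == 0 { a * b - c } else { a * b + c }
}

fn fmsubadd_model(a: f32, b: f32, c: f32, lane: usize) -> f32 {
    if lane % 2 == 0 { a * b + c } else { a * b - c }
}

fn main() {
    // Mirrors the a = 1.0, b = 2.0, c = 3.0 inputs used above. Lane 0 of
    // fmaddsub is 1.0 * 2.0 - 3.0 = -1.0 and lane 1 is 5.0, i.e. the last
    // two arguments of the `_mm_set_ph(5.0, -1.0, ...)` expected vectors.
    assert_eq!(fmaddsub_model(1.0, 2.0, 3.0, 0), -1.0);
    assert_eq!(fmaddsub_model(1.0, 2.0, 3.0, 1), 5.0);
    assert_eq!(fmsubadd_model(1.0, 2.0, 3.0, 0), 5.0);
    assert_eq!(fmsubadd_model(1.0, 2.0, 3.0, 1), -1.0);
}
```

The same conventions carry through the rest of the hunk; note also that the inputs are small powers of two throughout, so even the approximation instructions exercised below (`rcp`, `rsqrt`) should land on exactly representable `f16` results, which is presumably what makes the exact `assert_eq_m128h`-style comparisons reliable.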
#[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fnmadd_ph() { + unsafe fn test_mm512_maskz_fmsubadd_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); - let r = _mm512_maskz_fnmadd_ph(0b01010101010101010101010101010101, a, b, c); + let r = _mm512_maskz_fmsubadd_ph(0b00110011001100110011001100110011, a, b, c); let e = _mm512_set_ph( - 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, - 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, + 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, + 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fnmadd_round_ph() { + unsafe fn test_mm512_fmsubadd_round_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); let r = - _mm512_fnmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); - let e = _mm512_set1_ph(1.0); + _mm512_fmsubadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm512_set_ph( + -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, + -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, + ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fnmadd_round_ph() { + unsafe fn test_mm512_mask_fmsubadd_round_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); - let r = _mm512_mask_fnmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm512_mask_fmsubadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, - 0b01010101010101010101010101010101, + 0b00110011001100110011001100110011, b, c, ); let e = _mm512_set_ph( - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, + 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fnmadd_round_ph() { + unsafe fn test_mm512_mask3_fmsubadd_round_ph() { let a = _mm512_set1_ph(1.0); let b = _mm512_set1_ph(2.0); let c = _mm512_set1_ph(3.0); - let r = _mm512_mask3_fnmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm512_mask3_fmsubadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, b, c, - 0b01010101010101010101010101010101, + 0b00110011001100110011001100110011, + ); + let e = _mm512_set_ph( + 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, + 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_fmsubadd_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_maskz_fmsubadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b00110011001100110011001100110011, + a, + b, + c, + ); + let e = _mm512_set_ph( + 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, + 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 
0.0, -1.0, 5.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_rcp_ph() { + let a = _mm_set1_ph(2.0); + let r = _mm_rcp_ph(a); + let e = _mm_set1_ph(0.5); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_rcp_ph() { + let a = _mm_set1_ph(2.0); + let src = _mm_set1_ph(1.0); + let r = _mm_mask_rcp_ph(src, 0b01010101, a); + let e = _mm_set_ph(1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_rcp_ph() { + let a = _mm_set1_ph(2.0); + let r = _mm_maskz_rcp_ph(0b01010101, a); + let e = _mm_set_ph(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_rcp_ph() { + let a = _mm256_set1_ph(2.0); + let r = _mm256_rcp_ph(a); + let e = _mm256_set1_ph(0.5); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_rcp_ph() { + let a = _mm256_set1_ph(2.0); + let src = _mm256_set1_ph(1.0); + let r = _mm256_mask_rcp_ph(src, 0b0101010101010101, a); + let e = _mm256_set_ph( + 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_rcp_ph() { + let a = _mm256_set1_ph(2.0); + let r = _mm256_maskz_rcp_ph(0b0101010101010101, a); + let e = _mm256_set_ph( + 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_rcp_ph() { + let a = _mm512_set1_ph(2.0); + let r = _mm512_rcp_ph(a); + let e = _mm512_set1_ph(0.5); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_rcp_ph() { + let a = _mm512_set1_ph(2.0); + let src = _mm512_set1_ph(1.0); + let r = _mm512_mask_rcp_ph(src, 0b01010101010101010101010101010101, a); let e = _mm512_set_ph( - 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, - 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, + 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, + 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fnmadd_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_maskz_fnmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b01010101010101010101010101010101, - a, - b, - c, - ); + unsafe fn test_mm512_maskz_rcp_ph() { + let a = _mm512_set1_ph(2.0); + let r = _mm512_maskz_rcp_ph(0b01010101010101010101010101010101, a); let e = _mm512_set_ph( - 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, - 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, + 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, + 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fnmadd_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = 
_mm_fnmadd_sh(a, b, c); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + unsafe fn test_mm_rcp_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let r = _mm_rcp_sh(a, b); + let e = _mm_setr_ph(0.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fnmadd_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_fnmadd_sh(a, 0, b, c); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + unsafe fn test_mm_mask_rcp_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); + let r = _mm_mask_rcp_sh(src, 0, a, b); + let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); - let r = _mm_mask_fnmadd_sh(a, 1, b, c); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let r = _mm_mask_rcp_sh(src, 1, a, b); + let e = _mm_setr_ph(0.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask3_fnmadd_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask3_fnmadd_sh(a, b, c, 0); - let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + unsafe fn test_mm_maskz_rcp_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let r = _mm_maskz_rcp_sh(0, a, b); + let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); - let r = _mm_mask3_fnmadd_sh(a, b, c, 1); - let e = _mm_setr_ph(1.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_maskz_rcp_sh(1, a, b); + let e = _mm_setr_ph(0.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fnmadd_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_maskz_fnmadd_sh(0, a, b, c); - let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_rsqrt_ph() { + let a = _mm_set1_ph(4.0); + let r = _mm_rsqrt_ph(a); + let e = _mm_set1_ph(0.5); assert_eq_m128h(r, e); - let r = _mm_maskz_fnmadd_sh(1, a, b, c); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_rsqrt_ph() { + let a = _mm_set1_ph(4.0); + let src = _mm_set1_ph(1.0); + let r = _mm_mask_rsqrt_ph(src, 0b01010101, a); + let e = _mm_set_ph(1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fnmadd_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_fnmadd_round_sh::<{ 
_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_rsqrt_ph() { + let a = _mm_set1_ph(4.0); + let r = _mm_maskz_rsqrt_ph(0b01010101, a); + let e = _mm_set_ph(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fnmadd_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, 0, b, c, + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_rsqrt_ph() { + let a = _mm256_set1_ph(4.0); + let r = _mm256_rsqrt_ph(a); + let e = _mm256_set1_ph(0.5); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_rsqrt_ph() { + let a = _mm256_set1_ph(4.0); + let src = _mm256_set1_ph(1.0); + let r = _mm256_mask_rsqrt_ph(src, 0b0101010101010101, a); + let e = _mm256_set_ph( + 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, ); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_mask_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, 1, b, c, + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_rsqrt_ph() { + let a = _mm256_set1_ph(4.0); + let r = _mm256_maskz_rsqrt_ph(0b0101010101010101, a); + let e = _mm256_set_ph( + 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, ); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask3_fnmadd_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask3_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, b, c, 0, - ); - let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - assert_eq_m128h(r, e); - let r = _mm_mask3_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, b, c, 1, - ); - let e = _mm_setr_ph(1.0, 30., 31., 32., 33., 34., 35., 36.); - assert_eq_m128h(r, e); + unsafe fn test_mm512_rsqrt_ph() { + let a = _mm512_set1_ph(4.0); + let r = _mm512_rsqrt_ph(a); + let e = _mm512_set1_ph(0.5); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fnmadd_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_maskz_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0, a, b, c, + unsafe fn test_mm512_mask_rsqrt_ph() { + let a = _mm512_set1_ph(4.0); + let src = _mm512_set1_ph(1.0); + let r = _mm512_mask_rsqrt_ph(src, 0b01010101010101010101010101010101, a); + let e = _mm512_set_ph( + 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, + 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, ); - let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - 
let r = _mm_maskz_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 1, a, b, c, + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_rsqrt_ph() { + let a = _mm512_set1_ph(4.0); + let r = _mm512_maskz_rsqrt_ph(0b01010101010101010101010101010101, a); + let e = _mm512_set_ph( + 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, + 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, ); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_rsqrt_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); + let r = _mm_rsqrt_sh(a, b); + let e = _mm_setr_ph(0.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_fnmsub_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_fnmsub_ph(a, b, c); - let e = _mm_set1_ph(-5.0); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_rsqrt_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); + let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); + let r = _mm_mask_rsqrt_sh(src, 0, a, b); + let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + let r = _mm_mask_rsqrt_sh(src, 1, a, b); + let e = _mm_setr_ph(0.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fnmsub_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_mask_fnmsub_ph(a, 0b01010101, b, c); - let e = _mm_set_ph(1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_rsqrt_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); + let r = _mm_maskz_rsqrt_sh(0, a, b); + let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + let r = _mm_maskz_rsqrt_sh(1, a, b); + let e = _mm_setr_ph(0.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask3_fnmsub_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_mask3_fnmsub_ph(a, b, c, 0b01010101); - let e = _mm_set_ph(3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0); + unsafe fn test_mm_sqrt_ph() { + let a = _mm_set1_ph(4.0); + let r = _mm_sqrt_ph(a); + let e = _mm_set1_ph(2.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_fnmsub_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_maskz_fnmsub_ph(0b01010101, a, b, c); - let e = _mm_set_ph(0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0); + unsafe fn test_mm_mask_sqrt_ph() { + let a = _mm_set1_ph(4.0); + let src = _mm_set1_ph(1.0); + let r = _mm_mask_sqrt_ph(src, 0b01010101, a); + let e = _mm_set_ph(1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_fnmsub_ph() { - let a = 
_mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_fnmsub_ph(a, b, c); - let e = _mm256_set1_ph(-5.0); - assert_eq_m256h(r, e); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_sqrt_ph() { + let a = _mm_set1_ph(4.0); + let r = _mm_maskz_sqrt_ph(0b01010101, a); + let e = _mm_set_ph(0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_fnmsub_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_mask_fnmsub_ph(a, 0b0101010101010101, b, c); - let e = _mm256_set_ph( - 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, - ); + unsafe fn test_mm256_sqrt_ph() { + let a = _mm256_set1_ph(4.0); + let r = _mm256_sqrt_ph(a); + let e = _mm256_set1_ph(2.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask3_fnmsub_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_mask3_fnmsub_ph(a, b, c, 0b0101010101010101); + unsafe fn test_mm256_mask_sqrt_ph() { + let a = _mm256_set1_ph(4.0); + let src = _mm256_set1_ph(1.0); + let r = _mm256_mask_sqrt_ph(src, 0b0101010101010101, a); let e = _mm256_set_ph( - 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, + 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_fnmsub_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_maskz_fnmsub_ph(0b0101010101010101, a, b, c); + unsafe fn test_mm256_maskz_sqrt_ph() { + let a = _mm256_set1_ph(4.0); + let r = _mm256_maskz_sqrt_ph(0b0101010101010101, a); let e = _mm256_set_ph( - 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, + 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fnmsub_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_fnmsub_ph(a, b, c); - let e = _mm512_set1_ph(-5.0); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fnmsub_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask_fnmsub_ph(a, 0b01010101010101010101010101010101, b, c); - let e = _mm512_set_ph( - 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, - 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, - ); + unsafe fn test_mm512_sqrt_ph() { + let a = _mm512_set1_ph(4.0); + let r = _mm512_sqrt_ph(a); + let e = _mm512_set1_ph(2.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fnmsub_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask3_fnmsub_ph(a, b, c, 0b01010101010101010101010101010101); + unsafe fn test_mm512_mask_sqrt_ph() { + let a = _mm512_set1_ph(4.0); + let src = _mm512_set1_ph(1.0); + let r = _mm512_mask_sqrt_ph(src, 0b01010101010101010101010101010101, a); let e = _mm512_set_ph( - 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, 
-5.0, 3.0, -5.0, 3.0, -5.0, - 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, + 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, + 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fnmsub_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_maskz_fnmsub_ph(0b01010101010101010101010101010101, a, b, c); + unsafe fn test_mm512_maskz_sqrt_ph() { + let a = _mm512_set1_ph(4.0); + let r = _mm512_maskz_sqrt_ph(0b01010101010101010101010101010101, a); let e = _mm512_set_ph( - 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, - 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, + 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, + 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fnmsub_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = - _mm512_fnmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); - let e = _mm512_set1_ph(-5.0); + unsafe fn test_mm512_sqrt_round_ph() { + let a = _mm512_set1_ph(4.0); + let r = _mm512_sqrt_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a); + let e = _mm512_set1_ph(2.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fnmsub_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask_fnmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, + unsafe fn test_mm512_mask_sqrt_round_ph() { + let a = _mm512_set1_ph(4.0); + let src = _mm512_set1_ph(1.0); + let r = _mm512_mask_sqrt_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0b01010101010101010101010101010101, - b, - c, - ); - let e = _mm512_set_ph( - 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, - 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, - ); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fnmsub_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask3_fnmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, - b, - c, - 0b01010101010101010101010101010101, ); let e = _mm512_set_ph( - 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, - 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, + 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, + 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fnmsub_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_maskz_fnmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_maskz_sqrt_round_ph() { + let a = _mm512_set1_ph(4.0); + let r = _mm512_maskz_sqrt_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( 
0b01010101010101010101010101010101, a, - b, - c, ); let e = _mm512_set_ph( - 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, - 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, + 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, + 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fnmsub_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_fnmsub_sh(a, b, c); - let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fnmsub_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_fnmsub_sh(a, 0, b, c); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_mask_fnmsub_sh(a, 1, b, c); - let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask3_fnmsub_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask3_fnmsub_sh(a, b, c, 0); - let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - assert_eq_m128h(r, e); - let r = _mm_mask3_fnmsub_sh(a, b, c, 1); - let e = _mm_setr_ph(-5.0, 30., 31., 32., 33., 34., 35., 36.); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fnmsub_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_maskz_fnmsub_sh(0, a, b, c); - let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_maskz_fnmsub_sh(1, a, b, c); - let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fnmsub_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); - let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); + unsafe fn test_mm_sqrt_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); + let r = _mm_sqrt_sh(a, b); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fnmsub_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, 0, b, c, - ); - let e = 
_mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + unsafe fn test_mm_mask_sqrt_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); + let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); + let r = _mm_mask_sqrt_sh(src, 0, a, b); + let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); - let r = _mm_mask_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, 1, b, c, - ); - let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); + let r = _mm_mask_sqrt_sh(src, 1, a, b); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask3_fnmsub_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask3_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, b, c, 0, - ); - let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + unsafe fn test_mm_maskz_sqrt_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); + let r = _mm_maskz_sqrt_sh(0, a, b); + let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); - let r = _mm_mask3_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, b, c, 1, - ); - let e = _mm_setr_ph(-5.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_maskz_sqrt_sh(1, a, b); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fnmsub_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_maskz_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0, a, b, c, + unsafe fn test_mm_sqrt_round_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); + let r = _mm_sqrt_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_sqrt_round_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); + let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); + let r = _mm_mask_sqrt_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, ); - let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); + let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); - let r = _mm_maskz_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 1, a, b, c, + let r = _mm_mask_sqrt_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 1, a, b, ); - let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_fmaddsub_ph() { - 
let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_fmaddsub_ph(a, b, c); - let e = _mm_set_ph(5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_sqrt_round_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); + let r = + _mm_maskz_sqrt_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fmaddsub_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_mask_fmaddsub_ph(a, 0b00110011, b, c); - let e = _mm_set_ph(1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0); + let r = + _mm_maskz_sqrt_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask3_fmaddsub_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_mask3_fmaddsub_ph(a, b, c, 0b00110011); - let e = _mm_set_ph(3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0); + unsafe fn test_mm_max_ph() { + let a = _mm_set1_ph(2.0); + let b = _mm_set1_ph(1.0); + let r = _mm_max_ph(a, b); + let e = _mm_set1_ph(2.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_fmaddsub_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_maskz_fmaddsub_ph(0b00110011, a, b, c); - let e = _mm_set_ph(0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0); + unsafe fn test_mm_mask_max_ph() { + let a = _mm_set1_ph(2.0); + let b = _mm_set1_ph(1.0); + let src = _mm_set1_ph(3.0); + let r = _mm_mask_max_ph(src, 0b01010101, a, b); + let e = _mm_set_ph(3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_fmaddsub_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_fmaddsub_ph(a, b, c); - let e = _mm256_set_ph( - 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, - ); - assert_eq_m256h(r, e); + unsafe fn test_mm_maskz_max_ph() { + let a = _mm_set1_ph(2.0); + let b = _mm_set1_ph(1.0); + let r = _mm_maskz_max_ph(0b01010101, a, b); + let e = _mm_set_ph(0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_fmaddsub_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_mask_fmaddsub_ph(a, 0b0011001100110011, b, c); - let e = _mm256_set_ph( - 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, - ); + unsafe fn test_mm256_max_ph() { + let a = _mm256_set1_ph(2.0); + let b = _mm256_set1_ph(1.0); + let r = _mm256_max_ph(a, b); + let e = _mm256_set1_ph(2.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask3_fmaddsub_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_mask3_fmaddsub_ph(a, b, c, 0b0011001100110011); + unsafe fn test_mm256_mask_max_ph() { + let a = _mm256_set1_ph(2.0); + let b = 
_mm256_set1_ph(1.0); + let src = _mm256_set1_ph(3.0); + let r = _mm256_mask_max_ph(src, 0b0101010101010101, a, b); let e = _mm256_set_ph( - 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, + 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_fmaddsub_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_maskz_fmaddsub_ph(0b0011001100110011, a, b, c); + unsafe fn test_mm256_maskz_max_ph() { + let a = _mm256_set1_ph(2.0); + let b = _mm256_set1_ph(1.0); + let r = _mm256_maskz_max_ph(0b0101010101010101, a, b); let e = _mm256_set_ph( - 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, + 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmaddsub_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_fmaddsub_ph(a, b, c); - let e = _mm512_set_ph( - 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, - 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, - ); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fmaddsub_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask_fmaddsub_ph(a, 0b00110011001100110011001100110011, b, c); - let e = _mm512_set_ph( - 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, - 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, - ); + unsafe fn test_mm512_max_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let r = _mm512_max_ph(a, b); + let e = _mm512_set1_ph(2.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fmaddsub_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask3_fmaddsub_ph(a, b, c, 0b00110011001100110011001100110011); + unsafe fn test_mm512_mask_max_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let src = _mm512_set1_ph(3.0); + let r = _mm512_mask_max_ph(src, 0b01010101010101010101010101010101, a, b); let e = _mm512_set_ph( - 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, - 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, + 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, + 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmaddsub_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_maskz_fmaddsub_ph(0b00110011001100110011001100110011, a, b, c); + unsafe fn test_mm512_maskz_max_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let r = _mm512_maskz_max_ph(0b01010101010101010101010101010101, a, b); let e = _mm512_set_ph( - 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, - 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, + 0.0, 2.0, 0.0, 2.0, 0.0, 
2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, + 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmaddsub_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = - _mm512_fmaddsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); - let e = _mm512_set_ph( - 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, - 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, - ); + unsafe fn test_mm512_max_round_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let r = _mm512_max_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set1_ph(2.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fmaddsub_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask_fmaddsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_mask_max_round_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let src = _mm512_set1_ph(3.0); + let r = _mm512_mask_max_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b01010101010101010101010101010101, a, - 0b00110011001100110011001100110011, b, - c, ); let e = _mm512_set_ph( - 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, - 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, + 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, + 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fmaddsub_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask3_fmaddsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_maskz_max_round_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let r = _mm512_maskz_max_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101010101010101010101010101, a, b, - c, - 0b00110011001100110011001100110011, ); let e = _mm512_set_ph( - 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, - 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, + 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, + 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmaddsub_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_maskz_fmaddsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b00110011001100110011001100110011, - a, - b, - c, + unsafe fn test_mm_max_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let r = _mm_max_sh(a, b); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_max_sh() { + let a = 
_mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); + let r = _mm_mask_max_sh(src, 0, a, b); + let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + let r = _mm_mask_max_sh(src, 1, a, b); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_max_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let r = _mm_maskz_max_sh(0, a, b); + let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + let r = _mm_maskz_max_sh(1, a, b); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_max_round_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let r = _mm_max_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_max_round_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); + let r = _mm_mask_max_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, ); - let e = _mm512_set_ph( - 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, - 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, + let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + let r = _mm_mask_max_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 1, a, b, ); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_fmsubadd_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_fmsubadd_ph(a, b, c); - let e = _mm_set_ph(-1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fmsubadd_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_mask_fmsubadd_ph(a, 0b00110011, b, c); - let e = _mm_set_ph(1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_max_round_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let r = + _mm_maskz_max_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + let r = + _mm_maskz_max_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask3_fmsubadd_ph() { - let a = 
_mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_mask3_fmsubadd_ph(a, b, c, 0b00110011); - let e = _mm_set_ph(3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0); + unsafe fn test_mm_min_ph() { + let a = _mm_set1_ph(2.0); + let b = _mm_set1_ph(1.0); + let r = _mm_min_ph(a, b); + let e = _mm_set1_ph(1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_fmsubadd_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_maskz_fmsubadd_ph(0b00110011, a, b, c); - let e = _mm_set_ph(0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0); + unsafe fn test_mm_mask_min_ph() { + let a = _mm_set1_ph(2.0); + let b = _mm_set1_ph(1.0); + let src = _mm_set1_ph(3.0); + let r = _mm_mask_min_ph(src, 0b01010101, a, b); + let e = _mm_set_ph(3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_fmsubadd_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_fmsubadd_ph(a, b, c); - let e = _mm256_set_ph( - -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, - ); - assert_eq_m256h(r, e); + unsafe fn test_mm_maskz_min_ph() { + let a = _mm_set1_ph(2.0); + let b = _mm_set1_ph(1.0); + let r = _mm_maskz_min_ph(0b01010101, a, b); + let e = _mm_set_ph(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_fmsubadd_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_mask_fmsubadd_ph(a, 0b0011001100110011, b, c); - let e = _mm256_set_ph( - 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, - ); + unsafe fn test_mm256_min_ph() { + let a = _mm256_set1_ph(2.0); + let b = _mm256_set1_ph(1.0); + let r = _mm256_min_ph(a, b); + let e = _mm256_set1_ph(1.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask3_fmsubadd_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_mask3_fmsubadd_ph(a, b, c, 0b0011001100110011); + unsafe fn test_mm256_mask_min_ph() { + let a = _mm256_set1_ph(2.0); + let b = _mm256_set1_ph(1.0); + let src = _mm256_set1_ph(3.0); + let r = _mm256_mask_min_ph(src, 0b0101010101010101, a, b); let e = _mm256_set_ph( - 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, + 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_fmsubadd_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_maskz_fmsubadd_ph(0b0011001100110011, a, b, c); + unsafe fn test_mm256_maskz_min_ph() { + let a = _mm256_set1_ph(2.0); + let b = _mm256_set1_ph(1.0); + let r = _mm256_maskz_min_ph(0b0101010101010101, a, b); let e = _mm256_set_ph( - 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, + 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmsubadd_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_fmsubadd_ph(a, b, c); - let e = 
_mm512_set_ph( - -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, - -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, - ); + unsafe fn test_mm512_min_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let r = _mm512_min_ph(a, b); + let e = _mm512_set1_ph(1.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fmsubadd_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask_fmsubadd_ph(a, 0b00110011001100110011001100110011, b, c); + unsafe fn test_mm512_mask_min_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let src = _mm512_set1_ph(3.0); + let r = _mm512_mask_min_ph(src, 0b01010101010101010101010101010101, a, b); let e = _mm512_set_ph( - 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, - 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, + 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, + 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fmsubadd_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask3_fmsubadd_ph(a, b, c, 0b00110011001100110011001100110011); + unsafe fn test_mm512_maskz_min_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let r = _mm512_maskz_min_ph(0b01010101010101010101010101010101, a, b); let e = _mm512_set_ph( - 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, - 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, + 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, + 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmsubadd_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_maskz_fmsubadd_ph(0b00110011001100110011001100110011, a, b, c); - let e = _mm512_set_ph( - 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, - 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, - ); + unsafe fn test_mm512_min_round_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let r = _mm512_min_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set1_ph(1.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmsubadd_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = - _mm512_fmsubadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + unsafe fn test_mm512_mask_min_round_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let src = _mm512_set1_ph(3.0); + let r = _mm512_mask_min_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b01010101010101010101010101010101, + a, + b, + ); let e = _mm512_set_ph( - -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, - -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, + 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 
3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, + 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fmsubadd_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask_fmsubadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_maskz_min_round_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let r = _mm512_maskz_min_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101010101010101010101010101, a, - 0b00110011001100110011001100110011, b, - c, ); let e = _mm512_set_ph( - 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, - 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, + 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, + 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fmsubadd_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask3_fmsubadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, - b, - c, - 0b00110011001100110011001100110011, - ); - let e = _mm512_set_ph( - 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, - 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, - ); - assert_eq_m512h(r, e); + unsafe fn test_mm_min_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let r = _mm_min_sh(a, b); + let e = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_min_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); + let r = _mm_mask_min_sh(src, 0, a, b); + let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + let r = _mm_mask_min_sh(src, 1, a, b); + let e = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_min_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let r = _mm_maskz_min_sh(0, a, b); + let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + let r = _mm_maskz_min_sh(1, a, b); + let e = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_min_round_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let r = _mm_min_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmsubadd_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = 
_mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_maskz_fmsubadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b00110011001100110011001100110011, - a, - b, - c, + unsafe fn test_mm_mask_min_round_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); + let r = _mm_mask_min_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, ); - let e = _mm512_set_ph( - 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, - 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, + let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + let r = _mm_mask_min_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 1, a, b, ); - assert_eq_m512h(r, e); + let e = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_min_round_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let r = + _mm_maskz_min_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + let r = + _mm_maskz_min_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_rcp_ph() { - let a = _mm_set1_ph(2.0); - let r = _mm_rcp_ph(a); - let e = _mm_set1_ph(0.5); + unsafe fn test_mm_getexp_ph() { + let a = _mm_set1_ph(3.0); + let r = _mm_getexp_ph(a); + let e = _mm_set1_ph(1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_rcp_ph() { - let a = _mm_set1_ph(2.0); - let src = _mm_set1_ph(1.0); - let r = _mm_mask_rcp_ph(src, 0b01010101, a); - let e = _mm_set_ph(1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5); + unsafe fn test_mm_mask_getexp_ph() { + let a = _mm_set1_ph(3.0); + let src = _mm_set1_ph(4.0); + let r = _mm_mask_getexp_ph(src, 0b01010101, a); + let e = _mm_set_ph(4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_rcp_ph() { - let a = _mm_set1_ph(2.0); - let r = _mm_maskz_rcp_ph(0b01010101, a); - let e = _mm_set_ph(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5); + unsafe fn test_mm_maskz_getexp_ph() { + let a = _mm_set1_ph(3.0); + let r = _mm_maskz_getexp_ph(0b01010101, a); + let e = _mm_set_ph(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_rcp_ph() { - let a = _mm256_set1_ph(2.0); - let r = _mm256_rcp_ph(a); - let e = _mm256_set1_ph(0.5); + unsafe fn test_mm256_getexp_ph() { + let a = _mm256_set1_ph(3.0); + let r = _mm256_getexp_ph(a); + let e = _mm256_set1_ph(1.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_rcp_ph() { - let a = _mm256_set1_ph(2.0); - let src = _mm256_set1_ph(1.0); - let r = _mm256_mask_rcp_ph(src, 0b0101010101010101, a); + unsafe fn test_mm256_mask_getexp_ph() { + let a = _mm256_set1_ph(3.0); + let src = _mm256_set1_ph(4.0); + let r = 
_mm256_mask_getexp_ph(src, 0b0101010101010101, a); let e = _mm256_set_ph( - 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, + 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_rcp_ph() { - let a = _mm256_set1_ph(2.0); - let r = _mm256_maskz_rcp_ph(0b0101010101010101, a); + unsafe fn test_mm256_maskz_getexp_ph() { + let a = _mm256_set1_ph(3.0); + let r = _mm256_maskz_getexp_ph(0b0101010101010101, a); let e = _mm256_set_ph( - 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, + 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_rcp_ph() { - let a = _mm512_set1_ph(2.0); - let r = _mm512_rcp_ph(a); - let e = _mm512_set1_ph(0.5); + unsafe fn test_mm512_getexp_ph() { + let a = _mm512_set1_ph(3.0); + let r = _mm512_getexp_ph(a); + let e = _mm512_set1_ph(1.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_rcp_ph() { - let a = _mm512_set1_ph(2.0); - let src = _mm512_set1_ph(1.0); - let r = _mm512_mask_rcp_ph(src, 0b01010101010101010101010101010101, a); + unsafe fn test_mm512_mask_getexp_ph() { + let a = _mm512_set1_ph(3.0); + let src = _mm512_set1_ph(4.0); + let r = _mm512_mask_getexp_ph(src, 0b01010101010101010101010101010101, a); let e = _mm512_set_ph( - 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, - 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, + 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, + 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_rcp_ph() { - let a = _mm512_set1_ph(2.0); - let r = _mm512_maskz_rcp_ph(0b01010101010101010101010101010101, a); + unsafe fn test_mm512_maskz_getexp_ph() { + let a = _mm512_set1_ph(3.0); + let r = _mm512_maskz_getexp_ph(0b01010101010101010101010101010101, a); let e = _mm512_set_ph( - 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, - 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, + 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, + 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_rcp_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); - let r = _mm_rcp_sh(a, b); - let e = _mm_setr_ph(0.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + unsafe fn test_mm512_getexp_round_ph() { + let a = _mm512_set1_ph(3.0); + let r = _mm512_getexp_round_ph::<_MM_FROUND_NO_EXC>(a); + let e = _mm512_set1_ph(1.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_getexp_round_ph() { + let a = _mm512_set1_ph(3.0); + let src = _mm512_set1_ph(4.0); + let r = _mm512_mask_getexp_round_ph::<_MM_FROUND_NO_EXC>( + src, + 0b01010101010101010101010101010101, + a, + ); + let e = _mm512_set_ph( + 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, + 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, + ); + assert_eq_m512h(r, 
e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_getexp_round_ph() { + let a = _mm512_set1_ph(3.0); + let r = _mm512_maskz_getexp_round_ph::<_MM_FROUND_NO_EXC>( + 0b01010101010101010101010101010101, + a, + ); + let e = _mm512_set_ph( + 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, + 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_getexp_sh() { + let a = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_getexp_sh(a, b); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_rcp_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); - let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); - let r = _mm_mask_rcp_sh(src, 0, a, b); - let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + unsafe fn test_mm_mask_getexp_sh() { + let a = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); + let src = _mm_setr_ph(4.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_getexp_sh(src, 0, a, b); + let e = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_mask_rcp_sh(src, 1, a, b); - let e = _mm_setr_ph(0.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let r = _mm_mask_getexp_sh(src, 1, a, b); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_rcp_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); - let r = _mm_maskz_rcp_sh(0, a, b); - let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + unsafe fn test_mm_maskz_getexp_sh() { + let a = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_maskz_getexp_sh(0, a, b); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_maskz_rcp_sh(1, a, b); - let e = _mm_setr_ph(0.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let r = _mm_maskz_getexp_sh(1, a, b); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_getexp_round_sh() { + let a = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_getexp_round_sh::<_MM_FROUND_NO_EXC>(a, b); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_getexp_round_sh() { + let a = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); + let src = _mm_setr_ph(4.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_getexp_round_sh::<_MM_FROUND_NO_EXC>(src, 0, a, b); + let e = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + let r = _mm_mask_getexp_round_sh::<_MM_FROUND_NO_EXC>(src, 1, a, b); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + 
assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_getexp_round_sh() { + let a = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_maskz_getexp_round_sh::<_MM_FROUND_NO_EXC>(0, a, b); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + let r = _mm_maskz_getexp_round_sh::<_MM_FROUND_NO_EXC>(1, a, b); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_rsqrt_ph() { - let a = _mm_set1_ph(4.0); - let r = _mm_rsqrt_ph(a); - let e = _mm_set1_ph(0.5); + unsafe fn test_mm_getmant_ph() { + let a = _mm_set1_ph(10.0); + let r = _mm_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(a); + let e = _mm_set1_ph(1.25); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_rsqrt_ph() { - let a = _mm_set1_ph(4.0); - let src = _mm_set1_ph(1.0); - let r = _mm_mask_rsqrt_ph(src, 0b01010101, a); - let e = _mm_set_ph(1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5); + unsafe fn test_mm_mask_getmant_ph() { + let a = _mm_set1_ph(10.0); + let src = _mm_set1_ph(20.0); + let r = _mm_mask_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(src, 0b01010101, a); + let e = _mm_set_ph(20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_rsqrt_ph() { - let a = _mm_set1_ph(4.0); - let r = _mm_maskz_rsqrt_ph(0b01010101, a); - let e = _mm_set_ph(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5); + unsafe fn test_mm_maskz_getmant_ph() { + let a = _mm_set1_ph(10.0); + let r = _mm_maskz_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(0b01010101, a); + let e = _mm_set_ph(0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_rsqrt_ph() { - let a = _mm256_set1_ph(4.0); - let r = _mm256_rsqrt_ph(a); - let e = _mm256_set1_ph(0.5); + unsafe fn test_mm256_getmant_ph() { + let a = _mm256_set1_ph(10.0); + let r = _mm256_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(a); + let e = _mm256_set1_ph(1.25); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_rsqrt_ph() { - let a = _mm256_set1_ph(4.0); - let src = _mm256_set1_ph(1.0); - let r = _mm256_mask_rsqrt_ph(src, 0b0101010101010101, a); + unsafe fn test_mm256_mask_getmant_ph() { + let a = _mm256_set1_ph(10.0); + let src = _mm256_set1_ph(20.0); + let r = _mm256_mask_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>( + src, + 0b0101010101010101, + a, + ); let e = _mm256_set_ph( - 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, + 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, + 20.0, 1.25, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_rsqrt_ph() { - let a = _mm256_set1_ph(4.0); - let r = _mm256_maskz_rsqrt_ph(0b0101010101010101, a); + unsafe fn test_mm256_maskz_getmant_ph() { + let a = _mm256_set1_ph(10.0); + let r = _mm256_maskz_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>( + 0b0101010101010101, + a, + ); let e = _mm256_set_ph( - 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, + 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, ); assert_eq_m256h(r, e); } 
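The getexp/getmant expectations in the tests above and below follow from the scalar definitions: getexp returns floor(log2(|x|)), and getmant rescales the input by a power of two into the interval selected by the norm constant, here `_MM_MANT_NORM_P75_1P5`, i.e. [0.75, 1.5); `_MM_MANT_SIGN_NAN` only changes behavior for negative inputs, which these tests avoid. A minimal f32 model of both operations, with assumed helper names and not part of this patch:

    // Scalar sketch of the semantics the vector tests assert (assumed
    // names, not part of this patch). Valid for positive, finite,
    // nonzero x.
    fn scalar_getexp(x: f32) -> f32 {
        // getexp(3.0) == 1.0, matching the 1.0 lanes expected above.
        x.abs().log2().floor()
    }

    fn scalar_getmant_p75_1p5(x: f32) -> f32 {
        // Rescale by powers of two until the mantissa lands in
        // [0.75, 1.5): getmant(10.0) gives 10.0 -> 5.0 -> 2.5 -> 1.25,
        // hence the expected 1.25 lanes.
        let mut m = x.abs();
        while m >= 1.5 {
            m *= 0.5;
        }
        while m < 0.75 {
            m *= 2.0;
        }
        m
    }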
#[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_rsqrt_ph() { - let a = _mm512_set1_ph(4.0); - let r = _mm512_rsqrt_ph(a); - let e = _mm512_set1_ph(0.5); + unsafe fn test_mm512_getmant_ph() { + let a = _mm512_set1_ph(10.0); + let r = _mm512_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(a); + let e = _mm512_set1_ph(1.25); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_rsqrt_ph() { - let a = _mm512_set1_ph(4.0); - let src = _mm512_set1_ph(1.0); - let r = _mm512_mask_rsqrt_ph(src, 0b01010101010101010101010101010101, a); + unsafe fn test_mm512_mask_getmant_ph() { + let a = _mm512_set1_ph(10.0); + let src = _mm512_set1_ph(20.0); + let r = _mm512_mask_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>( + src, + 0b01010101010101010101010101010101, + a, + ); let e = _mm512_set_ph( - 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, - 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, + 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, + 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, + 20.0, 1.25, 20.0, 1.25, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_rsqrt_ph() { - let a = _mm512_set1_ph(4.0); - let r = _mm512_maskz_rsqrt_ph(0b01010101010101010101010101010101, a); + unsafe fn test_mm512_maskz_getmant_ph() { + let a = _mm512_set1_ph(10.0); + let r = _mm512_maskz_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>( + 0b01010101010101010101010101010101, + a, + ); let e = _mm512_set_ph( - 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, - 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, + 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, + 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_rsqrt_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); - let r = _mm_rsqrt_sh(a, b); - let e = _mm_setr_ph(0.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + unsafe fn test_mm512_getmant_round_ph() { + let a = _mm512_set1_ph(10.0); + let r = + _mm512_getmant_round_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN, _MM_FROUND_NO_EXC>( + a, + ); + let e = _mm512_set1_ph(1.25); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_getmant_round_ph() { + let a = _mm512_set1_ph(10.0); + let src = _mm512_set1_ph(20.0); + let r = _mm512_mask_getmant_round_ph::< + _MM_MANT_NORM_P75_1P5, + _MM_MANT_SIGN_NAN, + _MM_FROUND_NO_EXC, + >(src, 0b01010101010101010101010101010101, a); + let e = _mm512_set_ph( + 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, + 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, + 20.0, 1.25, 20.0, 1.25, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_getmant_round_ph() { + let a = _mm512_set1_ph(10.0); + let r = _mm512_maskz_getmant_round_ph::< + _MM_MANT_NORM_P75_1P5, + _MM_MANT_SIGN_NAN, + _MM_FROUND_NO_EXC, + >(0b01010101010101010101010101010101, a); + let e = _mm512_set_ph( + 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, + 0.0, 1.25, 0.0, 
1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_getmant_sh() { + let a = _mm_setr_ph(15.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(10.0, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_getmant_sh::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(a, b); + let e = _mm_setr_ph(1.25, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_rsqrt_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); - let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); - let r = _mm_mask_rsqrt_sh(src, 0, a, b); - let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + unsafe fn test_mm_mask_getmant_sh() { + let a = _mm_setr_ph(15.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(10.0, 20., 21., 22., 23., 24., 25., 26.); + let src = _mm_setr_ph(20.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_getmant_sh::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(src, 0, a, b); + let e = _mm_setr_ph(20.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_mask_rsqrt_sh(src, 1, a, b); - let e = _mm_setr_ph(0.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let r = _mm_mask_getmant_sh::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(src, 1, a, b); + let e = _mm_setr_ph(1.25, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_rsqrt_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); - let r = _mm_maskz_rsqrt_sh(0, a, b); - let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + unsafe fn test_mm_maskz_getmant_sh() { + let a = _mm_setr_ph(15.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(10.0, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_maskz_getmant_sh::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(0, a, b); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_maskz_rsqrt_sh(1, a, b); - let e = _mm_setr_ph(0.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let r = _mm_maskz_getmant_sh::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(1, a, b); + let e = _mm_setr_ph(1.25, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_sqrt_ph() { - let a = _mm_set1_ph(4.0); - let r = _mm_sqrt_ph(a); - let e = _mm_set1_ph(2.0); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_getmant_round_sh() { + let a = _mm_setr_ph(15.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(10.0, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_getmant_round_sh::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN, _MM_FROUND_NO_EXC>( + a, b, + ); + let e = _mm_setr_ph(1.25, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_getmant_round_sh() { + let a = _mm_setr_ph(15.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(10.0, 20., 21., 22., 23., 24., 25., 26.); + let src = _mm_setr_ph(20.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_getmant_round_sh::< + _MM_MANT_NORM_P75_1P5, + _MM_MANT_SIGN_NAN, + _MM_FROUND_NO_EXC, + >(src, 0, a, b); + let e = _mm_setr_ph(20.0, 10., 11., 12., 13., 14., 15., 
16.); + assert_eq_m128h(r, e); + let r = _mm_mask_getmant_round_sh::< + _MM_MANT_NORM_P75_1P5, + _MM_MANT_SIGN_NAN, + _MM_FROUND_NO_EXC, + >(src, 1, a, b); + let e = _mm_setr_ph(1.25, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_getmant_round_sh() { + let a = _mm_setr_ph(15.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(10.0, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_maskz_getmant_round_sh::< + _MM_MANT_NORM_P75_1P5, + _MM_MANT_SIGN_NAN, + _MM_FROUND_NO_EXC, + >(0, a, b); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + let r = _mm_maskz_getmant_round_sh::< + _MM_MANT_NORM_P75_1P5, + _MM_MANT_SIGN_NAN, + _MM_FROUND_NO_EXC, + >(1, a, b); + let e = _mm_setr_ph(1.25, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_sqrt_ph() { - let a = _mm_set1_ph(4.0); - let src = _mm_set1_ph(1.0); - let r = _mm_mask_sqrt_ph(src, 0b01010101, a); - let e = _mm_set_ph(1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0); + unsafe fn test_mm_roundscale_ph() { + let a = _mm_set1_ph(1.1); + let r = _mm_roundscale_ph::<0>(a); + let e = _mm_set1_ph(1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_sqrt_ph() { - let a = _mm_set1_ph(4.0); - let r = _mm_maskz_sqrt_ph(0b01010101, a); - let e = _mm_set_ph(0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0); + unsafe fn test_mm_mask_roundscale_ph() { + let a = _mm_set1_ph(1.1); + let src = _mm_set1_ph(2.0); + let r = _mm_mask_roundscale_ph::<0>(src, 0b01010101, a); + let e = _mm_set_ph(2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_roundscale_ph() { + let a = _mm_set1_ph(1.1); + let r = _mm_maskz_roundscale_ph::<0>(0b01010101, a); + let e = _mm_set_ph(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_sqrt_ph() { - let a = _mm256_set1_ph(4.0); - let r = _mm256_sqrt_ph(a); - let e = _mm256_set1_ph(2.0); + unsafe fn test_mm256_roundscale_ph() { + let a = _mm256_set1_ph(1.1); + let r = _mm256_roundscale_ph::<0>(a); + let e = _mm256_set1_ph(1.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_sqrt_ph() { - let a = _mm256_set1_ph(4.0); - let src = _mm256_set1_ph(1.0); - let r = _mm256_mask_sqrt_ph(src, 0b0101010101010101, a); + unsafe fn test_mm256_mask_roundscale_ph() { + let a = _mm256_set1_ph(1.1); + let src = _mm256_set1_ph(2.0); + let r = _mm256_mask_roundscale_ph::<0>(src, 0b0101010101010101, a); let e = _mm256_set_ph( - 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, + 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_sqrt_ph() { - let a = _mm256_set1_ph(4.0); - let r = _mm256_maskz_sqrt_ph(0b0101010101010101, a); + unsafe fn test_mm256_maskz_roundscale_ph() { + let a = _mm256_set1_ph(1.1); + let r = _mm256_maskz_roundscale_ph::<0>(0b0101010101010101, a); let e = _mm256_set_ph( - 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, + 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn 
test_mm512_sqrt_ph() { - let a = _mm512_set1_ph(4.0); - let r = _mm512_sqrt_ph(a); - let e = _mm512_set1_ph(2.0); + unsafe fn test_mm512_roundscale_ph() { + let a = _mm512_set1_ph(1.1); + let r = _mm512_roundscale_ph::<0>(a); + let e = _mm512_set1_ph(1.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_sqrt_ph() { - let a = _mm512_set1_ph(4.0); - let src = _mm512_set1_ph(1.0); - let r = _mm512_mask_sqrt_ph(src, 0b01010101010101010101010101010101, a); + unsafe fn test_mm512_mask_roundscale_ph() { + let a = _mm512_set1_ph(1.1); + let src = _mm512_set1_ph(2.0); + let r = _mm512_mask_roundscale_ph::<0>(src, 0b01010101010101010101010101010101, a); let e = _mm512_set_ph( - 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, - 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, + 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, + 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_sqrt_ph() { - let a = _mm512_set1_ph(4.0); - let r = _mm512_maskz_sqrt_ph(0b01010101010101010101010101010101, a); + unsafe fn test_mm512_maskz_roundscale_ph() { + let a = _mm512_set1_ph(1.1); + let r = _mm512_maskz_roundscale_ph::<0>(0b01010101010101010101010101010101, a); let e = _mm512_set_ph( - 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, - 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, + 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, + 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_sqrt_round_ph() { - let a = _mm512_set1_ph(4.0); - let r = _mm512_sqrt_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a); - let e = _mm512_set1_ph(2.0); + unsafe fn test_mm512_roundscale_round_ph() { + let a = _mm512_set1_ph(1.1); + let r = _mm512_roundscale_round_ph::<0, _MM_FROUND_NO_EXC>(a); + let e = _mm512_set1_ph(1.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_sqrt_round_ph() { - let a = _mm512_set1_ph(4.0); - let src = _mm512_set1_ph(1.0); - let r = _mm512_mask_sqrt_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_mask_roundscale_round_ph() { + let a = _mm512_set1_ph(1.1); + let src = _mm512_set1_ph(2.0); + let r = _mm512_mask_roundscale_round_ph::<0, _MM_FROUND_NO_EXC>( src, 0b01010101010101010101010101010101, a, ); let e = _mm512_set_ph( - 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, - 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, + 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, + 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_sqrt_round_ph() { - let a = _mm512_set1_ph(4.0); - let r = _mm512_maskz_sqrt_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_maskz_roundscale_round_ph() { + let a = _mm512_set1_ph(1.1); + let r = _mm512_maskz_roundscale_round_ph::<0, _MM_FROUND_NO_EXC>( 0b01010101010101010101010101010101, a, ); let e = _mm512_set_ph( - 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 
0.0, 2.0, 0.0, - 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, + 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, + 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_sqrt_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); - let r = _mm_sqrt_sh(a, b); - let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + unsafe fn test_mm_roundscale_sh() { + let a = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(1.1, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_roundscale_sh::<0>(a, b); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_sqrt_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); - let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); - let r = _mm_mask_sqrt_sh(src, 0, a, b); - let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + unsafe fn test_mm_mask_roundscale_sh() { + let a = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(1.1, 20., 21., 22., 23., 24., 25., 26.); + let src = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_roundscale_sh::<0>(src, 0, a, b); + let e = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_mask_sqrt_sh(src, 1, a, b); - let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let r = _mm_mask_roundscale_sh::<0>(src, 1, a, b); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_sqrt_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); - let r = _mm_maskz_sqrt_sh(0, a, b); - let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + unsafe fn test_mm_maskz_roundscale_sh() { + let a = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(1.1, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_maskz_roundscale_sh::<0>(0, a, b); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_maskz_sqrt_sh(1, a, b); - let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let r = _mm_maskz_roundscale_sh::<0>(1, a, b); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_sqrt_round_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); - let r = _mm_sqrt_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + unsafe fn test_mm_roundscale_round_sh() { + let a = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(1.1, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_roundscale_round_sh::<0, _MM_FROUND_NO_EXC>(a, b); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_sqrt_round_sh() { - 
let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); - let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); - let r = _mm_mask_sqrt_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 0, a, b, - ); - let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + unsafe fn test_mm_mask_roundscale_round_sh() { + let a = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(1.1, 20., 21., 22., 23., 24., 25., 26.); + let src = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_roundscale_round_sh::<0, _MM_FROUND_NO_EXC>(src, 0, a, b); + let e = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_mask_sqrt_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 1, a, b, - ); - let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let r = _mm_mask_roundscale_round_sh::<0, _MM_FROUND_NO_EXC>(src, 1, a, b); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_sqrt_round_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); - let r = - _mm_maskz_sqrt_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); - let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + unsafe fn test_mm_maskz_roundscale_round_sh() { + let a = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(1.1, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_maskz_roundscale_round_sh::<0, _MM_FROUND_NO_EXC>(0, a, b); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = - _mm_maskz_sqrt_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); - let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let r = _mm_maskz_roundscale_round_sh::<0, _MM_FROUND_NO_EXC>(1, a, b); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_max_ph() { - let a = _mm_set1_ph(2.0); - let b = _mm_set1_ph(1.0); - let r = _mm_max_ph(a, b); - let e = _mm_set1_ph(2.0); + unsafe fn test_mm_scalef_ph() { + let a = _mm_set1_ph(1.); + let b = _mm_set1_ph(3.); + let r = _mm_scalef_ph(a, b); + let e = _mm_set1_ph(8.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_max_ph() { - let a = _mm_set1_ph(2.0); - let b = _mm_set1_ph(1.0); - let src = _mm_set1_ph(3.0); - let r = _mm_mask_max_ph(src, 0b01010101, a, b); - let e = _mm_set_ph(3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0); + unsafe fn test_mm_mask_scalef_ph() { + let a = _mm_set1_ph(1.); + let b = _mm_set1_ph(3.); + let src = _mm_set1_ph(2.); + let r = _mm_mask_scalef_ph(src, 0b01010101, a, b); + let e = _mm_set_ph(2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_max_ph() { - let a = _mm_set1_ph(2.0); - let b = _mm_set1_ph(1.0); - let r = _mm_maskz_max_ph(0b01010101, a, b); - let e = _mm_set_ph(0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0); + unsafe fn test_mm_maskz_scalef_ph() { + let a = _mm_set1_ph(1.); + let b = _mm_set1_ph(3.); + let r = _mm_maskz_scalef_ph(0b01010101, a, b); + let e = _mm_set_ph(0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0); 
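+ // scalef computes `a * 2^b` per lane, so 1.0 * 2^3 == 8.0; lanes cleared by the zero-mask stay 0.0 (this holds for every scalef test in this block)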
assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_max_ph() { - let a = _mm256_set1_ph(2.0); - let b = _mm256_set1_ph(1.0); - let r = _mm256_max_ph(a, b); - let e = _mm256_set1_ph(2.0); + unsafe fn test_mm256_scalef_ph() { + let a = _mm256_set1_ph(1.); + let b = _mm256_set1_ph(3.); + let r = _mm256_scalef_ph(a, b); + let e = _mm256_set1_ph(8.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_max_ph() { - let a = _mm256_set1_ph(2.0); - let b = _mm256_set1_ph(1.0); - let src = _mm256_set1_ph(3.0); - let r = _mm256_mask_max_ph(src, 0b0101010101010101, a, b); + unsafe fn test_mm256_mask_scalef_ph() { + let a = _mm256_set1_ph(1.); + let b = _mm256_set1_ph(3.); + let src = _mm256_set1_ph(2.); + let r = _mm256_mask_scalef_ph(src, 0b0101010101010101, a, b); let e = _mm256_set_ph( - 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, + 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_max_ph() { - let a = _mm256_set1_ph(2.0); - let b = _mm256_set1_ph(1.0); - let r = _mm256_maskz_max_ph(0b0101010101010101, a, b); + unsafe fn test_mm256_maskz_scalef_ph() { + let a = _mm256_set1_ph(1.); + let b = _mm256_set1_ph(3.); + let r = _mm256_maskz_scalef_ph(0b0101010101010101, a, b); let e = _mm256_set_ph( - 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, + 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_max_ph() { - let a = _mm512_set1_ph(2.0); - let b = _mm512_set1_ph(1.0); - let r = _mm512_max_ph(a, b); - let e = _mm512_set1_ph(2.0); + unsafe fn test_mm512_scalef_ph() { + let a = _mm512_set1_ph(1.); + let b = _mm512_set1_ph(3.); + let r = _mm512_scalef_ph(a, b); + let e = _mm512_set1_ph(8.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_max_ph() { - let a = _mm512_set1_ph(2.0); - let b = _mm512_set1_ph(1.0); - let src = _mm512_set1_ph(3.0); - let r = _mm512_mask_max_ph(src, 0b01010101010101010101010101010101, a, b); + unsafe fn test_mm512_mask_scalef_ph() { + let a = _mm512_set1_ph(1.); + let b = _mm512_set1_ph(3.); + let src = _mm512_set1_ph(2.); + let r = _mm512_mask_scalef_ph(src, 0b01010101010101010101010101010101, a, b); let e = _mm512_set_ph( - 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, - 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, + 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, + 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_max_ph() { - let a = _mm512_set1_ph(2.0); - let b = _mm512_set1_ph(1.0); - let r = _mm512_maskz_max_ph(0b01010101010101010101010101010101, a, b); + unsafe fn test_mm512_maskz_scalef_ph() { + let a = _mm512_set1_ph(1.); + let b = _mm512_set1_ph(3.); + let r = _mm512_maskz_scalef_ph(0b01010101010101010101010101010101, a, b); let e = _mm512_set_ph( - 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, - 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, + 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, + 8.0, 0.0, 8.0, 
0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_max_round_ph() { - let a = _mm512_set1_ph(2.0); - let b = _mm512_set1_ph(1.0); - let r = _mm512_max_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm512_set1_ph(2.0); + unsafe fn test_mm512_scalef_round_ph() { + let a = _mm512_set1_ph(1.); + let b = _mm512_set1_ph(3.); + let r = _mm512_scalef_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set1_ph(8.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_max_round_ph() { - let a = _mm512_set1_ph(2.0); - let b = _mm512_set1_ph(1.0); - let src = _mm512_set1_ph(3.0); - let r = _mm512_mask_max_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_mask_scalef_round_ph() { + let a = _mm512_set1_ph(1.); + let b = _mm512_set1_ph(3.); + let src = _mm512_set1_ph(2.); + let r = _mm512_mask_scalef_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( src, 0b01010101010101010101010101010101, a, b, ); let e = _mm512_set_ph( - 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, - 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, + 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, + 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_max_round_ph() { - let a = _mm512_set1_ph(2.0); - let b = _mm512_set1_ph(1.0); - let r = _mm512_maskz_max_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_maskz_scalef_round_ph() { + let a = _mm512_set1_ph(1.); + let b = _mm512_set1_ph(3.); + let r = _mm512_maskz_scalef_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( 0b01010101010101010101010101010101, a, b, ); let e = _mm512_set_ph( - 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, - 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, + 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, + 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_max_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); - let r = _mm_max_sh(a, b); - let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + unsafe fn test_mm_scalef_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_scalef_sh(a, b); + let e = _mm_setr_ph(8.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_max_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); - let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); - let r = _mm_mask_max_sh(src, 0, a, b); - let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + unsafe fn test_mm_mask_scalef_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); + let src = 
_mm_setr_ph(2.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_scalef_sh(src, 0, a, b); + let e = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_mask_max_sh(src, 1, a, b); - let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let r = _mm_mask_scalef_sh(src, 1, a, b); + let e = _mm_setr_ph(8.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_max_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); - let r = _mm_maskz_max_sh(0, a, b); - let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + unsafe fn test_mm_maskz_scalef_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_maskz_scalef_sh(0, a, b); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_maskz_max_sh(1, a, b); - let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let r = _mm_maskz_scalef_sh(1, a, b); + let e = _mm_setr_ph(8.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_max_round_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); - let r = _mm_max_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + unsafe fn test_mm_scalef_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_scalef_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_setr_ph(8.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_max_round_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); - let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); - let r = _mm_mask_max_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm_mask_scalef_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); + let src = _mm_setr_ph(2.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_scalef_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( src, 0, a, b, ); - let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let e = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_mask_max_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm_mask_scalef_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( src, 1, a, b, ); - let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let e = _mm_setr_ph(8.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_max_round_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + unsafe fn test_mm_maskz_scalef_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = 
_mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); let r = - _mm_maskz_max_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); - let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + _mm_maskz_scalef_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); let r = - _mm_maskz_max_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); - let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + _mm_maskz_scalef_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_setr_ph(8.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_min_ph() { - let a = _mm_set1_ph(2.0); - let b = _mm_set1_ph(1.0); - let r = _mm_min_ph(a, b); - let e = _mm_set1_ph(1.0); + unsafe fn test_mm_reduce_ph() { + let a = _mm_set1_ph(1.25); + let r = _mm_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>(a); + let e = _mm_set1_ph(0.25); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_min_ph() { - let a = _mm_set1_ph(2.0); - let b = _mm_set1_ph(1.0); - let src = _mm_set1_ph(3.0); - let r = _mm_mask_min_ph(src, 0b01010101, a, b); - let e = _mm_set_ph(3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0); + unsafe fn test_mm_mask_reduce_ph() { + let a = _mm_set1_ph(1.25); + let src = _mm_set1_ph(2.0); + let r = _mm_mask_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>(src, 0b01010101, a); + let e = _mm_set_ph(2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_min_ph() { - let a = _mm_set1_ph(2.0); - let b = _mm_set1_ph(1.0); - let r = _mm_maskz_min_ph(0b01010101, a, b); - let e = _mm_set_ph(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0); + unsafe fn test_mm_maskz_reduce_ph() { + let a = _mm_set1_ph(1.25); + let r = _mm_maskz_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>(0b01010101, a); + let e = _mm_set_ph(0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_min_ph() { - let a = _mm256_set1_ph(2.0); - let b = _mm256_set1_ph(1.0); - let r = _mm256_min_ph(a, b); - let e = _mm256_set1_ph(1.0); + unsafe fn test_mm256_reduce_ph() { + let a = _mm256_set1_ph(1.25); + let r = _mm256_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>(a); + let e = _mm256_set1_ph(0.25); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_min_ph() { - let a = _mm256_set1_ph(2.0); - let b = _mm256_set1_ph(1.0); - let src = _mm256_set1_ph(3.0); - let r = _mm256_mask_min_ph(src, 0b0101010101010101, a, b); + unsafe fn test_mm256_mask_reduce_ph() { + let a = _mm256_set1_ph(1.25); + let src = _mm256_set1_ph(2.0); + let r = _mm256_mask_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>(src, 0b0101010101010101, a); let e = _mm256_set_ph( - 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, + 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_min_ph() { - let a = _mm256_set1_ph(2.0); - let b = _mm256_set1_ph(1.0); - let r = _mm256_maskz_min_ph(0b0101010101010101, a, b); + unsafe fn test_mm256_maskz_reduce_ph() { + let a = _mm256_set1_ph(1.25); + let r = _mm256_maskz_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>(0b0101010101010101, 
a); let e = _mm256_set_ph( - 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, + 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_min_ph() { - let a = _mm512_set1_ph(2.0); - let b = _mm512_set1_ph(1.0); - let r = _mm512_min_ph(a, b); - let e = _mm512_set1_ph(1.0); + unsafe fn test_mm512_reduce_ph() { + let a = _mm512_set1_ph(1.25); + let r = _mm512_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>(a); + let e = _mm512_set1_ph(0.25); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_min_ph() { - let a = _mm512_set1_ph(2.0); - let b = _mm512_set1_ph(1.0); - let src = _mm512_set1_ph(3.0); - let r = _mm512_mask_min_ph(src, 0b01010101010101010101010101010101, a, b); + unsafe fn test_mm512_mask_reduce_ph() { + let a = _mm512_set1_ph(1.25); + let src = _mm512_set1_ph(2.0); + let r = _mm512_mask_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>( + src, + 0b01010101010101010101010101010101, + a, + ); let e = _mm512_set_ph( - 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, - 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, + 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, + 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_min_ph() { - let a = _mm512_set1_ph(2.0); - let b = _mm512_set1_ph(1.0); - let r = _mm512_maskz_min_ph(0b01010101010101010101010101010101, a, b); + unsafe fn test_mm512_maskz_reduce_ph() { + let a = _mm512_set1_ph(1.25); + let r = _mm512_maskz_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>( + 0b01010101010101010101010101010101, + a, + ); let e = _mm512_set_ph( - 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, - 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, + 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, + 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_min_round_ph() { - let a = _mm512_set1_ph(2.0); - let b = _mm512_set1_ph(1.0); - let r = _mm512_min_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm512_set1_ph(1.0); + unsafe fn test_mm512_reduce_round_ph() { + let a = _mm512_set1_ph(1.25); + let r = _mm512_reduce_round_ph::<{ 16 | _MM_FROUND_TO_ZERO }, _MM_FROUND_NO_EXC>(a); + let e = _mm512_set1_ph(0.25); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_min_round_ph() { - let a = _mm512_set1_ph(2.0); - let b = _mm512_set1_ph(1.0); - let src = _mm512_set1_ph(3.0); - let r = _mm512_mask_min_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_mask_reduce_round_ph() { + let a = _mm512_set1_ph(1.25); + let src = _mm512_set1_ph(2.0); + let r = _mm512_mask_reduce_round_ph::<{ 16 | _MM_FROUND_TO_ZERO }, _MM_FROUND_NO_EXC>( src, 0b01010101010101010101010101010101, a, - b, ); let e = _mm512_set_ph( - 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, - 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, + 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, + 2.0, 
0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_min_round_ph() { - let a = _mm512_set1_ph(2.0); - let b = _mm512_set1_ph(1.0); - let r = _mm512_maskz_min_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_maskz_reduce_round_ph() { + let a = _mm512_set1_ph(1.25); + let r = _mm512_maskz_reduce_round_ph::<{ 16 | _MM_FROUND_TO_ZERO }, _MM_FROUND_NO_EXC>( 0b01010101010101010101010101010101, a, - b, ); let e = _mm512_set_ph( - 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, - 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, + 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, + 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_min_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); - let r = _mm_min_sh(a, b); - let e = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + unsafe fn test_mm_reduce_sh() { + let a = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(1.25, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_reduce_sh::<{ 16 | _MM_FROUND_TO_ZERO }>(a, b); + let e = _mm_setr_ph(0.25, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_min_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); - let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); - let r = _mm_mask_min_sh(src, 0, a, b); - let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + unsafe fn test_mm_mask_reduce_sh() { + let a = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(1.25, 20., 21., 22., 23., 24., 25., 26.); + let src = _mm_setr_ph(2.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_reduce_sh::<{ 16 | _MM_FROUND_TO_ZERO }>(src, 0, a, b); + let e = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_mask_min_sh(src, 1, a, b); - let e = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let r = _mm_mask_reduce_sh::<{ 16 | _MM_FROUND_TO_ZERO }>(src, 1, a, b); + let e = _mm_setr_ph(0.25, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_min_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); - let r = _mm_maskz_min_sh(0, a, b); - let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + unsafe fn test_mm_maskz_reduce_sh() { + let a = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(1.25, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_maskz_reduce_sh::<{ 16 | _MM_FROUND_TO_ZERO }>(0, a, b); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_maskz_min_sh(1, a, b); - let e = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let r = _mm_maskz_reduce_sh::<{ 16 | _MM_FROUND_TO_ZERO }>(1, a, b); + let e = _mm_setr_ph(0.25, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } 
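+ // The reduce tests use IMM8 = 16 | _MM_FROUND_TO_ZERO: imm8[7:4] == 1 keeps one fraction bit, so inputs are truncated to multiples of 0.5 and the residue is returned (1.25 - 1.0 == 0.25).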
#[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_min_round_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); - let r = _mm_min_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + unsafe fn test_mm_reduce_round_sh() { + let a = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(1.25, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_reduce_round_sh::<{ 16 | _MM_FROUND_TO_ZERO }, _MM_FROUND_NO_EXC>(a, b); + let e = _mm_setr_ph(0.25, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_min_round_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); - let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); - let r = _mm_mask_min_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm_mask_reduce_round_sh() { + let a = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(1.25, 20., 21., 22., 23., 24., 25., 26.); + let src = _mm_setr_ph(2.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_reduce_round_sh::<{ 16 | _MM_FROUND_TO_ZERO }, _MM_FROUND_NO_EXC>( src, 0, a, b, ); - let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let e = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_mask_min_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm_mask_reduce_round_sh::<{ 16 | _MM_FROUND_TO_ZERO }, _MM_FROUND_NO_EXC>( src, 1, a, b, ); - let e = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let e = _mm_setr_ph(0.25, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_min_round_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + unsafe fn test_mm_maskz_reduce_round_sh() { + let a = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(1.25, 20., 21., 22., 23., 24., 25., 26.); let r = - _mm_maskz_min_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); - let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + _mm_maskz_reduce_round_sh::<{ 16 | _MM_FROUND_TO_ZERO }, _MM_FROUND_NO_EXC>(0, a, b); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); let r = - _mm_maskz_min_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); - let e = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + _mm_maskz_reduce_round_sh::<{ 16 | _MM_FROUND_TO_ZERO }, _MM_FROUND_NO_EXC>(1, a, b); + let e = _mm_setr_ph(0.25, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_getexp_ph() { - let a = _mm_set1_ph(3.0); - let r = _mm_getexp_ph(a); - let e = _mm_set1_ph(1.0); - assert_eq_m128h(r, e); + unsafe fn test_mm_reduce_add_ph() { + let a = _mm_set1_ph(2.0); + let r = _mm_reduce_add_ph(a); + assert_eq!(r, 16.0); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_getexp_ph() { - let a = _mm_set1_ph(3.0); - let src = _mm_set1_ph(4.0); - let r = _mm_mask_getexp_ph(src, 0b01010101, a); - let e = _mm_set_ph(4.0, 1.0, 4.0, 1.0, 
4.0, 1.0, 4.0, 1.0); - assert_eq_m128h(r, e); + unsafe fn test_mm256_reduce_add_ph() { + let a = _mm256_set1_ph(2.0); + let r = _mm256_reduce_add_ph(a); + assert_eq!(r, 32.0); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_getexp_ph() { - let a = _mm_set1_ph(3.0); - let r = _mm_maskz_getexp_ph(0b01010101, a); - let e = _mm_set_ph(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0); - assert_eq_m128h(r, e); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_reduce_add_ph() { + let a = _mm512_set1_ph(2.0); + let r = _mm512_reduce_add_ph(a); + assert_eq!(r, 64.0); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_getexp_ph() { - let a = _mm256_set1_ph(3.0); - let r = _mm256_getexp_ph(a); - let e = _mm256_set1_ph(1.0); - assert_eq_m256h(r, e); + unsafe fn test_mm_reduce_mul_ph() { + let a = _mm_set1_ph(2.0); + let r = _mm_reduce_mul_ph(a); + assert_eq!(r, 256.0); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_getexp_ph() { - let a = _mm256_set1_ph(3.0); - let src = _mm256_set1_ph(4.0); - let r = _mm256_mask_getexp_ph(src, 0b0101010101010101, a); - let e = _mm256_set_ph( - 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, - ); - assert_eq_m256h(r, e); + unsafe fn test_mm256_reduce_mul_ph() { + let a = _mm256_set1_ph(2.0); + let r = _mm256_reduce_mul_ph(a); + assert_eq!(r, 65536.0); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_getexp_ph() { - let a = _mm256_set1_ph(3.0); - let r = _mm256_maskz_getexp_ph(0b0101010101010101, a); - let e = _mm256_set_ph( - 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, - ); - assert_eq_m256h(r, e); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_reduce_mul_ph() { + let a = _mm512_set1_ph(2.0); + let r = _mm512_reduce_mul_ph(a); + assert_eq!(r, 16777216.0); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_getexp_ph() { - let a = _mm512_set1_ph(3.0); - let r = _mm512_getexp_ph(a); - let e = _mm512_set1_ph(1.0); - assert_eq_m512h(r, e); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_reduce_max_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm_reduce_max_ph(a); + assert_eq!(r, 8.0); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_getexp_ph() { - let a = _mm512_set1_ph(3.0); - let src = _mm512_set1_ph(4.0); - let r = _mm512_mask_getexp_ph(src, 0b01010101010101010101010101010101, a); - let e = _mm512_set_ph( - 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, - 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_reduce_max_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - assert_eq_m512h(r, e); + let r = _mm256_reduce_max_ph(a); + assert_eq!(r, 16.0); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_getexp_ph() { - let a = _mm512_set1_ph(3.0); - let r = _mm512_maskz_getexp_ph(0b01010101010101010101010101010101, a); - let e = _mm512_set_ph( - 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, - 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, + unsafe fn test_mm512_reduce_max_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 
22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, ); - assert_eq_m512h(r, e); + let r = _mm512_reduce_max_ph(a); + assert_eq!(r, 32.0); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_getexp_round_ph() { - let a = _mm512_set1_ph(3.0); - let r = _mm512_getexp_round_ph::<_MM_FROUND_NO_EXC>(a); - let e = _mm512_set1_ph(1.0); - assert_eq_m512h(r, e); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_reduce_min_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm_reduce_min_ph(a); + assert_eq!(r, 1.0); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_getexp_round_ph() { - let a = _mm512_set1_ph(3.0); - let src = _mm512_set1_ph(4.0); - let r = _mm512_mask_getexp_round_ph::<_MM_FROUND_NO_EXC>( - src, - 0b01010101010101010101010101010101, - a, - ); - let e = _mm512_set_ph( - 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, - 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_reduce_min_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - assert_eq_m512h(r, e); + let r = _mm256_reduce_min_ph(a); + assert_eq!(r, 1.0); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_getexp_round_ph() { - let a = _mm512_set1_ph(3.0); - let r = _mm512_maskz_getexp_round_ph::<_MM_FROUND_NO_EXC>( - 0b01010101010101010101010101010101, - a, + unsafe fn test_mm512_reduce_min_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, ); - let e = _mm512_set_ph( - 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, - 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, + let r = _mm512_reduce_min_ph(a); + assert_eq!(r, 1.0); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_fpclass_ph_mask() { + let a = _mm_set_ph( + 1., + f16::INFINITY, + f16::NEG_INFINITY, + 0.0, + -0.0, + -2.0, + f16::NAN, + 5.9e-8, // Denormal ); - assert_eq_m512h(r, e); + let r = _mm_fpclass_ph_mask::<0x18>(a); // infinities + assert_eq!(r, 0b01100000); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_getexp_sh() { - let a = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); - let r = _mm_getexp_sh(a, b); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_fpclass_ph_mask() { + let a = _mm_set_ph( + 1., + f16::INFINITY, + f16::NEG_INFINITY, + 0.0, + -0.0, + -2.0, + f16::NAN, + 5.9e-8, // Denormal + ); + let r = _mm_mask_fpclass_ph_mask::<0x18>(0b01010101, a); + assert_eq!(r, 0b01000000); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_getexp_sh() { - let a = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); - let src = _mm_setr_ph(4.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_getexp_sh(src, 0, a, b); - let e = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_mask_getexp_sh(src, 1, a, b); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); + 
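+ // FPCLASS IMM8 = 0x18 selects categories 3 (positive infinity) and 4 (negative infinity), so only the two infinity lanes in each 8-element group set their mask bits.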
#[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_fpclass_ph_mask() { + let a = _mm256_set_ph( + 1., + f16::INFINITY, + f16::NEG_INFINITY, + 0.0, + -0.0, + -2.0, + f16::NAN, + 5.9e-8, // Denormal + 1., + f16::INFINITY, + f16::NEG_INFINITY, + 0.0, + -0.0, + -2.0, + f16::NAN, + 5.9e-8, // Denormal + ); + let r = _mm256_fpclass_ph_mask::<0x18>(a); // infinities + assert_eq!(r, 0b0110000001100000); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_getexp_sh() { - let a = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); - let r = _mm_maskz_getexp_sh(0, a, b); - let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_maskz_getexp_sh(1, a, b); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_fpclass_ph_mask() { + let a = _mm256_set_ph( + 1., + f16::INFINITY, + f16::NEG_INFINITY, + 0.0, + -0.0, + -2.0, + f16::NAN, + 5.9e-8, // Denormal + 1., + f16::INFINITY, + f16::NEG_INFINITY, + 0.0, + -0.0, + -2.0, + f16::NAN, + 5.9e-8, // Denormal + ); + let r = _mm256_mask_fpclass_ph_mask::<0x18>(0b0101010101010101, a); + assert_eq!(r, 0b0100000001000000); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_getexp_round_sh() { - let a = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); - let r = _mm_getexp_round_sh::<_MM_FROUND_NO_EXC>(a, b); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); + unsafe fn test_mm512_fpclass_ph_mask() { + let a = _mm512_set_ph( + 1., + f16::INFINITY, + f16::NEG_INFINITY, + 0.0, + -0.0, + -2.0, + f16::NAN, + 5.9e-8, // Denormal + 1., + f16::INFINITY, + f16::NEG_INFINITY, + 0.0, + -0.0, + -2.0, + f16::NAN, + 5.9e-8, // Denormal + 1., + f16::INFINITY, + f16::NEG_INFINITY, + 0.0, + -0.0, + -2.0, + f16::NAN, + 5.9e-8, // Denormal + 1., + f16::INFINITY, + f16::NEG_INFINITY, + 0.0, + -0.0, + -2.0, + f16::NAN, + 5.9e-8, // Denormal + ); + let r = _mm512_fpclass_ph_mask::<0x18>(a); // infinities + assert_eq!(r, 0b01100000011000000110000001100000); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_getexp_round_sh() { - let a = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); - let src = _mm_setr_ph(4.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_getexp_round_sh::<_MM_FROUND_NO_EXC>(src, 0, a, b); - let e = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_mask_getexp_round_sh::<_MM_FROUND_NO_EXC>(src, 1, a, b); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); + unsafe fn test_mm512_mask_fpclass_ph_mask() { + let a = _mm512_set_ph( + 1., + f16::INFINITY, + f16::NEG_INFINITY, + 0.0, + -0.0, + -2.0, + f16::NAN, + 5.9e-8, // Denormal + 1., + f16::INFINITY, + f16::NEG_INFINITY, + 0.0, + -0.0, + -2.0, + f16::NAN, + 5.9e-8, // Denormal + 1., + f16::INFINITY, + f16::NEG_INFINITY, + 0.0, + -0.0, + -2.0, + f16::NAN, + 5.9e-8, // Denormal + 1., + f16::INFINITY, + f16::NEG_INFINITY, + 0.0, + -0.0, + -2.0, + f16::NAN, + 5.9e-8, // Denormal + ); + let r = _mm512_mask_fpclass_ph_mask::<0x18>(0b01010101010101010101010101010101, a); + assert_eq!(r, 0b01000000010000000100000001000000); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_getexp_round_sh() { - let 
a = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); - let r = _mm_maskz_getexp_round_sh::<_MM_FROUND_NO_EXC>(0, a, b); - let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_maskz_getexp_round_sh::<_MM_FROUND_NO_EXC>(1, a, b); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_getmant_ph() { - let a = _mm_set1_ph(10.0); - let r = _mm_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(a); - let e = _mm_set1_ph(1.25); - assert_eq_m128h(r, e); + unsafe fn test_mm_fpclass_sh_mask() { + let a = _mm_set_sh(f16::INFINITY); + let r = _mm_fpclass_sh_mask::<0x18>(a); + assert_eq!(r, 1); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_getmant_ph() { - let a = _mm_set1_ph(10.0); - let src = _mm_set1_ph(20.0); - let r = _mm_mask_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(src, 0b01010101, a); - let e = _mm_set_ph(20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25); - assert_eq_m128h(r, e); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_fpclass_sh_mask() { + let a = _mm_set_sh(f16::INFINITY); + let r = _mm_mask_fpclass_sh_mask::<0x18>(0, a); + assert_eq!(r, 0); + let r = _mm_mask_fpclass_sh_mask::<0x18>(1, a); + assert_eq!(r, 1); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_getmant_ph() { - let a = _mm_set1_ph(10.0); - let r = _mm_maskz_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(0b01010101, a); - let e = _mm_set_ph(0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25); + unsafe fn test_mm_mask_blend_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(-1.0, -2.0, -3.0, -4.0, -5.0, -6.0, -7.0, -8.0); + let r = _mm_mask_blend_ph(0b01010101, a, b); + let e = _mm_set_ph(1.0, -2.0, 3.0, -4.0, 5.0, -6.0, 7.0, -8.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_getmant_ph() { - let a = _mm256_set1_ph(10.0); - let r = _mm256_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(a); - let e = _mm256_set1_ph(1.25); - assert_eq_m256h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_getmant_ph() { - let a = _mm256_set1_ph(10.0); - let src = _mm256_set1_ph(20.0); - let r = _mm256_mask_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>( - src, - 0b0101010101010101, - a, - ); - let e = _mm256_set_ph( - 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, - 20.0, 1.25, + unsafe fn test_mm256_mask_blend_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - assert_eq_m256h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_getmant_ph() { - let a = _mm256_set1_ph(10.0); - let r = _mm256_maskz_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>( - 0b0101010101010101, - a, + let b = _mm256_set_ph( + -1.0, -2.0, -3.0, -4.0, -5.0, -6.0, -7.0, -8.0, -9.0, -10.0, -11.0, -12.0, -13.0, + -14.0, -15.0, -16.0, ); + let r = _mm256_mask_blend_ph(0b0101010101010101, a, b); let e = _mm256_set_ph( - 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, + 1.0, -2.0, 3.0, -4.0, 5.0, -6.0, 7.0, -8.0, 9.0, -10.0, 11.0, -12.0, 13.0, -14.0, 15.0, + -16.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn 
test_mm512_getmant_ph() { - let a = _mm512_set1_ph(10.0); - let r = _mm512_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(a); - let e = _mm512_set1_ph(1.25); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_getmant_ph() { - let a = _mm512_set1_ph(10.0); - let src = _mm512_set1_ph(20.0); - let r = _mm512_mask_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>( - src, - 0b01010101010101010101010101010101, - a, - ); - let e = _mm512_set_ph( - 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, - 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, - 20.0, 1.25, 20.0, 1.25, + unsafe fn test_mm512_mask_blend_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, ); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_getmant_ph() { - let a = _mm512_set1_ph(10.0); - let r = _mm512_maskz_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>( - 0b01010101010101010101010101010101, - a, + let b = _mm512_set_ph( + -1.0, -2.0, -3.0, -4.0, -5.0, -6.0, -7.0, -8.0, -9.0, -10.0, -11.0, -12.0, -13.0, + -14.0, -15.0, -16.0, -17.0, -18.0, -19.0, -20.0, -21.0, -22.0, -23.0, -24.0, -25.0, + -26.0, -27.0, -28.0, -29.0, -30.0, -31.0, -32.0, ); + let r = _mm512_mask_blend_ph(0b01010101010101010101010101010101, a, b); let e = _mm512_set_ph( - 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, - 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, + 1.0, -2.0, 3.0, -4.0, 5.0, -6.0, 7.0, -8.0, 9.0, -10.0, 11.0, -12.0, 13.0, -14.0, 15.0, + -16.0, 17.0, -18.0, 19.0, -20.0, 21.0, -22.0, 23.0, -24.0, 25.0, -26.0, 27.0, -28.0, + 29.0, -30.0, 31.0, -32.0, ); assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_getmant_round_ph() { - let a = _mm512_set1_ph(10.0); - let r = - _mm512_getmant_round_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN, _MM_FROUND_NO_EXC>( - a, - ); - let e = _mm512_set1_ph(1.25); - assert_eq_m512h(r, e); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_permutex2var_ph() { + let a = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_setr_ph(9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let idx = _mm_setr_epi16(0, 2, 4, 6, 8, 10, 12, 14); + let r = _mm_permutex2var_ph(a, idx, b); + let e = _mm_setr_ph(1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_getmant_round_ph() { - let a = _mm512_set1_ph(10.0); - let src = _mm512_set1_ph(20.0); - let r = _mm512_mask_getmant_round_ph::< - _MM_MANT_NORM_P75_1P5, - _MM_MANT_SIGN_NAN, - _MM_FROUND_NO_EXC, - >(src, 0b01010101010101010101010101010101, a); - let e = _mm512_set_ph( - 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, - 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, - 20.0, 1.25, 20.0, 1.25, + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_permutex2var_ph() { + let a = _mm256_setr_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - assert_eq_m512h(r, e); + let b = _mm256_setr_ph( + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 
+ 31.0, 32.0, + ); + let idx = _mm256_setr_epi16(0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); + let r = _mm256_permutex2var_ph(a, idx, b); + let e = _mm256_setr_ph( + 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, 23.0, 25.0, 27.0, 29.0, + 31.0, + ); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_getmant_round_ph() { - let a = _mm512_set1_ph(10.0); - let r = _mm512_maskz_getmant_round_ph::< - _MM_MANT_NORM_P75_1P5, - _MM_MANT_SIGN_NAN, - _MM_FROUND_NO_EXC, - >(0b01010101010101010101010101010101, a); - let e = _mm512_set_ph( - 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, - 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, + unsafe fn test_mm512_permutex2var_ph() { + let a = _mm512_setr_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_setr_ph( + 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, + 47.0, 48.0, 49.0, 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, 60.0, + 61.0, 62.0, 63.0, 64.0, + ); + let idx = _mm512_set_epi16( + 62, 60, 58, 56, 54, 52, 50, 48, 46, 44, 42, 40, 38, 36, 34, 32, 30, 28, 26, 24, 22, 20, + 18, 16, 14, 12, 10, 8, 6, 4, 2, 0, + ); + let r = _mm512_permutex2var_ph(a, idx, b); + let e = _mm512_setr_ph( + 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, 23.0, 25.0, 27.0, 29.0, + 31.0, 33.0, 35.0, 37.0, 39.0, 41.0, 43.0, 45.0, 47.0, 49.0, 51.0, 53.0, 55.0, 57.0, + 59.0, 61.0, 63.0, ); assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_getmant_sh() { - let a = _mm_setr_ph(15.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(10.0, 20., 21., 22., 23., 24., 25., 26.); - let r = _mm_getmant_sh::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(a, b); - let e = _mm_setr_ph(1.25, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_getmant_sh() { - let a = _mm_setr_ph(15.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(10.0, 20., 21., 22., 23., 24., 25., 26.); - let src = _mm_setr_ph(20.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_getmant_sh::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(src, 0, a, b); - let e = _mm_setr_ph(20.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_mask_getmant_sh::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(src, 1, a, b); - let e = _mm_setr_ph(1.25, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_getmant_sh() { - let a = _mm_setr_ph(15.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(10.0, 20., 21., 22., 23., 24., 25., 26.); - let r = _mm_maskz_getmant_sh::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(0, a, b); - let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_maskz_getmant_sh::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(1, a, b); - let e = _mm_setr_ph(1.25, 10., 11., 12., 13., 14., 15., 16.); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_permutexvar_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let idx = _mm_set_epi16(0, 2, 4, 6, 1, 3, 5, 7); + let r = _mm_permutexvar_ph(idx, a); + let e = _mm_setr_ph(1.0, 3.0, 5.0, 7.0, 2.0, 4.0, 
6.0, 8.0); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_getmant_round_sh() { - let a = _mm_setr_ph(15.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(10.0, 20., 21., 22., 23., 24., 25., 26.); - let r = _mm_getmant_round_sh::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN, _MM_FROUND_NO_EXC>( - a, b, + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_permutexvar_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - let e = _mm_setr_ph(1.25, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_getmant_round_sh() { - let a = _mm_setr_ph(15.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(10.0, 20., 21., 22., 23., 24., 25., 26.); - let src = _mm_setr_ph(20.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_getmant_round_sh::< - _MM_MANT_NORM_P75_1P5, - _MM_MANT_SIGN_NAN, - _MM_FROUND_NO_EXC, - >(src, 0, a, b); - let e = _mm_setr_ph(20.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_mask_getmant_round_sh::< - _MM_MANT_NORM_P75_1P5, - _MM_MANT_SIGN_NAN, - _MM_FROUND_NO_EXC, - >(src, 1, a, b); - let e = _mm_setr_ph(1.25, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); + let idx = _mm256_set_epi16(0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15); + let r = _mm256_permutexvar_ph(idx, a); + let e = _mm256_setr_ph( + 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, + ); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_getmant_round_sh() { - let a = _mm_setr_ph(15.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(10.0, 20., 21., 22., 23., 24., 25., 26.); - let r = _mm_maskz_getmant_round_sh::< - _MM_MANT_NORM_P75_1P5, - _MM_MANT_SIGN_NAN, - _MM_FROUND_NO_EXC, - >(0, a, b); - let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_maskz_getmant_round_sh::< - _MM_MANT_NORM_P75_1P5, - _MM_MANT_SIGN_NAN, - _MM_FROUND_NO_EXC, - >(1, a, b); - let e = _mm_setr_ph(1.25, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); + unsafe fn test_mm512_permutexvar_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let idx = _mm512_set_epi16( + 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 1, 3, 5, 7, 9, 11, 13, 15, + 17, 19, 21, 23, 25, 27, 29, 31, + ); + let r = _mm512_permutexvar_ph(idx, a); + let e = _mm512_setr_ph( + 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, 23.0, 25.0, 27.0, 29.0, + 31.0, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0, 22.0, 24.0, 26.0, 28.0, + 30.0, 32.0, + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_roundscale_ph() { - let a = _mm_set1_ph(1.1); - let r = _mm_roundscale_ph::<0>(a); - let e = _mm_set1_ph(1.0); + unsafe fn test_mm_cvtepi16_ph() { + let a = _mm_set_epi16(1, 2, 3, 4, 5, 6, 7, 8); + let r = _mm_cvtepi16_ph(a); + let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_roundscale_ph() { - let a = _mm_set1_ph(1.1); - let src = _mm_set1_ph(2.0); - let r = _mm_mask_roundscale_ph::<0>(src, 0b01010101, a); - let e = 
_mm_set_ph(2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0); + unsafe fn test_mm_mask_cvtepi16_ph() { + let a = _mm_set_epi16(1, 2, 3, 4, 5, 6, 7, 8); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm_mask_cvtepi16_ph(src, 0b01010101, a); + let e = _mm_set_ph(10., 2., 12., 4., 14., 6., 16., 8.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_roundscale_ph() { - let a = _mm_set1_ph(1.1); - let r = _mm_maskz_roundscale_ph::<0>(0b01010101, a); - let e = _mm_set_ph(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0); + unsafe fn test_mm_maskz_cvtepi16_ph() { + let a = _mm_set_epi16(1, 2, 3, 4, 5, 6, 7, 8); + let r = _mm_maskz_cvtepi16_ph(0b01010101, a); + let e = _mm_set_ph(0., 2., 0., 4., 0., 6., 0., 8.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_roundscale_ph() { - let a = _mm256_set1_ph(1.1); - let r = _mm256_roundscale_ph::<0>(a); - let e = _mm256_set1_ph(1.0); + unsafe fn test_mm256_cvtepi16_ph() { + let a = _mm256_set_epi16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let r = _mm256_cvtepi16_ph(a); + let e = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_roundscale_ph() { - let a = _mm256_set1_ph(1.1); - let src = _mm256_set1_ph(2.0); - let r = _mm256_mask_roundscale_ph::<0>(src, 0b0101010101010101, a); + unsafe fn test_mm256_mask_cvtepi16_ph() { + let a = _mm256_set_epi16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let src = _mm256_set_ph( + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., + ); + let r = _mm256_mask_cvtepi16_ph(src, 0b0101010101010101, a); let e = _mm256_set_ph( - 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, + 10., 2., 12., 4., 14., 6., 16., 8., 18., 10., 20., 12., 22., 14., 24., 16., ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_roundscale_ph() { - let a = _mm256_set1_ph(1.1); - let r = _mm256_maskz_roundscale_ph::<0>(0b0101010101010101, a); + unsafe fn test_mm256_maskz_cvtepi16_ph() { + let a = _mm256_set_epi16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let r = _mm256_maskz_cvtepi16_ph(0b0101010101010101, a); let e = _mm256_set_ph( - 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, + 0., 2., 0., 4., 0., 6., 0., 8., 0., 10., 0., 12., 0., 14., 0., 16., ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_roundscale_ph() { - let a = _mm512_set1_ph(1.1); - let r = _mm512_roundscale_ph::<0>(a); - let e = _mm512_set1_ph(1.0); + unsafe fn test_mm512_cvtepi16_ph() { + let a = _mm512_set_epi16( + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, + ); + let r = _mm512_cvtepi16_ph(a); + let e = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_roundscale_ph() { - let a = _mm512_set1_ph(1.1); - let src = _mm512_set1_ph(2.0); - let r = _mm512_mask_roundscale_ph::<0>(src, 0b01010101010101010101010101010101, a); + unsafe fn test_mm512_mask_cvtepi16_ph() { + let a = _mm512_set_epi16( 
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, + ); + let src = _mm512_set_ph( + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., 26., + 27., 28., 29., 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., 40., 41., + ); + let r = _mm512_mask_cvtepi16_ph(src, 0b01010101010101010101010101010101, a); let e = _mm512_set_ph( - 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, - 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, + 10., 2., 12., 4., 14., 6., 16., 8., 18., 10., 20., 12., 22., 14., 24., 16., 26., 18., + 28., 20., 30., 22., 32., 24., 34., 26., 36., 28., 38., 30., 40., 32., ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_roundscale_ph() { - let a = _mm512_set1_ph(1.1); - let r = _mm512_maskz_roundscale_ph::<0>(0b01010101010101010101010101010101, a); + unsafe fn test_mm512_maskz_cvtepi16_ph() { + let a = _mm512_set_epi16( + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, + ); + let r = _mm512_maskz_cvtepi16_ph(0b01010101010101010101010101010101, a); let e = _mm512_set_ph( - 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, - 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, + 0., 2., 0., 4., 0., 6., 0., 8., 0., 10., 0., 12., 0., 14., 0., 16., 0., 18., 0., 20., + 0., 22., 0., 24., 0., 26., 0., 28., 0., 30., 0., 32., ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_roundscale_round_ph() { - let a = _mm512_set1_ph(1.1); - let r = _mm512_roundscale_round_ph::<0, _MM_FROUND_NO_EXC>(a); - let e = _mm512_set1_ph(1.0); + unsafe fn test_mm512_cvt_roundepi16_ph() { + let a = _mm512_set_epi16( + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, + ); + let r = _mm512_cvt_roundepi16_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a); + let e = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_roundscale_round_ph() { - let a = _mm512_set1_ph(1.1); - let src = _mm512_set1_ph(2.0); - let r = _mm512_mask_roundscale_round_ph::<0, _MM_FROUND_NO_EXC>( + unsafe fn test_mm512_mask_cvt_roundepi16_ph() { + let a = _mm512_set_epi16( + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, + ); + let src = _mm512_set_ph( + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., 26., + 27., 28., 29., 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., 40., 41., + ); + let r = _mm512_mask_cvt_roundepi16_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( src, 0b01010101010101010101010101010101, a, ); let e = _mm512_set_ph( - 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, - 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, + 10., 2., 12., 4., 14., 6., 16., 8., 18., 10., 20., 12., 22., 14., 24., 16., 26., 18., + 28., 20., 30., 22., 32., 24., 34., 26., 36., 28., 38., 30., 40., 32., ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn 
test_mm512_maskz_roundscale_round_ph() { - let a = _mm512_set1_ph(1.1); - let r = _mm512_maskz_roundscale_round_ph::<0, _MM_FROUND_NO_EXC>( + unsafe fn test_mm512_maskz_cvt_roundepi16_ph() { + let a = _mm512_set_epi16( + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, + ); + let r = _mm512_maskz_cvt_roundepi16_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( 0b01010101010101010101010101010101, a, ); let e = _mm512_set_ph( - 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, - 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, + 0., 2., 0., 4., 0., 6., 0., 8., 0., 10., 0., 12., 0., 14., 0., 16., 0., 18., 0., 20., + 0., 22., 0., 24., 0., 26., 0., 28., 0., 30., 0., 32., ); assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_roundscale_sh() { - let a = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(1.1, 20., 21., 22., 23., 24., 25., 26.); - let r = _mm_roundscale_sh::<0>(a, b); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_roundscale_sh() { - let a = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(1.1, 20., 21., 22., 23., 24., 25., 26.); - let src = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_roundscale_sh::<0>(src, 0, a, b); - let e = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_mask_roundscale_sh::<0>(src, 1, a, b); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_roundscale_sh() { - let a = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(1.1, 20., 21., 22., 23., 24., 25., 26.); - let r = _mm_maskz_roundscale_sh::<0>(0, a, b); - let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_maskz_roundscale_sh::<0>(1, a, b); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_roundscale_round_sh() { - let a = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(1.1, 20., 21., 22., 23., 24., 25., 26.); - let r = _mm_roundscale_round_sh::<0, _MM_FROUND_NO_EXC>(a, b); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_roundscale_round_sh() { - let a = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(1.1, 20., 21., 22., 23., 24., 25., 26.); - let src = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_roundscale_round_sh::<0, _MM_FROUND_NO_EXC>(src, 0, a, b); - let e = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_mask_roundscale_round_sh::<0, _MM_FROUND_NO_EXC>(src, 1, a, b); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_roundscale_round_sh() { - let a = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(1.1, 20., 21., 22., 23., 24., 25., 26.); - let r = _mm_maskz_roundscale_round_sh::<0, _MM_FROUND_NO_EXC>(0, a, b); - let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 
16.); - assert_eq_m128h(r, e); - let r = _mm_maskz_roundscale_round_sh::<0, _MM_FROUND_NO_EXC>(1, a, b); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_scalef_ph() { - let a = _mm_set1_ph(1.); - let b = _mm_set1_ph(3.); - let r = _mm_scalef_ph(a, b); - let e = _mm_set1_ph(8.0); + unsafe fn test_mm_cvtepu16_ph() { + let a = _mm_set_epi16(1, 2, 3, 4, 5, 6, 7, 8); + let r = _mm_cvtepu16_ph(a); + let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_scalef_ph() { - let a = _mm_set1_ph(1.); - let b = _mm_set1_ph(3.); - let src = _mm_set1_ph(2.); - let r = _mm_mask_scalef_ph(src, 0b01010101, a, b); - let e = _mm_set_ph(2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0); + unsafe fn test_mm_mask_cvtepu16_ph() { + let a = _mm_set_epi16(1, 2, 3, 4, 5, 6, 7, 8); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm_mask_cvtepu16_ph(src, 0b01010101, a); + let e = _mm_set_ph(10., 2., 12., 4., 14., 6., 16., 8.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_scalef_ph() { - let a = _mm_set1_ph(1.); - let b = _mm_set1_ph(3.); - let r = _mm_maskz_scalef_ph(0b01010101, a, b); - let e = _mm_set_ph(0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0); + unsafe fn test_mm_maskz_cvtepu16_ph() { + let a = _mm_set_epi16(1, 2, 3, 4, 5, 6, 7, 8); + let r = _mm_maskz_cvtepu16_ph(0b01010101, a); + let e = _mm_set_ph(0., 2., 0., 4., 0., 6., 0., 8.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_scalef_ph() { - let a = _mm256_set1_ph(1.); - let b = _mm256_set1_ph(3.); - let r = _mm256_scalef_ph(a, b); - let e = _mm256_set1_ph(8.0); + unsafe fn test_mm256_cvtepu16_ph() { + let a = _mm256_set_epi16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let r = _mm256_cvtepu16_ph(a); + let e = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_scalef_ph() { - let a = _mm256_set1_ph(1.); - let b = _mm256_set1_ph(3.); - let src = _mm256_set1_ph(2.); - let r = _mm256_mask_scalef_ph(src, 0b0101010101010101, a, b); + unsafe fn test_mm256_mask_cvtepu16_ph() { + let a = _mm256_set_epi16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let src = _mm256_set_ph( + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., + ); + let r = _mm256_mask_cvtepu16_ph(src, 0b0101010101010101, a); let e = _mm256_set_ph( - 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, + 10., 2., 12., 4., 14., 6., 16., 8., 18., 10., 20., 12., 22., 14., 24., 16., ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_scalef_ph() { - let a = _mm256_set1_ph(1.); - let b = _mm256_set1_ph(3.); - let r = _mm256_maskz_scalef_ph(0b0101010101010101, a, b); + unsafe fn test_mm256_maskz_cvtepu16_ph() { + let a = _mm256_set_epi16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let r = _mm256_maskz_cvtepu16_ph(0b0101010101010101, a); let e = _mm256_set_ph( - 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, + 0., 2., 0., 4., 0., 6., 0., 8., 0., 10., 0., 12., 0., 14., 0., 16., ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn 
test_mm512_scalef_ph() { - let a = _mm512_set1_ph(1.); - let b = _mm512_set1_ph(3.); - let r = _mm512_scalef_ph(a, b); - let e = _mm512_set1_ph(8.0); + unsafe fn test_mm512_cvtepu16_ph() { + let a = _mm512_set_epi16( + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, + ); + let r = _mm512_cvtepu16_ph(a); + let e = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_scalef_ph() { - let a = _mm512_set1_ph(1.); - let b = _mm512_set1_ph(3.); - let src = _mm512_set1_ph(2.); - let r = _mm512_mask_scalef_ph(src, 0b01010101010101010101010101010101, a, b); + unsafe fn test_mm512_mask_cvtepu16_ph() { + let a = _mm512_set_epi16( + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, + ); + let src = _mm512_set_ph( + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., 26., + 27., 28., 29., 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., 40., 41., + ); + let r = _mm512_mask_cvtepu16_ph(src, 0b01010101010101010101010101010101, a); let e = _mm512_set_ph( - 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, - 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, + 10., 2., 12., 4., 14., 6., 16., 8., 18., 10., 20., 12., 22., 14., 24., 16., 26., 18., + 28., 20., 30., 22., 32., 24., 34., 26., 36., 28., 38., 30., 40., 32., ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_scalef_ph() { - let a = _mm512_set1_ph(1.); - let b = _mm512_set1_ph(3.); - let r = _mm512_maskz_scalef_ph(0b01010101010101010101010101010101, a, b); + unsafe fn test_mm512_maskz_cvtepu16_ph() { + let a = _mm512_set_epi16( + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, + ); + let r = _mm512_maskz_cvtepu16_ph(0b01010101010101010101010101010101, a); let e = _mm512_set_ph( - 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, - 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, + 0., 2., 0., 4., 0., 6., 0., 8., 0., 10., 0., 12., 0., 14., 0., 16., 0., 18., 0., 20., + 0., 22., 0., 24., 0., 26., 0., 28., 0., 30., 0., 32., ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_scalef_round_ph() { - let a = _mm512_set1_ph(1.); - let b = _mm512_set1_ph(3.); - let r = _mm512_scalef_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm512_set1_ph(8.0); + unsafe fn test_mm512_cvt_roundepu16_ph() { + let a = _mm512_set_epi16( + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, + ); + let r = _mm512_cvt_roundepu16_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a); + let e = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_scalef_round_ph() { - let a = _mm512_set1_ph(1.); - let b = _mm512_set1_ph(3.); - let src = _mm512_set1_ph(2.); - let 
r = _mm512_mask_scalef_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_mask_cvt_roundepu16_ph() { + let a = _mm512_set_epi16( + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, + ); + let src = _mm512_set_ph( + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., 26., + 27., 28., 29., 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., 40., 41., + ); + let r = _mm512_mask_cvt_roundepu16_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( src, 0b01010101010101010101010101010101, a, - b, ); let e = _mm512_set_ph( - 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, - 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, + 10., 2., 12., 4., 14., 6., 16., 8., 18., 10., 20., 12., 22., 14., 24., 16., 26., 18., + 28., 20., 30., 22., 32., 24., 34., 26., 36., 28., 38., 30., 40., 32., ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_scalef_round_ph() { - let a = _mm512_set1_ph(1.); - let b = _mm512_set1_ph(3.); - let r = _mm512_maskz_scalef_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_maskz_cvt_roundepu16_ph() { + let a = _mm512_set_epi16( + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, + ); + let r = _mm512_maskz_cvt_roundepu16_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( 0b01010101010101010101010101010101, a, - b, ); let e = _mm512_set_ph( - 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, - 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, + 0., 2., 0., 4., 0., 6., 0., 8., 0., 10., 0., 12., 0., 14., 0., 16., 0., 18., 0., 20., + 0., 22., 0., 24., 0., 26., 0., 28., 0., 30., 0., 32., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_cvtepi32_ph() { + let a = _mm_set_epi32(1, 2, 3, 4); + let r = _mm_cvtepi32_ph(a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_cvtepi32_ph() { + let a = _mm_set_epi32(1, 2, 3, 4); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm_mask_cvtepi32_ph(src, 0b0101, a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 14., 2., 16., 4.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_cvtepi32_ph() { + let a = _mm_set_epi32(1, 2, 3, 4); + let r = _mm_maskz_cvtepi32_ph(0b0101, a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 2., 0.0, 4.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_cvtepi32_ph() { + let a = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); + let r = _mm256_cvtepi32_ph(a); + let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_cvtepi32_ph() { + let a = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm256_mask_cvtepi32_ph(src, 0b01010101, a); + let e = _mm_set_ph(10., 2., 12., 4., 14., 6., 16., 8.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_cvtepi32_ph() { + let a = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); 
+ let r = _mm256_maskz_cvtepi32_ph(0b01010101, a); + let e = _mm_set_ph(0.0, 2.0, 0.0, 4.0, 0.0, 6.0, 0.0, 8.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_cvtepi32_ph() { + let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let r = _mm512_cvtepi32_ph(a); + let e = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - assert_eq_m512h(r, e); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_scalef_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); - let r = _mm_scalef_sh(a, b); - let e = _mm_setr_ph(8.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); + unsafe fn test_mm512_mask_cvtepi32_ph() { + let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let src = _mm256_set_ph( + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., + ); + let r = _mm512_mask_cvtepi32_ph(src, 0b0101010101010101, a); + let e = _mm256_set_ph( + 10., 2., 12., 4., 14., 6., 16., 8., 18., 10., 20., 12., 22., 14., 24., 16., + ); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_scalef_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); - let src = _mm_setr_ph(2.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_scalef_sh(src, 0, a, b); - let e = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_mask_scalef_sh(src, 1, a, b); - let e = _mm_setr_ph(8.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); + unsafe fn test_mm512_maskz_cvtepi32_ph() { + let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let r = _mm512_maskz_cvtepi32_ph(0b0101010101010101, a); + let e = _mm256_set_ph( + 0.0, 2.0, 0.0, 4.0, 0.0, 6.0, 0.0, 8.0, 0.0, 10.0, 0.0, 12.0, 0.0, 14.0, 0.0, 16.0, + ); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_scalef_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); - let r = _mm_maskz_scalef_sh(0, a, b); - let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_maskz_scalef_sh(1, a, b); - let e = _mm_setr_ph(8.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); + unsafe fn test_mm512_cvt_roundepi32_ph() { + let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let r = _mm512_cvt_roundepi32_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a); + let e = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_scalef_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); - let r = _mm_scalef_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm_setr_ph(8.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); + unsafe fn test_mm512_mask_cvt_roundepi32_ph() { + let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let src = _mm256_set_ph( + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 
23., 24., 25., + ); + let r = _mm512_mask_cvt_roundepi32_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b0101010101010101, + a, + ); + let e = _mm256_set_ph( + 10., 2., 12., 4., 14., 6., 16., 8., 18., 10., 20., 12., 22., 14., 24., 16., + ); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_scalef_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); - let src = _mm_setr_ph(2.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_scalef_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 0, a, b, + unsafe fn test_mm512_maskz_cvt_roundepi32_ph() { + let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let r = _mm512_maskz_cvt_roundepi32_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b0101010101010101, + a, ); - let e = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_mask_scalef_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 1, a, b, + let e = _mm256_set_ph( + 0.0, 2.0, 0.0, 4.0, 0.0, 6.0, 0.0, 8.0, 0.0, 10.0, 0.0, 12.0, 0.0, 14.0, 0.0, 16.0, ); - let e = _mm_setr_ph(8.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_scalef_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); - let r = - _mm_maskz_scalef_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); - let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); + unsafe fn test_mm_cvti32_sh() { + let a = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm_cvti32_sh(a, 10); + let e = _mm_setr_ph(10.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); assert_eq_m128h(r, e); - let r = - _mm_maskz_scalef_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); - let e = _mm_setr_ph(8.0, 10., 11., 12., 13., 14., 15., 16.); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_cvt_roundi32_sh() { + let a = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm_cvt_roundi32_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, 10); + let e = _mm_setr_ph(10.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_reduce_ph() { - let a = _mm_set1_ph(1.25); - let r = _mm_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>(a); - let e = _mm_set1_ph(0.25); + unsafe fn test_mm_cvtepu32_ph() { + let a = _mm_set_epi32(1, 2, 3, 4); + let r = _mm_cvtepu32_ph(a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_reduce_ph() { - let a = _mm_set1_ph(1.25); - let src = _mm_set1_ph(2.0); - let r = _mm_mask_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>(src, 0b01010101, a); - let e = _mm_set_ph(2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25); + unsafe fn test_mm_mask_cvtepu32_ph() { + let a = _mm_set_epi32(1, 2, 3, 4); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm_mask_cvtepu32_ph(src, 0b0101, a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 14., 2., 16., 4.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_reduce_ph() { - let a = _mm_set1_ph(1.25); - let r = _mm_maskz_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO 
}>(0b01010101, a); - let e = _mm_set_ph(0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25); + unsafe fn test_mm_maskz_cvtepu32_ph() { + let a = _mm_set_epi32(1, 2, 3, 4); + let r = _mm_maskz_cvtepu32_ph(0b0101, a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 2., 0.0, 4.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_reduce_ph() { - let a = _mm256_set1_ph(1.25); - let r = _mm256_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>(a); - let e = _mm256_set1_ph(0.25); - assert_eq_m256h(r, e); + unsafe fn test_mm256_cvtepu32_ph() { + let a = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); + let r = _mm256_cvtepu32_ph(a); + let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_reduce_ph() { - let a = _mm256_set1_ph(1.25); - let src = _mm256_set1_ph(2.0); - let r = _mm256_mask_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>(src, 0b0101010101010101, a); - let e = _mm256_set_ph( - 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, - ); - assert_eq_m256h(r, e); + unsafe fn test_mm256_mask_cvtepu32_ph() { + let a = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm256_mask_cvtepu32_ph(src, 0b01010101, a); + let e = _mm_set_ph(10., 2., 12., 4., 14., 6., 16., 8.); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_reduce_ph() { - let a = _mm256_set1_ph(1.25); - let r = _mm256_maskz_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>(0b0101010101010101, a); - let e = _mm256_set_ph( - 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, - ); - assert_eq_m256h(r, e); + unsafe fn test_mm256_maskz_cvtepu32_ph() { + let a = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); + let r = _mm256_maskz_cvtepu32_ph(0b01010101, a); + let e = _mm_set_ph(0.0, 2.0, 0.0, 4.0, 0.0, 6.0, 0.0, 8.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_reduce_ph() { - let a = _mm512_set1_ph(1.25); - let r = _mm512_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>(a); - let e = _mm512_set1_ph(0.25); - assert_eq_m512h(r, e); + unsafe fn test_mm512_cvtepu32_ph() { + let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let r = _mm512_cvtepu32_ph(a); + let e = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_reduce_ph() { - let a = _mm512_set1_ph(1.25); - let src = _mm512_set1_ph(2.0); - let r = _mm512_mask_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>( - src, - 0b01010101010101010101010101010101, - a, + unsafe fn test_mm512_mask_cvtepu32_ph() { + let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let src = _mm256_set_ph( + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., ); - let e = _mm512_set_ph( - 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, - 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, + let r = _mm512_mask_cvtepu32_ph(src, 0b0101010101010101, a); + let e = _mm256_set_ph( + 10., 2.0, 12., 4.0, 14., 6.0, 16., 8.0, 18., 10.0, 20., 12.0, 22., 14.0, 24., 16.0, ); - assert_eq_m512h(r, e); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_reduce_ph() { - 
let a = _mm512_set1_ph(1.25); - let r = _mm512_maskz_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>( - 0b01010101010101010101010101010101, - a, - ); - let e = _mm512_set_ph( - 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, - 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, + unsafe fn test_mm512_maskz_cvtepu32_ph() { + let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let r = _mm512_maskz_cvtepu32_ph(0b0101010101010101, a); + let e = _mm256_set_ph( + 0.0, 2.0, 0.0, 4.0, 0.0, 6.0, 0.0, 8.0, 0.0, 10.0, 0.0, 12.0, 0.0, 14.0, 0.0, 16.0, ); - assert_eq_m512h(r, e); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_reduce_round_ph() { - let a = _mm512_set1_ph(1.25); - let r = _mm512_reduce_round_ph::<{ 16 | _MM_FROUND_TO_ZERO }, _MM_FROUND_NO_EXC>(a); - let e = _mm512_set1_ph(0.25); - assert_eq_m512h(r, e); + unsafe fn test_mm512_cvt_roundepu32_ph() { + let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let r = _mm512_cvt_roundepu32_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a); + let e = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_reduce_round_ph() { - let a = _mm512_set1_ph(1.25); - let src = _mm512_set1_ph(2.0); - let r = _mm512_mask_reduce_round_ph::<{ 16 | _MM_FROUND_TO_ZERO }, _MM_FROUND_NO_EXC>( + unsafe fn test_mm512_mask_cvt_roundepu32_ph() { + let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let src = _mm256_set_ph( + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., + ); + let r = _mm512_mask_cvt_roundepu32_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( src, - 0b01010101010101010101010101010101, + 0b0101010101010101, a, ); - let e = _mm512_set_ph( - 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, - 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, + let e = _mm256_set_ph( + 10.0, 2.0, 12.0, 4.0, 14.0, 6.0, 16.0, 8.0, 18.0, 10.0, 20.0, 12.0, 22.0, 14.0, 24.0, + 16.0, ); - assert_eq_m512h(r, e); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_reduce_round_ph() { - let a = _mm512_set1_ph(1.25); - let r = _mm512_maskz_reduce_round_ph::<{ 16 | _MM_FROUND_TO_ZERO }, _MM_FROUND_NO_EXC>( - 0b01010101010101010101010101010101, + unsafe fn test_mm512_maskz_cvt_roundepu32_ph() { + let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let r = _mm512_maskz_cvt_roundepu32_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b0101010101010101, a, ); - let e = _mm512_set_ph( - 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, - 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, + let e = _mm256_set_ph( + 0.0, 2.0, 0.0, 4.0, 0.0, 6.0, 0.0, 8.0, 0.0, 10.0, 0.0, 12.0, 0.0, 14.0, 0.0, 16.0, ); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_reduce_sh() { - let a = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(1.25, 20., 21., 22., 23., 24., 25., 26.); - let r = _mm_reduce_sh::<{ 16 | _MM_FROUND_TO_ZERO }>(a, b); - let e = _mm_setr_ph(0.25, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); 
- } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_reduce_sh() { - let a = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(1.25, 20., 21., 22., 23., 24., 25., 26.); - let src = _mm_setr_ph(2.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_reduce_sh::<{ 16 | _MM_FROUND_TO_ZERO }>(src, 0, a, b); - let e = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_mask_reduce_sh::<{ 16 | _MM_FROUND_TO_ZERO }>(src, 1, a, b); - let e = _mm_setr_ph(0.25, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_reduce_sh() { - let a = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(1.25, 20., 21., 22., 23., 24., 25., 26.); - let r = _mm_maskz_reduce_sh::<{ 16 | _MM_FROUND_TO_ZERO }>(0, a, b); - let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_maskz_reduce_sh::<{ 16 | _MM_FROUND_TO_ZERO }>(1, a, b); - let e = _mm_setr_ph(0.25, 10., 11., 12., 13., 14., 15., 16.); + unsafe fn test_mm_cvtu32_sh() { + let a = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm_cvtu32_sh(a, 10); + let e = _mm_setr_ph(10.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_reduce_round_sh() { - let a = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(1.25, 20., 21., 22., 23., 24., 25., 26.); - let r = _mm_reduce_round_sh::<{ 16 | _MM_FROUND_TO_ZERO }, _MM_FROUND_NO_EXC>(a, b); - let e = _mm_setr_ph(0.25, 10., 11., 12., 13., 14., 15., 16.); + unsafe fn test_mm_cvt_roundu32_sh() { + let a = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm_cvt_roundu32_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, 10); + let e = _mm_setr_ph(10.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_reduce_round_sh() { - let a = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(1.25, 20., 21., 22., 23., 24., 25., 26.); - let src = _mm_setr_ph(2.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_reduce_round_sh::<{ 16 | _MM_FROUND_TO_ZERO }, _MM_FROUND_NO_EXC>( - src, 0, a, b, - ); - let e = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_mask_reduce_round_sh::<{ 16 | _MM_FROUND_TO_ZERO }, _MM_FROUND_NO_EXC>( - src, 1, a, b, - ); - let e = _mm_setr_ph(0.25, 10., 11., 12., 13., 14., 15., 16.); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_cvtepi64_ph() { + let a = _mm_set_epi64x(1, 2); + let r = _mm_cvtepi64_ph(a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 2.0); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_reduce_round_sh() { - let a = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(1.25, 20., 21., 22., 23., 24., 25., 26.); - let r = - _mm_maskz_reduce_round_sh::<{ 16 | _MM_FROUND_TO_ZERO }, _MM_FROUND_NO_EXC>(0, a, b); - let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = - _mm_maskz_reduce_round_sh::<{ 16 | _MM_FROUND_TO_ZERO }, _MM_FROUND_NO_EXC>(1, a, b); - let e = _mm_setr_ph(0.25, 10., 11., 12., 13., 14., 15., 16.); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_cvtepi64_ph() { + let a = _mm_set_epi64x(1, 2); + let src = 
_mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm_mask_cvtepi64_ph(src, 0b01, a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 16., 2.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_reduce_add_ph() { - let a = _mm_set1_ph(2.0); - let r = _mm_reduce_add_ph(a); - assert_eq!(r, 16.0); + unsafe fn test_mm_maskz_cvtepi64_ph() { + let a = _mm_set_epi64x(1, 2); + let r = _mm_maskz_cvtepi64_ph(0b01, a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_reduce_add_ph() { - let a = _mm256_set1_ph(2.0); - let r = _mm256_reduce_add_ph(a); - assert_eq!(r, 32.0); + unsafe fn test_mm256_cvtepi64_ph() { + let a = _mm256_set_epi64x(1, 2, 3, 4); + let r = _mm256_cvtepi64_ph(a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_reduce_add_ph() { - let a = _mm512_set1_ph(2.0); - let r = _mm512_reduce_add_ph(a); - assert_eq!(r, 64.0); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_cvtepi64_ph() { + let a = _mm256_set_epi64x(1, 2, 3, 4); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm256_mask_cvtepi64_ph(src, 0b0101, a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 14., 2.0, 16.0, 4.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_reduce_mul_ph() { - let a = _mm_set1_ph(2.0); - let r = _mm_reduce_mul_ph(a); - assert_eq!(r, 256.0); + unsafe fn test_mm256_maskz_cvtepi64_ph() { + let a = _mm256_set_epi64x(1, 2, 3, 4); + let r = _mm256_maskz_cvtepi64_ph(0b0101, a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 0.0, 4.0); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_reduce_mul_ph() { - let a = _mm256_set1_ph(2.0); - let r = _mm256_reduce_mul_ph(a); - assert_eq!(r, 65536.0); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_cvtepi64_ph() { + let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); + let r = _mm512_cvtepi64_ph(a); + let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_reduce_mul_ph() { - let a = _mm512_set1_ph(2.0); - let r = _mm512_reduce_mul_ph(a); - assert_eq!(r, 16777216.0); + unsafe fn test_mm512_mask_cvtepi64_ph() { + let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm512_mask_cvtepi64_ph(src, 0b01010101, a); + let e = _mm_set_ph(10., 2., 12., 4., 14., 6., 16., 8.); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_reduce_max_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let r = _mm_reduce_max_ph(a); - assert_eq!(r, 8.0); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_cvtepi64_ph() { + let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); + let r = _mm512_maskz_cvtepi64_ph(0b01010101, a); + let e = _mm_set_ph(0.0, 2., 0.0, 4., 0.0, 6., 0.0, 8.); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_reduce_max_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_cvt_roundepi64_ph() { + let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); + let r = 
_mm512_cvt_roundepi64_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a); + let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_cvt_roundepi64_ph() { + let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm512_mask_cvt_roundepi64_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0b01010101, a, ); - let r = _mm256_reduce_max_ph(a); - assert_eq!(r, 16.0); + let e = _mm_set_ph(10., 2., 12., 4., 14., 6., 16., 8.); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_reduce_max_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, + unsafe fn test_mm512_maskz_cvt_roundepi64_ph() { + let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); + let r = _mm512_maskz_cvt_roundepi64_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101, a, ); - let r = _mm512_reduce_max_ph(a); - assert_eq!(r, 32.0); + let e = _mm_set_ph(0.0, 2., 0.0, 4., 0.0, 6., 0.0, 8.); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_reduce_min_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let r = _mm_reduce_min_ph(a); - assert_eq!(r, 1.0); + unsafe fn test_mm_cvtepu64_ph() { + let a = _mm_set_epi64x(1, 2); + let r = _mm_cvtepu64_ph(a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 2.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_reduce_min_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let r = _mm256_reduce_min_ph(a); - assert_eq!(r, 1.0); + unsafe fn test_mm_mask_cvtepu64_ph() { + let a = _mm_set_epi64x(1, 2); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm_mask_cvtepu64_ph(src, 0b01, a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 16., 2.); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_reduce_min_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let r = _mm512_reduce_min_ph(a); - assert_eq!(r, 1.0); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_cvtepu64_ph() { + let a = _mm_set_epi64x(1, 2); + let r = _mm_maskz_cvtepu64_ph(0b01, a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_fpclass_ph_mask() { - let a = _mm_set_ph( - 1., - f16::INFINITY, - f16::NEG_INFINITY, - 0.0, - -0.0, - -2.0, - f16::NAN, - 5.9e-8, // Denormal - ); - let r = _mm_fpclass_ph_mask::<0x18>(a); // infinities - assert_eq!(r, 0b01100000); + unsafe fn test_mm256_cvtepu64_ph() { + let a = _mm256_set_epi64x(1, 2, 3, 4); + let r = _mm256_cvtepu64_ph(a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fpclass_ph_mask() { - let a = _mm_set_ph( - 1., - f16::INFINITY, - f16::NEG_INFINITY, - 0.0, - -0.0, - -2.0, - f16::NAN, - 5.9e-8, // Denormal - ); - let r = 
_mm_mask_fpclass_ph_mask::<0x18>(0b01010101, a); - assert_eq!(r, 0b01000000); + unsafe fn test_mm256_mask_cvtepu64_ph() { + let a = _mm256_set_epi64x(1, 2, 3, 4); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm256_mask_cvtepu64_ph(src, 0b0101, a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 14., 2.0, 16.0, 4.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_fpclass_ph_mask() { - let a = _mm256_set_ph( - 1., - f16::INFINITY, - f16::NEG_INFINITY, - 0.0, - -0.0, - -2.0, - f16::NAN, - 5.9e-8, // Denormal - 1., - f16::INFINITY, - f16::NEG_INFINITY, - 0.0, - -0.0, - -2.0, - f16::NAN, - 5.9e-8, // Denormal - ); - let r = _mm256_fpclass_ph_mask::<0x18>(a); // infinities - assert_eq!(r, 0b0110000001100000); + unsafe fn test_mm256_maskz_cvtepu64_ph() { + let a = _mm256_set_epi64x(1, 2, 3, 4); + let r = _mm256_maskz_cvtepu64_ph(0b0101, a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 0.0, 4.0); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_fpclass_ph_mask() { - let a = _mm256_set_ph( - 1., - f16::INFINITY, - f16::NEG_INFINITY, - 0.0, - -0.0, - -2.0, - f16::NAN, - 5.9e-8, // Denormal - 1., - f16::INFINITY, - f16::NEG_INFINITY, - 0.0, - -0.0, - -2.0, - f16::NAN, - 5.9e-8, // Denormal - ); - let r = _mm256_mask_fpclass_ph_mask::<0x18>(0b0101010101010101, a); - assert_eq!(r, 0b0100000001000000); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_cvtepu64_ph() { + let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); + let r = _mm512_cvtepu64_ph(a); + let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fpclass_ph_mask() { - let a = _mm512_set_ph( - 1., - f16::INFINITY, - f16::NEG_INFINITY, - 0.0, - -0.0, - -2.0, - f16::NAN, - 5.9e-8, // Denormal - 1., - f16::INFINITY, - f16::NEG_INFINITY, - 0.0, - -0.0, - -2.0, - f16::NAN, - 5.9e-8, // Denormal - 1., - f16::INFINITY, - f16::NEG_INFINITY, - 0.0, - -0.0, - -2.0, - f16::NAN, - 5.9e-8, // Denormal - 1., - f16::INFINITY, - f16::NEG_INFINITY, - 0.0, - -0.0, - -2.0, - f16::NAN, - 5.9e-8, // Denormal + unsafe fn test_mm512_mask_cvtepu64_ph() { + let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm512_mask_cvtepu64_ph(src, 0b01010101, a); + let e = _mm_set_ph(10., 2., 12., 4., 14., 6., 16., 8.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_cvtepu64_ph() { + let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); + let r = _mm512_maskz_cvtepu64_ph(0b01010101, a); + let e = _mm_set_ph(0.0, 2., 0.0, 4., 0.0, 6., 0.0, 8.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_cvt_roundepu64_ph() { + let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); + let r = _mm512_cvt_roundepu64_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a); + let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_cvt_roundepu64_ph() { + let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm512_mask_cvt_roundepu64_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0b01010101, a, ); - let r = _mm512_fpclass_ph_mask::<0x18>(a); // infinities - assert_eq!(r, 0b01100000011000000110000001100000); + let e = 
_mm_set_ph(10., 2., 12., 4., 14., 6., 16., 8.); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fpclass_ph_mask() { - let a = _mm512_set_ph( - 1., - f16::INFINITY, - f16::NEG_INFINITY, - 0.0, - -0.0, - -2.0, - f16::NAN, - 5.9e-8, // Denormal - 1., - f16::INFINITY, - f16::NEG_INFINITY, - 0.0, - -0.0, - -2.0, - f16::NAN, - 5.9e-8, // Denormal - 1., - f16::INFINITY, - f16::NEG_INFINITY, - 0.0, - -0.0, - -2.0, - f16::NAN, - 5.9e-8, // Denormal - 1., - f16::INFINITY, - f16::NEG_INFINITY, - 0.0, - -0.0, - -2.0, - f16::NAN, - 5.9e-8, // Denormal + unsafe fn test_mm512_maskz_cvt_roundepu64_ph() { + let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); + let r = _mm512_maskz_cvt_roundepu64_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101, a, ); - let r = _mm512_mask_fpclass_ph_mask::<0x18>(0b01010101010101010101010101010101, a); - assert_eq!(r, 0b01000000010000000100000001000000); + let e = _mm_set_ph(0.0, 2., 0.0, 4., 0.0, 6., 0.0, 8.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_cvtxps_ph() { + let a = _mm_set_ps(1.0, 2.0, 3.0, 4.0); + let r = _mm_cvtxps_ph(a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fpclass_sh_mask() { - let a = _mm_set_sh(f16::INFINITY); - let r = _mm_fpclass_sh_mask::<0x18>(a); - assert_eq!(r, 1); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_cvtxps_ph() { + let a = _mm_set_ps(1.0, 2.0, 3.0, 4.0); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm_mask_cvtxps_ph(src, 0b0101, a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 14., 2.0, 16., 4.0); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fpclass_sh_mask() { - let a = _mm_set_sh(f16::INFINITY); - let r = _mm_mask_fpclass_sh_mask::<0x18>(0, a); - assert_eq!(r, 0); - let r = _mm_mask_fpclass_sh_mask::<0x18>(1, a); - assert_eq!(r, 1); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_cvtxps_ph() { + let a = _mm_set_ps(1.0, 2.0, 3.0, 4.0); + let r = _mm_maskz_cvtxps_ph(0b0101, a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 0.0, 4.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_blend_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(-1.0, -2.0, -3.0, -4.0, -5.0, -6.0, -7.0, -8.0); - let r = _mm_mask_blend_ph(0b01010101, a, b); - let e = _mm_set_ph(1.0, -2.0, 3.0, -4.0, 5.0, -6.0, 7.0, -8.0); + unsafe fn test_mm256_cvtxps_ph() { + let a = _mm256_set_ps(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm256_cvtxps_ph(a); + let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_blend_ph() { - let a = _mm256_set_ph( + unsafe fn test_mm256_mask_cvtxps_ph() { + let a = _mm256_set_ps(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm256_mask_cvtxps_ph(src, 0b01010101, a); + let e = _mm_set_ph(10., 2., 12., 4., 14., 6., 16., 8.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_cvtxps_ph() { + let a = _mm256_set_ps(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm256_maskz_cvtxps_ph(0b01010101, a); + let e = _mm_set_ph(0.0, 2.0, 0.0, 4.0, 0.0, 6.0, 0.0, 8.0); + 
assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_cvtxps_ph() { + let a = _mm512_set_ps( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - let b = _mm256_set_ph( - -1.0, -2.0, -3.0, -4.0, -5.0, -6.0, -7.0, -8.0, -9.0, -10.0, -11.0, -12.0, -13.0, - -14.0, -15.0, -16.0, - ); - let r = _mm256_mask_blend_ph(0b0101010101010101, a, b); + let r = _mm512_cvtxps_ph(a); let e = _mm256_set_ph( - 1.0, -2.0, 3.0, -4.0, 5.0, -6.0, 7.0, -8.0, 9.0, -10.0, 11.0, -12.0, 13.0, -14.0, 15.0, - -16.0, + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_blend_ph() { - let a = _mm512_set_ph( + unsafe fn test_mm512_mask_cvtxps_ph() { + let a = _mm512_set_ps( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, ); - let b = _mm512_set_ph( - -1.0, -2.0, -3.0, -4.0, -5.0, -6.0, -7.0, -8.0, -9.0, -10.0, -11.0, -12.0, -13.0, - -14.0, -15.0, -16.0, -17.0, -18.0, -19.0, -20.0, -21.0, -22.0, -23.0, -24.0, -25.0, - -26.0, -27.0, -28.0, -29.0, -30.0, -31.0, -32.0, + let src = _mm256_set_ph( + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., ); - let r = _mm512_mask_blend_ph(0b01010101010101010101010101010101, a, b); - let e = _mm512_set_ph( - 1.0, -2.0, 3.0, -4.0, 5.0, -6.0, 7.0, -8.0, 9.0, -10.0, 11.0, -12.0, 13.0, -14.0, 15.0, - -16.0, 17.0, -18.0, 19.0, -20.0, 21.0, -22.0, 23.0, -24.0, 25.0, -26.0, 27.0, -28.0, - 29.0, -30.0, 31.0, -32.0, + let r = _mm512_mask_cvtxps_ph(src, 0b0101010101010101, a); + let e = _mm256_set_ph( + 10., 2.0, 12., 4.0, 14., 6.0, 16., 8.0, 18., 10.0, 20., 12.0, 22., 14.0, 24., 16.0, ); - assert_eq_m512h(r, e); + assert_eq_m256h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_permutex2var_ph() { - let a = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_setr_ph(9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let idx = _mm_setr_epi16(0, 2, 4, 6, 8, 10, 12, 14); - let r = _mm_permutex2var_ph(a, idx, b); - let e = _mm_setr_ph(1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0); - assert_eq_m128h(r, e); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_cvtxps_ph() { + let a = _mm512_set_ps( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let r = _mm512_maskz_cvtxps_ph(0b0101010101010101, a); + let e = _mm256_set_ph( + 0.0, 2.0, 0.0, 4.0, 0.0, 6.0, 0.0, 8.0, 0.0, 10.0, 0.0, 12.0, 0.0, 14.0, 0.0, 16.0, + ); + assert_eq_m256h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_permutex2var_ph() { - let a = _mm256_setr_ph( + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_cvtx_roundps_ph() { + let a = _mm512_set_ps( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - let b = _mm256_setr_ph( - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, + let r = _mm512_cvtx_roundps_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a); + let e = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - let idx = _mm256_setr_epi16(0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); - let r = _mm256_permutex2var_ph(a, idx, b); - let e = _mm256_setr_ph( - 1.0, 
3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, 23.0, 25.0, 27.0, 29.0, - 31.0, + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_cvtx_roundps_ph() { + let a = _mm512_set_ps( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let src = _mm256_set_ph( + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., + ); + let r = _mm512_mask_cvtx_roundps_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b0101010101010101, + a, + ); + let e = _mm256_set_ph( + 10.0, 2.0, 12.0, 4.0, 14.0, 6.0, 16.0, 8.0, 18.0, 10.0, 20.0, 12.0, 22.0, 14.0, 24.0, + 16.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_permutex2var_ph() { - let a = _mm512_setr_ph( + unsafe fn test_mm512_maskz_cvtx_roundps_ph() { + let a = _mm512_set_ps( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, ); - let b = _mm512_setr_ph( - 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, - 47.0, 48.0, 49.0, 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, 60.0, - 61.0, 62.0, 63.0, 64.0, + let r = _mm512_maskz_cvtx_roundps_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b0101010101010101, + a, ); - let idx = _mm512_set_epi16( - 62, 60, 58, 56, 54, 52, 50, 48, 46, 44, 42, 40, 38, 36, 34, 32, 30, 28, 26, 24, 22, 20, - 18, 16, 14, 12, 10, 8, 6, 4, 2, 0, + let e = _mm256_set_ph( + 0.0, 2.0, 0.0, 4.0, 0.0, 6.0, 0.0, 8.0, 0.0, 10.0, 0.0, 12.0, 0.0, 14.0, 0.0, 16.0, ); - let r = _mm512_permutex2var_ph(a, idx, b); - let e = _mm512_setr_ph( - 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, 23.0, 25.0, 27.0, 29.0, - 31.0, 33.0, 35.0, 37.0, 39.0, 41.0, 43.0, 45.0, 47.0, 49.0, 51.0, 53.0, 55.0, 57.0, - 59.0, 61.0, 63.0, + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_cvtss_sh() { + let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let b = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); + let r = _mm_cvtss_sh(a, b); + let e = _mm_setr_ph(1.0, 11., 12., 13., 14., 15., 16., 17.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_cvtss_sh() { + let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let b = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); + let src = _mm_setr_ph(20., 21., 22., 23., 24., 25., 26., 27.); + let r = _mm_mask_cvtss_sh(src, 0, a, b); + let e = _mm_setr_ph(20., 11., 12., 13., 14., 15., 16., 17.); + assert_eq_m128h(r, e); + let r = _mm_mask_cvtss_sh(src, 1, a, b); + let e = _mm_setr_ph(1.0, 11., 12., 13., 14., 15., 16., 17.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_cvtss_sh() { + let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let b = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); + let r = _mm_maskz_cvtss_sh(0, a, b); + let e = _mm_setr_ph(0.0, 11., 12., 13., 14., 15., 16., 17.); + assert_eq_m128h(r, e); + let r = _mm_maskz_cvtss_sh(1, a, b); + let e = _mm_setr_ph(1.0, 11., 12., 13., 14., 15., 16., 17.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_cvt_roundss_sh() { + let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let b = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); + let r = _mm_cvt_roundss_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = 
_mm_setr_ph(1.0, 11., 12., 13., 14., 15., 16., 17.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_cvt_roundss_sh() { + let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let b = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); + let src = _mm_setr_ph(20., 21., 22., 23., 24., 25., 26., 27.); + let r = _mm_mask_cvt_roundss_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, ); - assert_eq_m512h(r, e); + let e = _mm_setr_ph(20., 11., 12., 13., 14., 15., 16., 17.); + assert_eq_m128h(r, e); + let r = _mm_mask_cvt_roundss_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 1, a, b, + ); + let e = _mm_setr_ph(1.0, 11., 12., 13., 14., 15., 16., 17.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_cvt_roundss_sh() { + let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let b = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); + let r = + _mm_maskz_cvt_roundss_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_setr_ph(0.0, 11., 12., 13., 14., 15., 16., 17.); + assert_eq_m128h(r, e); + let r = + _mm_maskz_cvt_roundss_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_setr_ph(1.0, 11., 12., 13., 14., 15., 16., 17.); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_permutexvar_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let idx = _mm_set_epi16(0, 2, 4, 6, 1, 3, 5, 7); - let r = _mm_permutexvar_ph(idx, a); - let e = _mm_setr_ph(1.0, 3.0, 5.0, 7.0, 2.0, 4.0, 6.0, 8.0); + unsafe fn test_mm_cvtpd_ph() { + let a = _mm_set_pd(1.0, 2.0); + let r = _mm_cvtpd_ph(a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 2.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_permutexvar_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let idx = _mm256_set_epi16(0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15); - let r = _mm256_permutexvar_ph(idx, a); - let e = _mm256_setr_ph( - 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, + unsafe fn test_mm_mask_cvtpd_ph() { + let a = _mm_set_pd(1.0, 2.0); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm_mask_cvtpd_ph(src, 0b01, a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 16., 2.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_cvtpd_ph() { + let a = _mm_set_pd(1.0, 2.0); + let r = _mm_maskz_cvtpd_ph(0b01, a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_cvtpd_ph() { + let a = _mm256_set_pd(1.0, 2.0, 3.0, 4.0); + let r = _mm256_cvtpd_ph(a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_cvtpd_ph() { + let a = _mm256_set_pd(1.0, 2.0, 3.0, 4.0); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm256_mask_cvtpd_ph(src, 0b0101, a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 14., 2.0, 16.0, 4.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_cvtpd_ph() { + let a = _mm256_set_pd(1.0, 2.0, 3.0, 4.0); + let r = _mm256_maskz_cvtpd_ph(0b0101, a); + let e = 
_mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 0.0, 4.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_cvtpd_ph() { + let a = _mm512_set_pd(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm512_cvtpd_ph(a); + let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_cvtpd_ph() { + let a = _mm512_set_pd(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm512_mask_cvtpd_ph(src, 0b01010101, a); + let e = _mm_set_ph(10., 2., 12., 4., 14., 6., 16., 8.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_cvtpd_ph() { + let a = _mm512_set_pd(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm512_maskz_cvtpd_ph(0b01010101, a); + let e = _mm_set_ph(0.0, 2., 0.0, 4., 0.0, 6., 0.0, 8.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_cvt_roundpd_ph() { + let a = _mm512_set_pd(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm512_cvt_roundpd_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a); + let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_cvt_roundpd_ph() { + let a = _mm512_set_pd(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm512_mask_cvt_roundpd_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0b01010101, a, ); - assert_eq_m256h(r, e); + let e = _mm_set_ph(10., 2., 12., 4., 14., 6., 16., 8.); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_permutexvar_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, + unsafe fn test_mm512_maskz_cvt_roundpd_ph() { + let a = _mm512_set_pd(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm512_maskz_cvt_roundpd_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101, a, ); - let idx = _mm512_set_epi16( - 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 1, 3, 5, 7, 9, 11, 13, 15, - 17, 19, 21, 23, 25, 27, 29, 31, + let e = _mm_set_ph(0.0, 2., 0.0, 4., 0.0, 6., 0.0, 8.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_cvtsd_sh() { + let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let b = _mm_setr_pd(1.0, 2.0); + let r = _mm_cvtsd_sh(a, b); + let e = _mm_setr_ph(1.0, 11., 12., 13., 14., 15., 16., 17.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_cvtsd_sh() { + let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let b = _mm_setr_pd(1.0, 2.0); + let src = _mm_setr_ph(20., 21., 22., 23., 24., 25., 26., 27.); + let r = _mm_mask_cvtsd_sh(src, 0, a, b); + let e = _mm_setr_ph(20., 11., 12., 13., 14., 15., 16., 17.); + assert_eq_m128h(r, e); + let r = _mm_mask_cvtsd_sh(src, 1, a, b); + let e = _mm_setr_ph(1.0, 11., 12., 13., 14., 15., 16., 17.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_cvtsd_sh() { + let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let b = _mm_setr_pd(1.0, 2.0); + let r = _mm_maskz_cvtsd_sh(0, a, b); + let e = _mm_setr_ph(0.0, 11., 12., 13., 
14., 15., 16., 17.);
+        assert_eq_m128h(r, e);
+        let r = _mm_maskz_cvtsd_sh(1, a, b);
+        let e = _mm_setr_ph(1.0, 11., 12., 13., 14., 15., 16., 17.);
+        assert_eq_m128h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm_cvt_roundsd_sh() {
+        let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.);
+        let b = _mm_setr_pd(1.0, 2.0);
+        let r = _mm_cvt_roundsd_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
+        let e = _mm_setr_ph(1.0, 11., 12., 13., 14., 15., 16., 17.);
+        assert_eq_m128h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm_mask_cvt_roundsd_sh() {
+        let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.);
+        let b = _mm_setr_pd(1.0, 2.0);
+        let src = _mm_setr_ph(20., 21., 22., 23., 24., 25., 26., 27.);
+        let r = _mm_mask_cvt_roundsd_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            src, 0, a, b, );
-        let r = _mm512_permutexvar_ph(idx, a);
-        let e = _mm512_setr_ph(
-            1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, 23.0, 25.0, 27.0, 29.0,
-            31.0, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0, 22.0, 24.0, 26.0, 28.0,
-            30.0, 32.0,
+        let e = _mm_setr_ph(20., 11., 12., 13., 14., 15., 16., 17.);
+        assert_eq_m128h(r, e);
+        let r = _mm_mask_cvt_roundsd_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            src, 1, a, b, );
-        assert_eq_m512h(r, e);
+        let e = _mm_setr_ph(1.0, 11., 12., 13., 14., 15., 16., 17.);
+        assert_eq_m128h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm_maskz_cvt_roundsd_sh() {
+        let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.);
+        let b = _mm_setr_pd(1.0, 2.0);
+        let r =
+            _mm_maskz_cvt_roundsd_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b);
+        let e = _mm_setr_ph(0.0, 11., 12., 13., 14., 15., 16., 17.);
+        assert_eq_m128h(r, e);
+        let r =
+            _mm_maskz_cvt_roundsd_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b);
+        let e = _mm_setr_ph(1.0, 11., 12., 13., 14., 15., 16., 17.);
+        assert_eq_m128h(r, e);
+    }
 }
diff --git a/crates/core_arch/src/x86_64/avx512fp16.rs b/crates/core_arch/src/x86_64/avx512fp16.rs
new file mode 100644
index 0000000000..ebd85ed4ad
--- /dev/null
+++ b/crates/core_arch/src/x86_64/avx512fp16.rs
@@ -0,0 +1,129 @@
+use crate::core_arch::x86::*;
+#[cfg(test)]
+use stdarch_test::assert_instr;
+
+/// Convert the signed 64-bit integer b to a half-precision (16-bit) floating-point element, store the
+/// result in the lower element of dst, and copy the upper 7 packed elements from a to the upper elements
+/// of dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvti64_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtsi2sh))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cvti64_sh(a: __m128h, b: i64) -> __m128h {
+    vcvtsi642sh(a, b, _MM_FROUND_CUR_DIRECTION)
+}
+
+/// Convert the signed 64-bit integer b to a half-precision (16-bit) floating-point element, store the
+/// result in the lower element of dst, and copy the upper 7 packed elements from a to the upper elements
+/// of dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundi64_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtsi2sh, ROUNDING = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cvt_roundi64_sh<const ROUNDING: i32>(a: __m128h, b: i64) -> __m128h {
+    static_assert_rounding!(ROUNDING);
+    vcvtsi642sh(a, b, ROUNDING)
+}
+
+/// Convert the unsigned 64-bit integer b to a half-precision (16-bit) floating-point element, store the
+/// result in the lower element of dst, and copy the upper 7 packed elements from a to the upper elements
+/// of dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtu64_sh)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtusi2sh))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cvtu64_sh(a: __m128h, b: u64) -> __m128h {
+    vcvtusi642sh(a, b, _MM_FROUND_CUR_DIRECTION)
+}
+
+/// Convert the unsigned 64-bit integer b to a half-precision (16-bit) floating-point element, store the
+/// result in the lower element of dst, and copy the upper 7 packed elements from a to the upper elements
+/// of dst.
+/// +/// Rounding is done according to the rounding parameter, which can be one of: +/// +/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions +/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions +/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions +/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions +/// _MM_FROUND_CUR_DIRECTION +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundu64_sh) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vcvtusi2sh, ROUNDING = 8))] +#[rustc_legacy_const_generics(2)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_cvt_roundu64_sh(a: __m128h, b: u64) -> __m128h { + static_assert_rounding!(ROUNDING); + vcvtusi642sh(a, b, ROUNDING) +} + +#[allow(improper_ctypes)] +extern "C" { + #[link_name = "llvm.x86.avx512fp16.vcvtsi642sh"] + fn vcvtsi642sh(a: __m128h, b: i64, rounding: i32) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.vcvtusi642sh"] + fn vcvtusi642sh(a: __m128h, b: u64, rounding: i32) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.vcvtsh2si64"] + fn vcvtsh2si64(a: __m128h, rounding: i32) -> i64; + #[link_name = "llvm.x86.avx512fp16.vcvtsh2usi64"] + fn vcvtsh2usi64(a: __m128h, rounding: i32) -> u64; + #[link_name = "llvm.x86.avx512fp16.vcvttsh2si64"] + fn vcvttsh2si64(a: __m128h, sae: i32) -> i64; + #[link_name = "llvm.x86.avx512fp16.vcvttsh2usi64"] + fn vcvttsh2usi64(a: __m128h, sae: i32) -> u64; +} + +#[cfg(test)] +mod tests { + use crate::core_arch::{x86::*, x86_64::*}; + use stdarch_test::simd_test; + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_cvti64_sh() { + let a = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm_cvti64_sh(a, 10); + let e = _mm_setr_ph(10.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_cvt_roundi64_sh() { + let a = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm_cvt_roundi64_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, 10); + let e = _mm_setr_ph(10.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_cvtu64_sh() { + let a = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm_cvtu64_sh(a, 10); + let e = _mm_setr_ph(10.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_cvt_roundu64_sh() { + let a = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm_cvt_roundu64_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, 10); + let e = _mm_setr_ph(10.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + assert_eq_m128h(r, e); + } +} diff --git a/crates/core_arch/src/x86_64/mod.rs b/crates/core_arch/src/x86_64/mod.rs index 2e0139c5da..e4ad644edf 100644 --- a/crates/core_arch/src/x86_64/mod.rs +++ b/crates/core_arch/src/x86_64/mod.rs @@ -73,3 +73,7 @@ pub use self::adx::*; mod bt; #[stable(feature = "simd_x86_bittest", since = "1.55.0")] pub use self::bt::*; + +mod avx512fp16; +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub use self::avx512fp16::*; From 57641cc5088370828acd3a0315f7908047b63e81 Mon Sep 17 00:00:00 2001 From: sayantn Date: Wed, 17 Jul 2024 17:17:20 +0530 Subject: [PATCH 09/11] AVX512FP16 Part 8: Convert from f16 --- 
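A minimal caller-side sketch of how the rounding const generics in this series are meant to be used (a sketch only: it assumes a nightly toolchain with the `f16` and `stdarch_x86_avx512_f16` features and an AVX512-FP16 target; `_mm_cvti64_sh` and `_mm_cvt_roundi64_sh` are the intrinsics defined above, while the wrapper name is illustrative):

    use core::arch::x86_64::*;

    // The rounding mode is a compile-time constant: a direction OR-ed with
    // _MM_FROUND_NO_EXC, or _MM_FROUND_CUR_DIRECTION to defer to MXCSR
    // (deferring is exactly what the non-`round` form does).
    #[target_feature(enable = "avx512fp16")]
    unsafe fn i64_to_f16_lane0(a: __m128h, x: i64) -> (__m128h, __m128h) {
        let truncated = _mm_cvt_roundi64_sh::<{ _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC }>(a, x);
        let current = _mm_cvti64_sh(a, x); // rounds per the current MXCSR mode
        (truncated, current)
    }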
crates/core_arch/missing-x86.md | 201 - crates/core_arch/src/x86/avx512fp16.rs | 16924 +++++++++++++------- crates/core_arch/src/x86_64/avx512fp16.rs | 180 + 3 files changed, 11072 insertions(+), 6233 deletions(-) diff --git a/crates/core_arch/missing-x86.md b/crates/core_arch/missing-x86.md index 1c2d0a6d7b..94ecc929ef 100644 --- a/crates/core_arch/missing-x86.md +++ b/crates/core_arch/missing-x86.md @@ -56,217 +56,16 @@ * [ ] [`_mm256_cvtsh_h`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtsh_h) * [ ] [`_mm256_set1_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_set1_pch) * [ ] [`_mm512_cmp_round_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmp_round_ph_mask) - * [ ] [`_mm512_cvt_roundph_epi16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundph_epi16) - * [ ] [`_mm512_cvt_roundph_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundph_epi32) - * [ ] [`_mm512_cvt_roundph_epi64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundph_epi64) - * [ ] [`_mm512_cvt_roundph_epu16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundph_epu16) - * [ ] [`_mm512_cvt_roundph_epu32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundph_epu32) - * [ ] [`_mm512_cvt_roundph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundph_epu64) - * [ ] [`_mm512_cvt_roundph_pd`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundph_pd) - * [ ] [`_mm512_cvtph_epi16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtph_epi16) - * [ ] [`_mm512_cvtph_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtph_epi32) - * [ ] [`_mm512_cvtph_epi64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtph_epi64) - * [ ] [`_mm512_cvtph_epu16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtph_epu16) - * [ ] [`_mm512_cvtph_epu32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtph_epu32) - * [ ] [`_mm512_cvtph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtph_epu64) - * [ ] [`_mm512_cvtph_pd`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtph_pd) * [ ] [`_mm512_cvtsh_h`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtsh_h) - * [ ] [`_mm512_cvtt_roundph_epi16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtt_roundph_epi16) - * [ ] [`_mm512_cvtt_roundph_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtt_roundph_epi32) - * [ ] [`_mm512_cvtt_roundph_epi64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtt_roundph_epi64) - * [ ] [`_mm512_cvtt_roundph_epu16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtt_roundph_epu16) - * [ ] [`_mm512_cvtt_roundph_epu32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtt_roundph_epu32) - * [ ] [`_mm512_cvtt_roundph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtt_roundph_epu64) - * [ ] [`_mm512_cvttph_epi16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvttph_epi16) - * [ ] 
[`_mm512_cvttph_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvttph_epi32) - * [ ] [`_mm512_cvttph_epi64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvttph_epi64) - * [ ] [`_mm512_cvttph_epu16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvttph_epu16) - * [ ] [`_mm512_cvttph_epu32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvttph_epu32) - * [ ] [`_mm512_cvttph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvttph_epu64) - * [ ] [`_mm512_cvtx_roundph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtx_roundph_ps) - * [ ] [`_mm512_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtxph_ps) * [ ] [`_mm512_mask_cmp_round_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmp_round_ph_mask) - * [ ] [`_mm512_mask_cvt_roundph_epi16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundph_epi16) - * [ ] [`_mm512_mask_cvt_roundph_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundph_epi32) - * [ ] [`_mm512_mask_cvt_roundph_epi64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundph_epi64) - * [ ] [`_mm512_mask_cvt_roundph_epu16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundph_epu16) - * [ ] [`_mm512_mask_cvt_roundph_epu32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundph_epu32) - * [ ] [`_mm512_mask_cvt_roundph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundph_epu64) - * [ ] [`_mm512_mask_cvt_roundph_pd`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundph_pd) - * [ ] [`_mm512_mask_cvtph_epi16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtph_epi16) - * [ ] [`_mm512_mask_cvtph_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtph_epi32) - * [ ] [`_mm512_mask_cvtph_epi64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtph_epi64) - * [ ] [`_mm512_mask_cvtph_epu16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtph_epu16) - * [ ] [`_mm512_mask_cvtph_epu32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtph_epu32) - * [ ] [`_mm512_mask_cvtph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtph_epu64) - * [ ] [`_mm512_mask_cvtph_pd`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtph_pd) - * [ ] [`_mm512_mask_cvtt_roundph_epi16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtt_roundph_epi16) - * [ ] [`_mm512_mask_cvtt_roundph_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtt_roundph_epi32) - * [ ] [`_mm512_mask_cvtt_roundph_epi64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtt_roundph_epi64) - * [ ] [`_mm512_mask_cvtt_roundph_epu16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtt_roundph_epu16) - * [ ] [`_mm512_mask_cvtt_roundph_epu32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtt_roundph_epu32) - * [ ] 
[`_mm512_mask_cvtt_roundph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtt_roundph_epu64) - * [ ] [`_mm512_mask_cvttph_epi16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvttph_epi16) - * [ ] [`_mm512_mask_cvttph_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvttph_epi32) - * [ ] [`_mm512_mask_cvttph_epi64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvttph_epi64) - * [ ] [`_mm512_mask_cvttph_epu16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvttph_epu16) - * [ ] [`_mm512_mask_cvttph_epu32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvttph_epu32) - * [ ] [`_mm512_mask_cvttph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvttph_epu64) - * [ ] [`_mm512_mask_cvtx_roundph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtx_roundph_ps) - * [ ] [`_mm512_mask_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtxph_ps) - * [ ] [`_mm512_maskz_cvt_roundph_epi16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundph_epi16) - * [ ] [`_mm512_maskz_cvt_roundph_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundph_epi32) - * [ ] [`_mm512_maskz_cvt_roundph_epi64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundph_epi64) - * [ ] [`_mm512_maskz_cvt_roundph_epu16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundph_epu16) - * [ ] [`_mm512_maskz_cvt_roundph_epu32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundph_epu32) - * [ ] [`_mm512_maskz_cvt_roundph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundph_epu64) - * [ ] [`_mm512_maskz_cvt_roundph_pd`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundph_pd) - * [ ] [`_mm512_maskz_cvtph_epi16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtph_epi16) - * [ ] [`_mm512_maskz_cvtph_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtph_epi32) - * [ ] [`_mm512_maskz_cvtph_epi64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtph_epi64) - * [ ] [`_mm512_maskz_cvtph_epu16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtph_epu16) - * [ ] [`_mm512_maskz_cvtph_epu32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtph_epu32) - * [ ] [`_mm512_maskz_cvtph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtph_epu64) - * [ ] [`_mm512_maskz_cvtph_pd`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtph_pd) - * [ ] [`_mm512_maskz_cvtt_roundph_epi16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtt_roundph_epi16) - * [ ] [`_mm512_maskz_cvtt_roundph_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtt_roundph_epi32) - * [ ] [`_mm512_maskz_cvtt_roundph_epi64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtt_roundph_epi64) - * [ ] 
[`_mm512_maskz_cvtt_roundph_epu16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtt_roundph_epu16) - * [ ] [`_mm512_maskz_cvtt_roundph_epu32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtt_roundph_epu32) - * [ ] [`_mm512_maskz_cvtt_roundph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtt_roundph_epu64) - * [ ] [`_mm512_maskz_cvttph_epi16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvttph_epi16) - * [ ] [`_mm512_maskz_cvttph_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvttph_epi32) - * [ ] [`_mm512_maskz_cvttph_epi64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvttph_epi64) - * [ ] [`_mm512_maskz_cvttph_epu16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvttph_epu16) - * [ ] [`_mm512_maskz_cvttph_epu32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvttph_epu32) - * [ ] [`_mm512_maskz_cvttph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvttph_epu64) - * [ ] [`_mm512_maskz_cvtx_roundph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtx_roundph_ps) - * [ ] [`_mm512_maskz_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtxph_ps) * [ ] [`_mm512_set1_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_set1_pch) - * [ ] [`_mm_cvt_roundsh_i32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundsh_i32) - * [ ] [`_mm_cvt_roundsh_i64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundsh_i64) - * [ ] [`_mm_cvt_roundsh_sd`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundsh_sd) - * [ ] [`_mm_cvt_roundsh_ss`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundsh_ss) - * [ ] [`_mm_cvt_roundsh_u32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundsh_u32) - * [ ] [`_mm_cvt_roundsh_u64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundsh_u64) * [ ] [`_mm_cvtsh_h`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsh_h) - * [ ] [`_mm_cvtsh_i32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsh_i32) - * [ ] [`_mm_cvtsh_i64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsh_i64) - * [ ] [`_mm_cvtsh_sd`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsh_sd) - * [ ] [`_mm_cvtsh_ss`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsh_ss) - * [ ] [`_mm_cvtsh_u32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsh_u32) - * [ ] [`_mm_cvtsh_u64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsh_u64) * [ ] [`_mm_cvtsi128_si16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi128_si16) * [ ] [`_mm_cvtsi16_si128`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi16_si128) - * [ ] [`_mm_cvtt_roundsh_i32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtt_roundsh_i32) - * [ ] [`_mm_cvtt_roundsh_i64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtt_roundsh_i64) - * [ ] 
[`_mm_cvtt_roundsh_u32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtt_roundsh_u32) - * [ ] [`_mm_cvtt_roundsh_u64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtt_roundsh_u64) - * [ ] [`_mm_cvttsh_i32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttsh_i32) - * [ ] [`_mm_cvttsh_i64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttsh_i64) - * [ ] [`_mm_cvttsh_u32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttsh_u32) - * [ ] [`_mm_cvttsh_u64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttsh_u64) - * [ ] [`_mm_mask_cvt_roundsh_sd`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvt_roundsh_sd) - * [ ] [`_mm_mask_cvt_roundsh_ss`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvt_roundsh_ss) - * [ ] [`_mm_mask_cvtsh_sd`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtsh_sd) - * [ ] [`_mm_mask_cvtsh_ss`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtsh_ss) - * [ ] [`_mm_maskz_cvt_roundsh_sd`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvt_roundsh_sd) - * [ ] [`_mm_maskz_cvt_roundsh_ss`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvt_roundsh_ss) - * [ ] [`_mm_maskz_cvtsh_sd`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtsh_sd) - * [ ] [`_mm_maskz_cvtsh_ss`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtsh_ss) * [ ] [`_mm_set1_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set1_pch)
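The entries above drop out of missing-x86.md because this patch implements the f16-to-integer and f16-to-float conversions. As a quick orientation, a sketch of the three packed forms every conversion comes in (assuming the same nightly features as the rest of the series; `_mm512_cvtph_epi16` and its mask/maskz variants are the functions added below in avx512fp16.rs):

    use core::arch::x86_64::*;

    #[target_feature(enable = "avx512fp16")]
    unsafe fn f16_to_i16_forms(a: __m512h, src: __m512i) -> (__m512i, __m512i, __m512i) {
        let all = _mm512_cvtph_epi16(a); // every one of the 32 lanes converted
        // Merge-masked: lanes with a clear mask bit keep the value from `src`.
        let merged = _mm512_mask_cvtph_epi16(src, 0x0000ffff, a);
        // Zero-masked: lanes with a clear mask bit become 0.
        let zeroed = _mm512_maskz_cvtph_epi16(0x0000ffff, a);
        (all, merged, zeroed)
    }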

-
["AVX512_FP16", "AVX512VL"]

- - * [ ] [`_mm256_cvtph_epi16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtph_epi16) - * [ ] [`_mm256_cvtph_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtph_epi32) - * [ ] [`_mm256_cvtph_epi64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtph_epi64) - * [ ] [`_mm256_cvtph_epu16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtph_epu16) - * [ ] [`_mm256_cvtph_epu32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtph_epu32) - * [ ] [`_mm256_cvtph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtph_epu64) - * [ ] [`_mm256_cvtph_pd`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtph_pd) - * [ ] [`_mm256_cvttph_epi16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvttph_epi16) - * [ ] [`_mm256_cvttph_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvttph_epi32) - * [ ] [`_mm256_cvttph_epi64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvttph_epi64) - * [ ] [`_mm256_cvttph_epu16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvttph_epu16) - * [ ] [`_mm256_cvttph_epu32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvttph_epu32) - * [ ] [`_mm256_cvttph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvttph_epu64) - * [ ] [`_mm256_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtxph_ps) - * [ ] [`_mm256_mask_cvtph_epi16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtph_epi16) - * [ ] [`_mm256_mask_cvtph_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtph_epi32) - * [ ] [`_mm256_mask_cvtph_epi64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtph_epi64) - * [ ] [`_mm256_mask_cvtph_epu16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtph_epu16) - * [ ] [`_mm256_mask_cvtph_epu32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtph_epu32) - * [ ] [`_mm256_mask_cvtph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtph_epu64) - * [ ] [`_mm256_mask_cvtph_pd`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtph_pd) - * [ ] [`_mm256_mask_cvttph_epi16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvttph_epi16) - * [ ] [`_mm256_mask_cvttph_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvttph_epi32) - * [ ] [`_mm256_mask_cvttph_epi64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvttph_epi64) - * [ ] [`_mm256_mask_cvttph_epu16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvttph_epu16) - * [ ] [`_mm256_mask_cvttph_epu32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvttph_epu32) - * [ ] [`_mm256_mask_cvttph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvttph_epu64) - * [ ] [`_mm256_mask_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtxph_ps) - * [ ] 
[`_mm256_maskz_cvtph_epi16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtph_epi16) - * [ ] [`_mm256_maskz_cvtph_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtph_epi32) - * [ ] [`_mm256_maskz_cvtph_epi64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtph_epi64) - * [ ] [`_mm256_maskz_cvtph_epu16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtph_epu16) - * [ ] [`_mm256_maskz_cvtph_epu32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtph_epu32) - * [ ] [`_mm256_maskz_cvtph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtph_epu64) - * [ ] [`_mm256_maskz_cvtph_pd`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtph_pd) - * [ ] [`_mm256_maskz_cvttph_epi16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvttph_epi16) - * [ ] [`_mm256_maskz_cvttph_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvttph_epi32) - * [ ] [`_mm256_maskz_cvttph_epi64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvttph_epi64) - * [ ] [`_mm256_maskz_cvttph_epu16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvttph_epu16) - * [ ] [`_mm256_maskz_cvttph_epu32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvttph_epu32) - * [ ] [`_mm256_maskz_cvttph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvttph_epu64) - * [ ] [`_mm256_maskz_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtxph_ps) - * [ ] [`_mm_cvtph_epi16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtph_epi16) - * [ ] [`_mm_cvtph_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtph_epi32) - * [ ] [`_mm_cvtph_epi64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtph_epi64) - * [ ] [`_mm_cvtph_epu16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtph_epu16) - * [ ] [`_mm_cvtph_epu32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtph_epu32) - * [ ] [`_mm_cvtph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtph_epu64) - * [ ] [`_mm_cvtph_pd`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtph_pd) - * [ ] [`_mm_cvttph_epi16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttph_epi16) - * [ ] [`_mm_cvttph_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttph_epi32) - * [ ] [`_mm_cvttph_epi64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttph_epi64) - * [ ] [`_mm_cvttph_epu16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttph_epu16) - * [ ] [`_mm_cvttph_epu32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttph_epu32) - * [ ] [`_mm_cvttph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttph_epu64) - * [ ] [`_mm_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtxph_ps) - * [ ] [`_mm_mask_cvtph_epi16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtph_epi16) - * [ ] 
[`_mm_mask_cvtph_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtph_epi32) - * [ ] [`_mm_mask_cvtph_epi64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtph_epi64) - * [ ] [`_mm_mask_cvtph_epu16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtph_epu16) - * [ ] [`_mm_mask_cvtph_epu32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtph_epu32) - * [ ] [`_mm_mask_cvtph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtph_epu64) - * [ ] [`_mm_mask_cvtph_pd`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtph_pd) - * [ ] [`_mm_mask_cvttph_epi16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvttph_epi16) - * [ ] [`_mm_mask_cvttph_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvttph_epi32) - * [ ] [`_mm_mask_cvttph_epi64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvttph_epi64) - * [ ] [`_mm_mask_cvttph_epu16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvttph_epu16) - * [ ] [`_mm_mask_cvttph_epu32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvttph_epu32) - * [ ] [`_mm_mask_cvttph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvttph_epu64) - * [ ] [`_mm_mask_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtxph_ps) - * [ ] [`_mm_maskz_cvtph_epi16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtph_epi16) - * [ ] [`_mm_maskz_cvtph_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtph_epi32) - * [ ] [`_mm_maskz_cvtph_epi64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtph_epi64) - * [ ] [`_mm_maskz_cvtph_epu16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtph_epu16) - * [ ] [`_mm_maskz_cvtph_epu32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtph_epu32) - * [ ] [`_mm_maskz_cvtph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtph_epu64) - * [ ] [`_mm_maskz_cvtph_pd`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtph_pd) - * [ ] [`_mm_maskz_cvttph_epi16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvttph_epi16) - * [ ] [`_mm_maskz_cvttph_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvttph_epi32) - * [ ] [`_mm_maskz_cvttph_epi64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvttph_epi64) - * [ ] [`_mm_maskz_cvttph_epu16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvttph_epu16) - * [ ] [`_mm_maskz_cvttph_epu32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvttph_epu32) - * [ ] [`_mm_maskz_cvttph_epu64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvttph_epu64) - * [ ] [`_mm_maskz_cvtxph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtxph_ps) -
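The `cvt`/`cvtt` pairs in this list differ only in rounding: `cvt` uses the current rounding mode (round-to-nearest-even unless MXCSR says otherwise), while `cvtt` always truncates toward zero. A hedged sketch with the 128-bit variants (it assumes `_mm_set1_ph` from earlier in the series and that `_mm_cvttph_epi16` mirrors the `_mm_cvtph_epi16` signature added below):

    use core::arch::x86_64::*;

    #[target_feature(enable = "avx512fp16,avx512vl")]
    unsafe fn round_vs_truncate() -> (__m128i, __m128i) {
        let a = _mm_set1_ph(2.7);
        let rounded = _mm_cvtph_epi16(a); // 2.7 rounds to 3 under round-to-nearest
        let truncated = _mm_cvttph_epi16(a); // 2.7 truncates to 2
        (rounded, truncated)
    }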

- -
["AVX512_VP2INTERSECT", "AVX512F"]

* [ ] [`_mm512_2intersect_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_2intersect_epi32) diff --git a/crates/core_arch/src/x86/avx512fp16.rs b/crates/core_arch/src/x86/avx512fp16.rs index be99002e51..86d38feaec 100644 --- a/crates/core_arch/src/x86/avx512fp16.rs +++ b/crates/core_arch/src/x86/avx512fp16.rs @@ -13079,158 +13079,2952 @@ pub unsafe fn _mm_maskz_cvt_roundsd_sh( _mm_mask_cvt_roundsd_sh::(_mm_setzero_ph(), k, a, b) } -#[allow(improper_ctypes)] -extern "C" { - #[link_name = "llvm.x86.avx512fp16.mask.cmp.sh"] - fn vcmpsh(a: __m128h, b: __m128h, imm8: i32, mask: __mmask8, sae: i32) -> __mmask8; - #[link_name = "llvm.x86.avx512fp16.vcomi.sh"] - fn vcomish(a: __m128h, b: __m128h, imm8: i32, sae: i32) -> i32; +/// Convert packed half-precision (16-bit) floating-point elements in a to packed 16-bit integers, and +/// store the results in dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtph_epi16) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvtph2w))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_cvtph_epi16(a: __m128h) -> __m128i { + _mm_mask_cvtph_epi16(_mm_undefined_si128(), 0xff, a) +} - #[link_name = "llvm.x86.avx512fp16.add.ph.512"] - fn vaddph(a: __m512h, b: __m512h, rounding: i32) -> __m512h; - #[link_name = "llvm.x86.avx512fp16.sub.ph.512"] - fn vsubph(a: __m512h, b: __m512h, rounding: i32) -> __m512h; - #[link_name = "llvm.x86.avx512fp16.mul.ph.512"] - fn vmulph(a: __m512h, b: __m512h, rounding: i32) -> __m512h; - #[link_name = "llvm.x86.avx512fp16.div.ph.512"] - fn vdivph(a: __m512h, b: __m512h, rounding: i32) -> __m512h; +/// Convert packed half-precision (16-bit) floating-point elements in a to packed 16-bit integers, and +/// store the results in dst using writemask k (elements are copied from src when the corresponding +/// mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtph_epi16) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvtph2w))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mask_cvtph_epi16(src: __m128i, k: __mmask8, a: __m128h) -> __m128i { + transmute(vcvtph2w_128(a, src.as_i16x8(), k)) +} - #[link_name = "llvm.x86.avx512fp16.mask.add.sh.round"] - fn vaddsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; - #[link_name = "llvm.x86.avx512fp16.mask.sub.sh.round"] - fn vsubsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; - #[link_name = "llvm.x86.avx512fp16.mask.mul.sh.round"] - fn vmulsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; - #[link_name = "llvm.x86.avx512fp16.mask.div.sh.round"] - fn vdivsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; +/// Convert packed half-precision (16-bit) floating-point elements in a to packed 16-bit integers, and +/// store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtph_epi16) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvtph2w))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_maskz_cvtph_epi16(k: __mmask8, a: __m128h) -> __m128i { + _mm_mask_cvtph_epi16(_mm_setzero_si128(), k, a) +} - #[link_name = "llvm.x86.avx512fp16.mask.vfmul.cph.128"] - fn vfmulcph_128(a: __m128, b: __m128, src: __m128, k: __mmask8) -> __m128; - #[link_name = "llvm.x86.avx512fp16.mask.vfmul.cph.256"] - fn vfmulcph_256(a: __m256, b: __m256, src: __m256, k: __mmask8) -> __m256; - #[link_name = "llvm.x86.avx512fp16.mask.vfmul.cph.512"] - fn vfmulcph_512(a: __m512, b: __m512, src: __m512, k: __mmask16, rounding: i32) -> __m512; - #[link_name = "llvm.x86.avx512fp16.mask.vfmul.csh"] - fn vfmulcsh(a: __m128, b: __m128, src: __m128, k: __mmask8, rounding: i32) -> __m128; +/// Convert packed half-precision (16-bit) floating-point elements in a to packed 16-bit integers, and +/// store the results in dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtph_epi16) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvtph2w))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_cvtph_epi16(a: __m256h) -> __m256i { + _mm256_mask_cvtph_epi16(_mm256_undefined_si256(), 0xffff, a) +} - #[link_name = "llvm.x86.avx512fp16.mask.vfcmul.cph.128"] - fn vfcmulcph_128(a: __m128, b: __m128, src: __m128, k: __mmask8) -> __m128; - #[link_name = "llvm.x86.avx512fp16.mask.vfcmul.cph.256"] - fn vfcmulcph_256(a: __m256, b: __m256, src: __m256, k: __mmask8) -> __m256; - #[link_name = "llvm.x86.avx512fp16.mask.vfcmul.cph.512"] - fn vfcmulcph_512(a: __m512, b: __m512, src: __m512, k: __mmask16, rounding: i32) -> __m512; - #[link_name = "llvm.x86.avx512fp16.mask.vfcmul.csh"] - fn vfcmulcsh(a: __m128, b: __m128, src: __m128, k: __mmask8, rounding: i32) -> __m128; +/// Convert packed half-precision (16-bit) floating-point elements in a to packed 16-bit integers, and +/// store the results in dst using writemask k (elements are copied from src when the corresponding +/// mask bit is not set). 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtph_epi16) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvtph2w))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_mask_cvtph_epi16(src: __m256i, k: __mmask16, a: __m256h) -> __m256i { + transmute(vcvtph2w_256(a, src.as_i16x16(), k)) +} - #[link_name = "llvm.x86.avx512fp16.mask.vfmadd.cph.128"] - fn vfmaddcph_mask3_128(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128; - #[link_name = "llvm.x86.avx512fp16.maskz.vfmadd.cph.128"] - fn vfmaddcph_maskz_128(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128; - #[link_name = "llvm.x86.avx512fp16.mask.vfmadd.cph.256"] - fn vfmaddcph_mask3_256(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256; - #[link_name = "llvm.x86.avx512fp16.maskz.vfmadd.cph.256"] - fn vfmaddcph_maskz_256(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256; - #[link_name = "llvm.x86.avx512fp16.mask.vfmadd.cph.512"] - fn vfmaddcph_mask3_512(a: __m512, b: __m512, c: __m512, k: __mmask16, rounding: i32) -> __m512; - #[link_name = "llvm.x86.avx512fp16.maskz.vfmadd.cph.512"] - fn vfmaddcph_maskz_512(a: __m512, b: __m512, c: __m512, k: __mmask16, rounding: i32) -> __m512; - #[link_name = "llvm.x86.avx512fp16.mask.vfmadd.csh"] - fn vfmaddcsh_mask(a: __m128, b: __m128, c: __m128, k: __mmask8, rounding: i32) -> __m128; - #[link_name = "llvm.x86.avx512fp16.maskz.vfmadd.csh"] - fn vfmaddcsh_maskz(a: __m128, b: __m128, c: __m128, k: __mmask8, rounding: i32) -> __m128; +/// Convert packed half-precision (16-bit) floating-point elements in a to packed 16-bit integers, and +/// store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtph_epi16) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvtph2w))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_maskz_cvtph_epi16(k: __mmask16, a: __m256h) -> __m256i { + _mm256_mask_cvtph_epi16(_mm256_setzero_si256(), k, a) +} - #[link_name = "llvm.x86.avx512fp16.mask.vfcmadd.cph.128"] - fn vfcmaddcph_mask3_128(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128; - #[link_name = "llvm.x86.avx512fp16.maskz.vfcmadd.cph.128"] - fn vfcmaddcph_maskz_128(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128; - #[link_name = "llvm.x86.avx512fp16.mask.vfcmadd.cph.256"] - fn vfcmaddcph_mask3_256(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256; - #[link_name = "llvm.x86.avx512fp16.maskz.vfcmadd.cph.256"] - fn vfcmaddcph_maskz_256(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256; - #[link_name = "llvm.x86.avx512fp16.mask.vfcmadd.cph.512"] - fn vfcmaddcph_mask3_512(a: __m512, b: __m512, c: __m512, k: __mmask16, rounding: i32) - -> __m512; - #[link_name = "llvm.x86.avx512fp16.maskz.vfcmadd.cph.512"] - fn vfcmaddcph_maskz_512(a: __m512, b: __m512, c: __m512, k: __mmask16, rounding: i32) - -> __m512; - #[link_name = "llvm.x86.avx512fp16.mask.vfcmadd.csh"] - fn vfcmaddcsh_mask(a: __m128, b: __m128, c: __m128, k: __mmask8, rounding: i32) -> __m128; - #[link_name = "llvm.x86.avx512fp16.maskz.vfcmadd.csh"] - fn vfcmaddcsh_maskz(a: __m128, b: __m128, c: __m128, k: __mmask8, rounding: i32) -> __m128; +/// Convert packed half-precision (16-bit) floating-point elements in a to packed 16-bit integers, and +/// store the results in dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtph_epi16) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vcvtph2w))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_cvtph_epi16(a: __m512h) -> __m512i { + _mm512_mask_cvtph_epi16(_mm512_undefined_epi32(), 0xffffffff, a) +} - #[link_name = "llvm.x86.avx512fp16.vfmadd.ph.512"] - fn vfmaddph_512(a: __m512h, b: __m512h, c: __m512h, rounding: i32) -> __m512h; - #[link_name = "llvm.fma.f16"] - fn fmaf16(a: f16, b: f16, c: f16) -> f16; // TODO: use `crate::intrinsics::fmaf16` when it's available - #[link_name = "llvm.x86.avx512fp16.vfmadd.f16"] - fn vfmaddsh(a: f16, b: f16, c: f16, rounding: i32) -> f16; +/// Convert packed half-precision (16-bit) floating-point elements in a to packed 16-bit integers, and +/// store the results in dst using writemask k (elements are copied from src when the corresponding +/// mask bit is not set). 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtph_epi16) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vcvtph2w))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_mask_cvtph_epi16(src: __m512i, k: __mmask32, a: __m512h) -> __m512i { + transmute(vcvtph2w_512( + a, + src.as_i16x32(), + k, + _MM_FROUND_CUR_DIRECTION, + )) +} - #[link_name = "llvm.x86.avx512fp16.vfmaddsub.ph.128"] - fn vfmaddsubph_128(a: __m128h, b: __m128h, c: __m128h) -> __m128h; - #[link_name = "llvm.x86.avx512fp16.vfmaddsub.ph.256"] - fn vfmaddsubph_256(a: __m256h, b: __m256h, c: __m256h) -> __m256h; - #[link_name = "llvm.x86.avx512fp16.vfmaddsub.ph.512"] - fn vfmaddsubph_512(a: __m512h, b: __m512h, c: __m512h, rounding: i32) -> __m512h; +/// Convert packed half-precision (16-bit) floating-point elements in a to packed 16-bit integers, and +/// store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtph_epi16) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vcvtph2w))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_maskz_cvtph_epi16(k: __mmask32, a: __m512h) -> __m512i { + _mm512_mask_cvtph_epi16(_mm512_setzero_si512(), k, a) +} - #[link_name = "llvm.x86.avx512fp16.mask.rcp.ph.128"] - fn vrcpph_128(a: __m128h, src: __m128h, k: __mmask8) -> __m128h; - #[link_name = "llvm.x86.avx512fp16.mask.rcp.ph.256"] - fn vrcpph_256(a: __m256h, src: __m256h, k: __mmask16) -> __m256h; - #[link_name = "llvm.x86.avx512fp16.mask.rcp.ph.512"] - fn vrcpph_512(a: __m512h, src: __m512h, k: __mmask32) -> __m512h; - #[link_name = "llvm.x86.avx512fp16.mask.rcp.sh"] - fn vrcpsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8) -> __m128h; +/// Convert packed half-precision (16-bit) floating-point elements in a to packed 16-bit integers, and +/// store the results in dst. 
+/// +/// Rounding is done according to the rounding parameter, which can be one of: +/// +/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions +/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions +/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions +/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions +/// _MM_FROUND_CUR_DIRECTION +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundph_epi16) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vcvtph2w, ROUNDING = 8))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_cvt_roundph_epi16(a: __m512h) -> __m512i { + static_assert_rounding!(ROUNDING); + _mm512_mask_cvt_roundph_epi16::(_mm512_undefined_epi32(), 0xffffffff, a) +} - #[link_name = "llvm.x86.avx512fp16.mask.rsqrt.ph.128"] - fn vrsqrtph_128(a: __m128h, src: __m128h, k: __mmask8) -> __m128h; - #[link_name = "llvm.x86.avx512fp16.mask.rsqrt.ph.256"] - fn vrsqrtph_256(a: __m256h, src: __m256h, k: __mmask16) -> __m256h; - #[link_name = "llvm.x86.avx512fp16.mask.rsqrt.ph.512"] - fn vrsqrtph_512(a: __m512h, src: __m512h, k: __mmask32) -> __m512h; - #[link_name = "llvm.x86.avx512fp16.mask.rsqrt.sh"] - fn vrsqrtsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8) -> __m128h; +/// Convert packed half-precision (16-bit) floating-point elements in a to packed 16-bit integers, and +/// store the results in dst using writemask k (elements are copied from src when the corresponding +/// mask bit is not set). +/// +/// Rounding is done according to the rounding parameter, which can be one of: +/// +/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions +/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions +/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions +/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions +/// _MM_FROUND_CUR_DIRECTION +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundph_epi16) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vcvtph2w, ROUNDING = 8))] +#[rustc_legacy_const_generics(3)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_mask_cvt_roundph_epi16( + src: __m512i, + k: __mmask32, + a: __m512h, +) -> __m512i { + static_assert_rounding!(ROUNDING); + transmute(vcvtph2w_512(a, src.as_i16x32(), k, ROUNDING)) +} - #[link_name = "llvm.x86.avx512fp16.sqrt.ph.512"] - fn vsqrtph_512(a: __m512h, rounding: i32) -> __m512h; - #[link_name = "llvm.x86.avx512fp16.mask.sqrt.sh"] - fn vsqrtsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; +/// Convert packed half-precision (16-bit) floating-point elements in a to packed 16-bit integers, and +/// store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). 
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundph_epi16)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtph2w, ROUNDING = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cvt_roundph_epi16<const ROUNDING: i32>(
+    k: __mmask32,
+    a: __m512h,
+) -> __m512i {
+    static_assert_rounding!(ROUNDING);
+    _mm512_mask_cvt_roundph_epi16::<ROUNDING>(_mm512_setzero_si512(), k, a)
+}
-    #[link_name = "llvm.x86.avx512fp16.max.ph.128"]
-    fn vmaxph_128(a: __m128h, b: __m128h) -> __m128h;
-    #[link_name = "llvm.x86.avx512fp16.max.ph.256"]
-    fn vmaxph_256(a: __m256h, b: __m256h) -> __m256h;
-    #[link_name = "llvm.x86.avx512fp16.max.ph.512"]
-    fn vmaxph_512(a: __m512h, b: __m512h, sae: i32) -> __m512h;
-    #[link_name = "llvm.x86.avx512fp16.mask.max.sh.round"]
-    fn vmaxsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, sae: i32) -> __m128h;
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed unsigned 16-bit integers,
+/// and store the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtph_epu16)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtph2uw))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cvtph_epu16(a: __m128h) -> __m128i {
+    _mm_mask_cvtph_epu16(_mm_undefined_si128(), 0xff, a)
+}
-    #[link_name = "llvm.x86.avx512fp16.min.ph.128"]
-    fn vminph_128(a: __m128h, b: __m128h) -> __m128h;
-    #[link_name = "llvm.x86.avx512fp16.min.ph.256"]
-    fn vminph_256(a: __m256h, b: __m256h) -> __m256h;
-    #[link_name = "llvm.x86.avx512fp16.min.ph.512"]
-    fn vminph_512(a: __m512h, b: __m512h, sae: i32) -> __m512h;
-    #[link_name = "llvm.x86.avx512fp16.mask.min.sh.round"]
-    fn vminsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, sae: i32) -> __m128h;
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed unsigned 16-bit integers,
+/// and store the results in dst using writemask k (elements are copied from src when the corresponding
+/// mask bit is not set).
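+///
+/// A minimal sketch of the masking behaviour, for illustration only
+/// (`_mm_set1_ph` is assumed to be available from this feature set):
+///
+/// ```ignore
+/// let src = _mm_set1_epi16(9);
+/// // Lanes 0-3 hold the converted value 3; lanes 4-7 keep 9 from `src`.
+/// let r = _mm_mask_cvtph_epu16(src, 0b0000_1111, _mm_set1_ph(3.0));
+/// ```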
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtph_epu16)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtph2uw))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_cvtph_epu16(src: __m128i, k: __mmask8, a: __m128h) -> __m128i {
+    transmute(vcvtph2uw_128(a, src.as_u16x8(), k))
+}
-    #[link_name = "llvm.x86.avx512fp16.mask.getexp.ph.128"]
-    fn vgetexpph_128(a: __m128h, src: __m128h, k: __mmask8) -> __m128h;
-    #[link_name = "llvm.x86.avx512fp16.mask.getexp.ph.256"]
-    fn vgetexpph_256(a: __m256h, src: __m256h, k: __mmask16) -> __m256h;
-    #[link_name = "llvm.x86.avx512fp16.mask.getexp.ph.512"]
-    fn vgetexpph_512(a: __m512h, src: __m512h, k: __mmask32, sae: i32) -> __m512h;
-    #[link_name = "llvm.x86.avx512fp16.mask.getexp.sh"]
-    fn vgetexpsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, sae: i32) -> __m128h;
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed unsigned 16-bit integers,
+/// and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtph_epu16)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtph2uw))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_cvtph_epu16(k: __mmask8, a: __m128h) -> __m128i {
+    _mm_mask_cvtph_epu16(_mm_setzero_si128(), k, a)
+}
-    #[link_name = "llvm.x86.avx512fp16.mask.getmant.ph.128"]
-    fn vgetmantph_128(a: __m128h, imm8: i32, src: __m128h, k: __mmask8) -> __m128h;
-    #[link_name = "llvm.x86.avx512fp16.mask.getmant.ph.256"]
-    fn vgetmantph_256(a: __m256h, imm8: i32, src: __m256h, k: __mmask16) -> __m256h;
-    #[link_name = "llvm.x86.avx512fp16.mask.getmant.ph.512"]
-    fn vgetmantph_512(a: __m512h, imm8: i32, src: __m512h, k: __mmask32, sae: i32) -> __m512h;
-    #[link_name = "llvm.x86.avx512fp16.mask.getmant.sh"]
-    fn vgetmantsh(
-        a: __m128h,
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed unsigned 16-bit integers,
+/// and store the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtph_epu16)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtph2uw))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_cvtph_epu16(a: __m256h) -> __m256i {
+    _mm256_mask_cvtph_epu16(_mm256_undefined_si256(), 0xffff, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed unsigned 16-bit integers,
+/// and store the results in dst using writemask k (elements are copied from src when the corresponding
+/// mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtph_epu16)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtph2uw))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_mask_cvtph_epu16(src: __m256i, k: __mmask16, a: __m256h) -> __m256i {
+    transmute(vcvtph2uw_256(a, src.as_u16x16(), k))
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed unsigned 16-bit integers,
+/// and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtph_epu16)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtph2uw))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_maskz_cvtph_epu16(k: __mmask16, a: __m256h) -> __m256i {
+    _mm256_mask_cvtph_epu16(_mm256_setzero_si256(), k, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed unsigned 16-bit integers,
+/// and store the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtph_epu16)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtph2uw))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_cvtph_epu16(a: __m512h) -> __m512i {
+    _mm512_mask_cvtph_epu16(_mm512_undefined_epi32(), 0xffffffff, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed unsigned 16-bit integers,
+/// and store the results in dst using writemask k (elements are copied from src when the corresponding
+/// mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtph_epu16)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtph2uw))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_cvtph_epu16(src: __m512i, k: __mmask32, a: __m512h) -> __m512i {
+    transmute(vcvtph2uw_512(
+        a,
+        src.as_u16x32(),
+        k,
+        _MM_FROUND_CUR_DIRECTION,
+    ))
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed unsigned 16-bit integers,
+/// and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtph_epu16)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtph2uw))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cvtph_epu16(k: __mmask32, a: __m512h) -> __m512i {
+    _mm512_mask_cvtph_epu16(_mm512_setzero_si512(), k, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed unsigned 16-bit integers,
+/// and store the results in dst.
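+///
+/// For illustration, truncating 2.9 yields 2 while rounding up yields 3. This
+/// is a sketch only; `_mm512_set1_ph` is assumed to be available from this
+/// feature set:
+///
+/// ```ignore
+/// let a = _mm512_set1_ph(2.9);
+/// let t = _mm512_cvt_roundph_epu16::<{ _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC }>(a);    // 2 per lane
+/// let u = _mm512_cvt_roundph_epu16::<{ _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC }>(a); // 3 per lane
+/// ```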
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundph_epu16)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtph2uw, ROUNDING = 8))]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_cvt_roundph_epu16<const ROUNDING: i32>(a: __m512h) -> __m512i {
+    static_assert_rounding!(ROUNDING);
+    _mm512_mask_cvt_roundph_epu16::<ROUNDING>(_mm512_undefined_epi32(), 0xffffffff, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed unsigned 16-bit integers,
+/// and store the results in dst using writemask k (elements are copied from src when the corresponding
+/// mask bit is not set).
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundph_epu16)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtph2uw, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_cvt_roundph_epu16<const ROUNDING: i32>(
+    src: __m512i,
+    k: __mmask32,
+    a: __m512h,
+) -> __m512i {
+    static_assert_rounding!(ROUNDING);
+    transmute(vcvtph2uw_512(a, src.as_u16x32(), k, ROUNDING))
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed unsigned 16-bit integers,
+/// and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundph_epu16)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtph2uw, ROUNDING = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cvt_roundph_epu16<const ROUNDING: i32>(
+    k: __mmask32,
+    a: __m512h,
+) -> __m512i {
+    static_assert_rounding!(ROUNDING);
+    _mm512_mask_cvt_roundph_epu16::<ROUNDING>(_mm512_setzero_si512(), k, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 16-bit integers with
+/// truncation, and store the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttph_epi16)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvttph2w))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cvttph_epi16(a: __m128h) -> __m128i {
+    _mm_mask_cvttph_epi16(_mm_undefined_si128(), 0xff, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 16-bit integers with
+/// truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding
+/// mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvttph_epi16)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvttph2w))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_cvttph_epi16(src: __m128i, k: __mmask8, a: __m128h) -> __m128i {
+    transmute(vcvttph2w_128(a, src.as_i16x8(), k))
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 16-bit integers with
+/// truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding
+/// mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvttph_epi16)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvttph2w))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_cvttph_epi16(k: __mmask8, a: __m128h) -> __m128i {
+    _mm_mask_cvttph_epi16(_mm_setzero_si128(), k, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 16-bit integers with
+/// truncation, and store the results in dst.
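+///
+/// Truncation rounds toward zero, so -1.7 becomes -1 in every lane. A sketch
+/// only; `_mm256_set1_ph` is assumed to be available from this feature set:
+///
+/// ```ignore
+/// let r = _mm256_cvttph_epi16(_mm256_set1_ph(-1.7)); // each i16 lane holds -1
+/// ```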
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvttph_epi16)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvttph2w))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_cvttph_epi16(a: __m256h) -> __m256i {
+    _mm256_mask_cvttph_epi16(_mm256_undefined_si256(), 0xffff, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 16-bit integers with
+/// truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding
+/// mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvttph_epi16)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvttph2w))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_mask_cvttph_epi16(src: __m256i, k: __mmask16, a: __m256h) -> __m256i {
+    transmute(vcvttph2w_256(a, src.as_i16x16(), k))
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 16-bit integers with
+/// truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding
+/// mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvttph_epi16)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvttph2w))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_maskz_cvttph_epi16(k: __mmask16, a: __m256h) -> __m256i {
+    _mm256_mask_cvttph_epi16(_mm256_setzero_si256(), k, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 16-bit integers with
+/// truncation, and store the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvttph_epi16)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvttph2w))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_cvttph_epi16(a: __m512h) -> __m512i {
+    _mm512_mask_cvttph_epi16(_mm512_undefined_epi32(), 0xffffffff, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 16-bit integers with
+/// truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding
+/// mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvttph_epi16)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvttph2w))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_cvttph_epi16(src: __m512i, k: __mmask32, a: __m512h) -> __m512i {
+    transmute(vcvttph2w_512(
+        a,
+        src.as_i16x32(),
+        k,
+        _MM_FROUND_CUR_DIRECTION,
+    ))
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 16-bit integers with
+/// truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding
+/// mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvttph_epi16)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvttph2w))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cvttph_epi16(k: __mmask32, a: __m512h) -> __m512i {
+    _mm512_mask_cvttph_epi16(_mm512_setzero_si512(), k, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 16-bit integers with
+/// truncation, and store the results in dst.
+///
+/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtt_roundph_epi16)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvttph2w, SAE = 8))]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_cvtt_roundph_epi16<const SAE: i32>(a: __m512h) -> __m512i {
+    static_assert_sae!(SAE);
+    _mm512_mask_cvtt_roundph_epi16::<SAE>(_mm512_undefined_epi32(), 0xffffffff, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 16-bit integers with
+/// truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding
+/// mask bit is not set).
+///
+/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtt_roundph_epi16)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvttph2w, SAE = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_cvtt_roundph_epi16<const SAE: i32>(
+    src: __m512i,
+    k: __mmask32,
+    a: __m512h,
+) -> __m512i {
+    static_assert_sae!(SAE);
+    transmute(vcvttph2w_512(a, src.as_i16x32(), k, SAE))
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 16-bit integers with
+/// truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding
+/// mask bit is not set).
+///
+/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtt_roundph_epi16)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvttph2w, SAE = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cvtt_roundph_epi16<const SAE: i32>(k: __mmask32, a: __m512h) -> __m512i {
+    static_assert_sae!(SAE);
+    _mm512_mask_cvtt_roundph_epi16::<SAE>(_mm512_setzero_si512(), k, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed unsigned 16-bit integers with
+/// truncation, and store the results in dst.
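+///
+/// For illustration, 9.9 truncates to 9 in every unsigned lane (sketch only;
+/// `_mm_set1_ph` is assumed to be available from this feature set):
+///
+/// ```ignore
+/// let r = _mm_cvttph_epu16(_mm_set1_ph(9.9)); // each u16 lane holds 9
+/// ```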
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttph_epu16)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvttph2uw))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cvttph_epu16(a: __m128h) -> __m128i {
+    _mm_mask_cvttph_epu16(_mm_undefined_si128(), 0xff, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed unsigned 16-bit integers with
+/// truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding
+/// mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvttph_epu16)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvttph2uw))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_cvttph_epu16(src: __m128i, k: __mmask8, a: __m128h) -> __m128i {
+    transmute(vcvttph2uw_128(a, src.as_u16x8(), k))
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed unsigned 16-bit integers with
+/// truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding
+/// mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvttph_epu16)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvttph2uw))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_cvttph_epu16(k: __mmask8, a: __m128h) -> __m128i {
+    _mm_mask_cvttph_epu16(_mm_setzero_si128(), k, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed unsigned 16-bit integers with
+/// truncation, and store the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvttph_epu16)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvttph2uw))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_cvttph_epu16(a: __m256h) -> __m256i {
+    _mm256_mask_cvttph_epu16(_mm256_undefined_si256(), 0xffff, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed unsigned 16-bit integers with
+/// truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding
+/// mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvttph_epu16)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvttph2uw))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_mask_cvttph_epu16(src: __m256i, k: __mmask16, a: __m256h) -> __m256i {
+    transmute(vcvttph2uw_256(a, src.as_u16x16(), k))
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed unsigned 16-bit integers with
+/// truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding
+/// mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvttph_epu16)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvttph2uw))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_maskz_cvttph_epu16(k: __mmask16, a: __m256h) -> __m256i {
+    _mm256_mask_cvttph_epu16(_mm256_setzero_si256(), k, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed unsigned 16-bit integers with
+/// truncation, and store the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvttph_epu16)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvttph2uw))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_cvttph_epu16(a: __m512h) -> __m512i {
+    _mm512_mask_cvttph_epu16(_mm512_undefined_epi32(), 0xffffffff, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed unsigned 16-bit integers with
+/// truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding
+/// mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvttph_epu16)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvttph2uw))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_cvttph_epu16(src: __m512i, k: __mmask32, a: __m512h) -> __m512i {
+    transmute(vcvttph2uw_512(
+        a,
+        src.as_u16x32(),
+        k,
+        _MM_FROUND_CUR_DIRECTION,
+    ))
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed unsigned 16-bit integers with
+/// truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding
+/// mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvttph_epu16)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvttph2uw))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cvttph_epu16(k: __mmask32, a: __m512h) -> __m512i {
+    _mm512_mask_cvttph_epu16(_mm512_setzero_si512(), k, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed unsigned 16-bit integers with
+/// truncation, and store the results in dst.
+///
+/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtt_roundph_epu16)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvttph2uw, SAE = 8))]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_cvtt_roundph_epu16<const SAE: i32>(a: __m512h) -> __m512i {
+    static_assert_sae!(SAE);
+    _mm512_mask_cvtt_roundph_epu16::<SAE>(_mm512_undefined_epi32(), 0xffffffff, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed unsigned 16-bit integers with
+/// truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding
+/// mask bit is not set).
+///
+/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtt_roundph_epu16)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvttph2uw, SAE = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_cvtt_roundph_epu16<const SAE: i32>(
+    src: __m512i,
+    k: __mmask32,
+    a: __m512h,
+) -> __m512i {
+    static_assert_sae!(SAE);
+    transmute(vcvttph2uw_512(a, src.as_u16x32(), k, SAE))
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed unsigned 16-bit integers with
+/// truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding
+/// mask bit is not set).
+///
+/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtt_roundph_epu16)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvttph2uw, SAE = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cvtt_roundph_epu16<const SAE: i32>(k: __mmask32, a: __m512h) -> __m512i {
+    static_assert_sae!(SAE);
+    _mm512_mask_cvtt_roundph_epu16::<SAE>(_mm512_setzero_si512(), k, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit integers, and store the
+/// results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtph_epi32)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtph2dq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cvtph_epi32(a: __m128h) -> __m128i {
+    _mm_mask_cvtph_epi32(_mm_undefined_si128(), 0xff, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit integers, and store the
+/// results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtph_epi32)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtph2dq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_cvtph_epi32(src: __m128i, k: __mmask8, a: __m128h) -> __m128i {
+    transmute(vcvtph2dq_128(a, src.as_i32x4(), k))
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit integers, and store the
+/// results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtph_epi32)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtph2dq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_cvtph_epi32(k: __mmask8, a: __m128h) -> __m128i {
+    _mm_mask_cvtph_epi32(_mm_setzero_si128(), k, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit integers, and store the
+/// results in dst.
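+///
+/// All 8 `f16` elements of the 128-bit input widen into the 8 `i32` lanes of
+/// the 256-bit result. A sketch only; `_mm_set1_ph` is assumed to be available
+/// from this feature set:
+///
+/// ```ignore
+/// let r = _mm256_cvtph_epi32(_mm_set1_ph(7.0)); // eight i32 lanes of 7
+/// ```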
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtph_epi32)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtph2dq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_cvtph_epi32(a: __m128h) -> __m256i {
+    _mm256_mask_cvtph_epi32(_mm256_undefined_si256(), 0xff, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit integers, and store the
+/// results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtph_epi32)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtph2dq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_mask_cvtph_epi32(src: __m256i, k: __mmask8, a: __m128h) -> __m256i {
+    transmute(vcvtph2dq_256(a, src.as_i32x8(), k))
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit integers, and store the
+/// results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtph_epi32)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtph2dq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_maskz_cvtph_epi32(k: __mmask8, a: __m128h) -> __m256i {
+    _mm256_mask_cvtph_epi32(_mm256_setzero_si256(), k, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit integers, and store the
+/// results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtph_epi32)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtph2dq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_cvtph_epi32(a: __m256h) -> __m512i {
+    _mm512_mask_cvtph_epi32(_mm512_undefined_epi32(), 0xffff, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit integers, and store the
+/// results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtph_epi32)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtph2dq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_cvtph_epi32(src: __m512i, k: __mmask16, a: __m256h) -> __m512i {
+    transmute(vcvtph2dq_512(
+        a,
+        src.as_i32x16(),
+        k,
+        _MM_FROUND_CUR_DIRECTION,
+    ))
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit integers, and store the
+/// results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtph_epi32)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtph2dq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cvtph_epi32(k: __mmask16, a: __m256h) -> __m512i {
+    _mm512_mask_cvtph_epi32(_mm512_setzero_si512(), k, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit integers, and store the
+/// results in dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundph_epi32)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtph2dq, ROUNDING = 8))]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_cvt_roundph_epi32<const ROUNDING: i32>(a: __m256h) -> __m512i {
+    static_assert_rounding!(ROUNDING);
+    _mm512_mask_cvt_roundph_epi32::<ROUNDING>(_mm512_undefined_epi32(), 0xffff, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit integers, and store the
+/// results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundph_epi32)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtph2dq, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_cvt_roundph_epi32<const ROUNDING: i32>(
+    src: __m512i,
+    k: __mmask16,
+    a: __m256h,
+) -> __m512i {
+    static_assert_rounding!(ROUNDING);
+    transmute(vcvtph2dq_512(a, src.as_i32x16(), k, ROUNDING))
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit integers, and store the
+/// results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
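+///
+/// A minimal sketch of the zeroing behaviour, for illustration only
+/// (`_mm256_set1_ph` is assumed to be available from this feature set):
+///
+/// ```ignore
+/// // Lanes 0-7 hold 4 (3.5 rounded to nearest even); lanes 8-15 are zeroed.
+/// let r = _mm512_maskz_cvt_roundph_epi32::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+///     0x00ff,
+///     _mm256_set1_ph(3.5),
+/// );
+/// ```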
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundph_epi32)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtph2dq, ROUNDING = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cvt_roundph_epi32<const ROUNDING: i32>(
+    k: __mmask16,
+    a: __m256h,
+) -> __m512i {
+    static_assert_rounding!(ROUNDING);
+    _mm512_mask_cvt_roundph_epi32::<ROUNDING>(_mm512_setzero_si512(), k, a)
+}
+
+/// Convert the lower half-precision (16-bit) floating-point element in a to a 32-bit integer, and store
+/// the result in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsh_i32)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtsh2si))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cvtsh_i32(a: __m128h) -> i32 {
+    vcvtsh2si32(a, _MM_FROUND_CUR_DIRECTION)
+}
+
+/// Convert the lower half-precision (16-bit) floating-point element in a to a 32-bit integer, and store
+/// the result in dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundsh_i32)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtsh2si, ROUNDING = 8))]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cvt_roundsh_i32<const ROUNDING: i32>(a: __m128h) -> i32 {
+    static_assert_rounding!(ROUNDING);
+    vcvtsh2si32(a, ROUNDING)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit unsigned integers, and store the
+/// results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtph_epu32)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtph2udq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cvtph_epu32(a: __m128h) -> __m128i {
+    _mm_mask_cvtph_epu32(_mm_undefined_si128(), 0xff, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit unsigned integers, and store
+/// the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
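+///
+/// Only the lower 4 `f16` elements of `a` participate, one per 32-bit lane of
+/// the result. An illustrative sketch (`_mm_set1_ph` is assumed to be
+/// available from this feature set):
+///
+/// ```ignore
+/// let src = _mm_set1_epi32(7);
+/// // Lanes 0 and 1 hold the converted value 2; lanes 2 and 3 keep 7 from `src`.
+/// let r = _mm_mask_cvtph_epu32(src, 0b0011, _mm_set1_ph(2.0));
+/// ```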
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtph_epu32)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtph2udq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_cvtph_epu32(src: __m128i, k: __mmask8, a: __m128h) -> __m128i {
+    transmute(vcvtph2udq_128(a, src.as_u32x4(), k))
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit unsigned integers, and store
+/// the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtph_epu32)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtph2udq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_cvtph_epu32(k: __mmask8, a: __m128h) -> __m128i {
+    _mm_mask_cvtph_epu32(_mm_setzero_si128(), k, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit unsigned integers, and store
+/// the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtph_epu32)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtph2udq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_cvtph_epu32(a: __m128h) -> __m256i {
+    _mm256_mask_cvtph_epu32(_mm256_undefined_si256(), 0xff, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit unsigned integers, and store
+/// the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtph_epu32)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtph2udq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_mask_cvtph_epu32(src: __m256i, k: __mmask8, a: __m128h) -> __m256i {
+    transmute(vcvtph2udq_256(a, src.as_u32x8(), k))
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit unsigned integers, and store
+/// the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtph_epu32)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtph2udq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_maskz_cvtph_epu32(k: __mmask8, a: __m128h) -> __m256i {
+    _mm256_mask_cvtph_epu32(_mm256_setzero_si256(), k, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit unsigned integers, and store
+/// the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtph_epu32)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtph2udq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_cvtph_epu32(a: __m256h) -> __m512i {
+    _mm512_mask_cvtph_epu32(_mm512_undefined_epi32(), 0xffff, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit unsigned integers, and store
+/// the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtph_epu32)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtph2udq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_cvtph_epu32(src: __m512i, k: __mmask16, a: __m256h) -> __m512i {
+    transmute(vcvtph2udq_512(
+        a,
+        src.as_u32x16(),
+        k,
+        _MM_FROUND_CUR_DIRECTION,
+    ))
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit unsigned integers, and store
+/// the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtph_epu32)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtph2udq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cvtph_epu32(k: __mmask16, a: __m256h) -> __m512i {
+    _mm512_mask_cvtph_epu32(_mm512_setzero_si512(), k, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit unsigned integers, and store
+/// the results in dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundph_epu32)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtph2udq, ROUNDING = 8))]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_cvt_roundph_epu32<const ROUNDING: i32>(a: __m256h) -> __m512i {
+    static_assert_rounding!(ROUNDING);
+    _mm512_mask_cvt_roundph_epu32::<ROUNDING>(_mm512_undefined_epi32(), 0xffff, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit unsigned integers, and store
+/// the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundph_epu32)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtph2udq, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_cvt_roundph_epu32<const ROUNDING: i32>(
+    src: __m512i,
+    k: __mmask16,
+    a: __m256h,
+) -> __m512i {
+    static_assert_rounding!(ROUNDING);
+    transmute(vcvtph2udq_512(a, src.as_u32x16(), k, ROUNDING))
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit unsigned integers, and store
+/// the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundph_epu32)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtph2udq, ROUNDING = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cvt_roundph_epu32<const ROUNDING: i32>(
+    k: __mmask16,
+    a: __m256h,
+) -> __m512i {
+    static_assert_rounding!(ROUNDING);
+    _mm512_mask_cvt_roundph_epu32::<ROUNDING>(_mm512_setzero_si512(), k, a)
+}
+
+/// Convert the lower half-precision (16-bit) floating-point element in a to a 32-bit unsigned integer, and store
+/// the result in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsh_u32)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtsh2usi))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cvtsh_u32(a: __m128h) -> u32 {
+    vcvtsh2usi32(a, _MM_FROUND_CUR_DIRECTION)
+}
+
+/// Convert the lower half-precision (16-bit) floating-point element in a to a 32-bit unsigned integer, and store
+/// the result in dst.
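+///
+/// For illustration, 2.5 rounds to 2 under round-to-nearest-even (sketch only;
+/// `_mm_set_sh` is assumed to be available from this feature set):
+///
+/// ```ignore
+/// let x = _mm_cvt_roundsh_u32::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+///     _mm_set_sh(2.5),
+/// );
+/// assert_eq!(x, 2); // ties go to even
+/// ```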
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundsh_u32)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtsh2usi, ROUNDING = 8))]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cvt_roundsh_u32<const ROUNDING: i32>(a: __m128h) -> u32 {
+    static_assert_rounding!(ROUNDING);
+    vcvtsh2usi32(a, ROUNDING)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit integers with truncation, and
+/// store the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttph_epi32)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvttph2dq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cvttph_epi32(a: __m128h) -> __m128i {
+    _mm_mask_cvttph_epi32(_mm_undefined_si128(), 0xff, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit integers with truncation, and
+/// store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvttph_epi32)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvttph2dq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_cvttph_epi32(src: __m128i, k: __mmask8, a: __m128h) -> __m128i {
+    transmute(vcvttph2dq_128(a, src.as_i32x4(), k))
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit integers with truncation, and
+/// store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvttph_epi32)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvttph2dq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_cvttph_epi32(k: __mmask8, a: __m128h) -> __m128i {
+    _mm_mask_cvttph_epi32(_mm_setzero_si128(), k, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit integers with truncation, and
+/// store the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvttph_epi32)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvttph2dq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_cvttph_epi32(a: __m128h) -> __m256i {
+    _mm256_mask_cvttph_epi32(_mm256_undefined_si256(), 0xff, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit integers with truncation, and
+/// store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvttph_epi32)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvttph2dq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_mask_cvttph_epi32(src: __m256i, k: __mmask8, a: __m128h) -> __m256i {
+    transmute(vcvttph2dq_256(a, src.as_i32x8(), k))
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit integers with truncation, and
+/// store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvttph_epi32)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvttph2dq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_maskz_cvttph_epi32(k: __mmask8, a: __m128h) -> __m256i {
+    _mm256_mask_cvttph_epi32(_mm256_setzero_si256(), k, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit integers with truncation, and
+/// store the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvttph_epi32)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvttph2dq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_cvttph_epi32(a: __m256h) -> __m512i {
+    _mm512_mask_cvttph_epi32(_mm512_undefined_epi32(), 0xffff, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit integers with truncation, and
+/// store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvttph_epi32)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvttph2dq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_cvttph_epi32(src: __m512i, k: __mmask16, a: __m256h) -> __m512i {
+    transmute(vcvttph2dq_512(
+        a,
+        src.as_i32x16(),
+        k,
+        _MM_FROUND_CUR_DIRECTION,
+    ))
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit integers with truncation, and
+/// store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvttph_epi32)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvttph2dq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cvttph_epi32(k: __mmask16, a: __m256h) -> __m512i {
+    _mm512_mask_cvttph_epi32(_mm512_setzero_si512(), k, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit integers with truncation, and
+/// store the results in dst.
+///
+/// Exceptions can be suppressed by passing `_MM_FROUND_NO_EXC` in the `sae` parameter.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtt_roundph_epi32)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvttph2dq, SAE = 8))]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_cvtt_roundph_epi32<const SAE: i32>(a: __m256h) -> __m512i {
+    static_assert_sae!(SAE);
+    _mm512_mask_cvtt_roundph_epi32::<SAE>(_mm512_undefined_epi32(), 0xffff, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit integers with truncation, and
+/// store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
+///
+/// Exceptions can be suppressed by passing `_MM_FROUND_NO_EXC` in the `sae` parameter.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtt_roundph_epi32)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvttph2dq, SAE = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_cvtt_roundph_epi32<const SAE: i32>(
+    src: __m512i,
+    k: __mmask16,
+    a: __m256h,
+) -> __m512i {
+    static_assert_sae!(SAE);
+    transmute(vcvttph2dq_512(a, src.as_i32x16(), k, SAE))
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit integers with truncation, and
+/// store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// Exceptions can be suppressed by passing `_MM_FROUND_NO_EXC` in the `sae` parameter.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtt_roundph_epi32)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvttph2dq, SAE = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cvtt_roundph_epi32<const SAE: i32>(k: __mmask16, a: __m256h) -> __m512i {
+    static_assert_sae!(SAE);
+    _mm512_mask_cvtt_roundph_epi32::<SAE>(_mm512_setzero_si512(), k, a)
+}
+
+/// Convert the lower half-precision (16-bit) floating-point element in a to a 32-bit integer with truncation, and store
+/// the result in dst.
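+///
+/// Truncation drops the fraction toward zero, e.g. -3.9 becomes -3 (sketch
+/// only; `_mm_set_sh` is assumed to be available from this feature set):
+///
+/// ```ignore
+/// assert_eq!(_mm_cvttsh_i32(_mm_set_sh(-3.9)), -3);
+/// ```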
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttsh_i32)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvttsh2si))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cvttsh_i32(a: __m128h) -> i32 {
+    vcvttsh2si32(a, _MM_FROUND_CUR_DIRECTION)
+}
+
+/// Convert the lower half-precision (16-bit) floating-point element in a to a 32-bit integer with truncation, and store
+/// the result in dst.
+///
+/// Exceptions can be suppressed by passing `_MM_FROUND_NO_EXC` in the `sae` parameter.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtt_roundsh_i32)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvttsh2si, SAE = 8))]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cvtt_roundsh_i32<const SAE: i32>(a: __m128h) -> i32 {
+    static_assert_sae!(SAE);
+    vcvttsh2si32(a, SAE)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit unsigned integers with truncation, and
+/// store the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttph_epu32)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvttph2udq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cvttph_epu32(a: __m128h) -> __m128i {
+    _mm_mask_cvttph_epu32(_mm_undefined_si128(), 0xff, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit unsigned integers with truncation, and
+/// store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvttph_epu32)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvttph2udq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_cvttph_epu32(src: __m128i, k: __mmask8, a: __m128h) -> __m128i {
+    transmute(vcvttph2udq_128(a, src.as_u32x4(), k))
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit unsigned integers with truncation, and
+/// store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvttph_epu32)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvttph2udq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_cvttph_epu32(k: __mmask8, a: __m128h) -> __m128i {
+    _mm_mask_cvttph_epu32(_mm_setzero_si128(), k, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit unsigned integers with truncation, and
+/// store the results in dst.
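+///
+/// A minimal sketch (assumes `_mm_set1_ph` from this feature set):
+///
+/// ```ignore
+/// let a = _mm_set1_ph(3.9);
+/// let r = _mm256_cvttph_epu32(a); // each of the eight u32 lanes holds 3
+/// ```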
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvttph_epu32) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvttph2udq))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_cvttph_epu32(a: __m128h) -> __m256i { + _mm256_mask_cvttph_epu32(_mm256_undefined_si256(), 0xff, a) +} + +/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit unsigned integers with truncation, and +/// store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvttph_epu32) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvttph2udq))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_mask_cvttph_epu32(src: __m256i, k: __mmask8, a: __m128h) -> __m256i { + transmute(vcvttph2udq_256(a, src.as_u32x8(), k)) +} + +/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit unsigned integers with truncation, and +/// store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvttph_epu32) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvttph2udq))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_maskz_cvttph_epu32(k: __mmask8, a: __m128h) -> __m256i { + _mm256_mask_cvttph_epu32(_mm256_setzero_si256(), k, a) +} + +/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit unsigned integers with truncation, and +/// store the results in dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvttph_epu32) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vcvttph2udq))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_cvttph_epu32(a: __m256h) -> __m512i { + _mm512_mask_cvttph_epu32(_mm512_undefined_epi32(), 0xffff, a) +} + +/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit unsigned integers with truncation, and +/// store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvttph_epu32) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vcvttph2udq))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_mask_cvttph_epu32(src: __m512i, k: __mmask16, a: __m256h) -> __m512i { + transmute(vcvttph2udq_512( + a, + src.as_u32x16(), + k, + _MM_FROUND_CUR_DIRECTION, + )) +} + +/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit unsigned integers with truncation, and +/// store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). 
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvttph_epu32)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvttph2udq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cvttph_epu32(k: __mmask16, a: __m256h) -> __m512i {
+    _mm512_mask_cvttph_epu32(_mm512_setzero_si512(), k, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit unsigned integers with truncation, and
+/// store the results in dst.
+///
+/// Exceptions can be suppressed by passing `_MM_FROUND_NO_EXC` in the `sae` parameter.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtt_roundph_epu32)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvttph2udq, SAE = 8))]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_cvtt_roundph_epu32<const SAE: i32>(a: __m256h) -> __m512i {
+    static_assert_sae!(SAE);
+    _mm512_mask_cvtt_roundph_epu32::<SAE>(_mm512_undefined_epi32(), 0xffff, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit unsigned integers with truncation, and
+/// store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
+///
+/// Exceptions can be suppressed by passing `_MM_FROUND_NO_EXC` in the `sae` parameter.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtt_roundph_epu32)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvttph2udq, SAE = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_cvtt_roundph_epu32<const SAE: i32>(
+    src: __m512i,
+    k: __mmask16,
+    a: __m256h,
+) -> __m512i {
+    static_assert_sae!(SAE);
+    transmute(vcvttph2udq_512(a, src.as_u32x16(), k, SAE))
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 32-bit unsigned integers with truncation, and
+/// store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// Exceptions can be suppressed by passing `_MM_FROUND_NO_EXC` in the `sae` parameter.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtt_roundph_epu32)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvttph2udq, SAE = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cvtt_roundph_epu32<const SAE: i32>(k: __mmask16, a: __m256h) -> __m512i {
+    static_assert_sae!(SAE);
+    _mm512_mask_cvtt_roundph_epu32::<SAE>(_mm512_setzero_si512(), k, a)
+}
+
+/// Convert the lower half-precision (16-bit) floating-point element in a to a 32-bit unsigned integer with truncation, and store
+/// the result in dst.
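+///
+/// A minimal sketch (assumes `_mm_set_sh` from this feature set):
+///
+/// ```ignore
+/// let x = _mm_set_sh(2.5);
+/// assert_eq!(_mm_cvttsh_u32(x), 2); // the fractional part is discarded
+/// ```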
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttsh_u32)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvttsh2usi))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cvttsh_u32(a: __m128h) -> u32 {
+    vcvttsh2usi32(a, _MM_FROUND_CUR_DIRECTION)
+}
+
+/// Convert the lower half-precision (16-bit) floating-point element in a to a 32-bit unsigned integer with truncation, and store
+/// the result in dst.
+///
+/// Exceptions can be suppressed by passing `_MM_FROUND_NO_EXC` in the `sae` parameter.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtt_roundsh_u32)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvttsh2usi, SAE = 8))]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cvtt_roundsh_u32<const SAE: i32>(a: __m128h) -> u32 {
+    static_assert_sae!(SAE);
+    vcvttsh2usi32(a, SAE)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit integers, and
+/// store the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtph_epi64)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtph2qq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cvtph_epi64(a: __m128h) -> __m128i {
+    _mm_mask_cvtph_epi64(_mm_undefined_si128(), 0xff, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit integers, and
+/// store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtph_epi64)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtph2qq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_cvtph_epi64(src: __m128i, k: __mmask8, a: __m128h) -> __m128i {
+    transmute(vcvtph2qq_128(a, src.as_i64x2(), k))
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit integers, and
+/// store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtph_epi64)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtph2qq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_cvtph_epi64(k: __mmask8, a: __m128h) -> __m128i {
+    _mm_mask_cvtph_epi64(_mm_setzero_si128(), k, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit integers, and
+/// store the results in dst.
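+///
+/// Unlike the `cvtt` variants, this conversion honours the current MXCSR rounding
+/// direction (round-to-nearest-even by default). A minimal sketch (assumes
+/// `_mm_set1_ph` from this feature set):
+///
+/// ```ignore
+/// let a = _mm_set1_ph(2.5);
+/// let r = _mm256_cvtph_epi64(a); // each i64 lane holds 2 under round-to-nearest-even
+/// ```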
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtph_epi64) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvtph2qq))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_cvtph_epi64(a: __m128h) -> __m256i { + _mm256_mask_cvtph_epi64(_mm256_undefined_si256(), 0xff, a) +} + +/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit integers, and +/// store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtph_epi64) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvtph2qq))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_mask_cvtph_epi64(src: __m256i, k: __mmask8, a: __m128h) -> __m256i { + transmute(vcvtph2qq_256(a, src.as_i64x4(), k)) +} + +/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit integers, and +/// store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtph_epi64) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvtph2qq))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_maskz_cvtph_epi64(k: __mmask8, a: __m128h) -> __m256i { + _mm256_mask_cvtph_epi64(_mm256_setzero_si256(), k, a) +} + +/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit integers, and +/// store the results in dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtph_epi64) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vcvtph2qq))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_cvtph_epi64(a: __m128h) -> __m512i { + _mm512_mask_cvtph_epi64(_mm512_undefined_epi32(), 0xff, a) +} + +/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit integers, and +/// store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtph_epi64) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vcvtph2qq))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_mask_cvtph_epi64(src: __m512i, k: __mmask8, a: __m128h) -> __m512i { + transmute(vcvtph2qq_512( + a, + src.as_i64x8(), + k, + _MM_FROUND_CUR_DIRECTION, + )) +} + +/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit integers, and +/// store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). 
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtph_epi64)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtph2qq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cvtph_epi64(k: __mmask8, a: __m128h) -> __m512i {
+    _mm512_mask_cvtph_epi64(_mm512_setzero_si512(), k, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit integers, and
+/// store the results in dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundph_epi64)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtph2qq, ROUNDING = 8))]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_cvt_roundph_epi64<const ROUNDING: i32>(a: __m128h) -> __m512i {
+    static_assert_rounding!(ROUNDING);
+    _mm512_mask_cvt_roundph_epi64::<ROUNDING>(_mm512_undefined_epi32(), 0xff, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit integers, and
+/// store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundph_epi64)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtph2qq, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_cvt_roundph_epi64<const ROUNDING: i32>(
+    src: __m512i,
+    k: __mmask8,
+    a: __m128h,
+) -> __m512i {
+    static_assert_rounding!(ROUNDING);
+    transmute(vcvtph2qq_512(a, src.as_i64x8(), k, ROUNDING))
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit integers, and
+/// store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
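+///
+/// A minimal sketch of supplying the rounding mode through the const generic; the
+/// braces are required because the argument is a const expression (hypothetical `k`
+/// and `a`):
+///
+/// ```ignore
+/// let r = _mm512_maskz_cvt_roundph_epi64::<{ _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC }>(k, a);
+/// ```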
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundph_epi64)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtph2qq, ROUNDING = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cvt_roundph_epi64<const ROUNDING: i32>(
+    k: __mmask8,
+    a: __m128h,
+) -> __m512i {
+    static_assert_rounding!(ROUNDING);
+    _mm512_mask_cvt_roundph_epi64::<ROUNDING>(_mm512_setzero_si512(), k, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit unsigned integers, and
+/// store the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtph_epu64)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtph2uqq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cvtph_epu64(a: __m128h) -> __m128i {
+    _mm_mask_cvtph_epu64(_mm_undefined_si128(), 0xff, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit unsigned integers, and
+/// store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtph_epu64)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtph2uqq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_cvtph_epu64(src: __m128i, k: __mmask8, a: __m128h) -> __m128i {
+    transmute(vcvtph2uqq_128(a, src.as_u64x2(), k))
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit unsigned integers, and
+/// store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtph_epu64)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtph2uqq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_cvtph_epu64(k: __mmask8, a: __m128h) -> __m128i {
+    _mm_mask_cvtph_epu64(_mm_setzero_si128(), k, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit unsigned integers, and
+/// store the results in dst.
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtph_epu64) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvtph2uqq))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_cvtph_epu64(a: __m128h) -> __m256i { + _mm256_mask_cvtph_epu64(_mm256_undefined_si256(), 0xff, a) +} + +/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit unsigned integers, and +/// store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtph_epu64) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvtph2uqq))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_mask_cvtph_epu64(src: __m256i, k: __mmask8, a: __m128h) -> __m256i { + transmute(vcvtph2uqq_256(a, src.as_u64x4(), k)) +} + +/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit unsigned integers, and +/// store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtph_epu64) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvtph2uqq))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_maskz_cvtph_epu64(k: __mmask8, a: __m128h) -> __m256i { + _mm256_mask_cvtph_epu64(_mm256_setzero_si256(), k, a) +} + +/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit unsigned integers, and +/// store the results in dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtph_epu64) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vcvtph2uqq))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_cvtph_epu64(a: __m128h) -> __m512i { + _mm512_mask_cvtph_epu64(_mm512_undefined_epi32(), 0xff, a) +} + +/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit unsigned integers, and +/// store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtph_epu64) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vcvtph2uqq))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_mask_cvtph_epu64(src: __m512i, k: __mmask8, a: __m128h) -> __m512i { + transmute(vcvtph2uqq_512( + a, + src.as_u64x8(), + k, + _MM_FROUND_CUR_DIRECTION, + )) +} + +/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit unsigned integers, and +/// store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). 
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtph_epu64)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtph2uqq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cvtph_epu64(k: __mmask8, a: __m128h) -> __m512i {
+    _mm512_mask_cvtph_epu64(_mm512_setzero_si512(), k, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit unsigned integers, and
+/// store the results in dst.
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundph_epu64)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtph2uqq, ROUNDING = 8))]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_cvt_roundph_epu64<const ROUNDING: i32>(a: __m128h) -> __m512i {
+    static_assert_rounding!(ROUNDING);
+    _mm512_mask_cvt_roundph_epu64::<ROUNDING>(_mm512_undefined_epi32(), 0xff, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit unsigned integers, and
+/// store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundph_epu64)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtph2uqq, ROUNDING = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_cvt_roundph_epu64<const ROUNDING: i32>(
+    src: __m512i,
+    k: __mmask8,
+    a: __m128h,
+) -> __m512i {
+    static_assert_rounding!(ROUNDING);
+    transmute(vcvtph2uqq_512(a, src.as_u64x8(), k, ROUNDING))
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit unsigned integers, and
+/// store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// Rounding is done according to the rounding parameter, which can be one of:
+///
+/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions
+/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions
+/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions
+/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions
+/// _MM_FROUND_CUR_DIRECTION
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundph_epu64)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtph2uqq, ROUNDING = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cvt_roundph_epu64<const ROUNDING: i32>(
+    k: __mmask8,
+    a: __m128h,
+) -> __m512i {
+    static_assert_rounding!(ROUNDING);
+    _mm512_mask_cvt_roundph_epu64::<ROUNDING>(_mm512_setzero_si512(), k, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit integers with truncation, and
+/// store the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttph_epi64)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvttph2qq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cvttph_epi64(a: __m128h) -> __m128i {
+    _mm_mask_cvttph_epi64(_mm_undefined_si128(), 0xff, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit integers with truncation, and
+/// store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvttph_epi64)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvttph2qq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_cvttph_epi64(src: __m128i, k: __mmask8, a: __m128h) -> __m128i {
+    transmute(vcvttph2qq_128(a, src.as_i64x2(), k))
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit integers with truncation, and
+/// store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvttph_epi64)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvttph2qq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_cvttph_epi64(k: __mmask8, a: __m128h) -> __m128i {
+    _mm_mask_cvttph_epi64(_mm_setzero_si128(), k, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit integers with truncation, and
+/// store the results in dst.
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvttph_epi64) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvttph2qq))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_cvttph_epi64(a: __m128h) -> __m256i { + _mm256_mask_cvttph_epi64(_mm256_undefined_si256(), 0xff, a) +} + +/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit integers with truncation, and +/// store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvttph_epi64) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvttph2qq))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_mask_cvttph_epi64(src: __m256i, k: __mmask8, a: __m128h) -> __m256i { + transmute(vcvttph2qq_256(a, src.as_i64x4(), k)) +} + +/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit integers with truncation, and +/// store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvttph_epi64) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvttph2qq))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_maskz_cvttph_epi64(k: __mmask8, a: __m128h) -> __m256i { + _mm256_mask_cvttph_epi64(_mm256_setzero_si256(), k, a) +} + +/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit integers with truncation, and +/// store the results in dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvttph_epi64) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vcvttph2qq))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_cvttph_epi64(a: __m128h) -> __m512i { + _mm512_mask_cvttph_epi64(_mm512_undefined_epi32(), 0xff, a) +} + +/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit integers with truncation, and +/// store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvttph_epi64) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vcvttph2qq))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_mask_cvttph_epi64(src: __m512i, k: __mmask8, a: __m128h) -> __m512i { + transmute(vcvttph2qq_512( + a, + src.as_i64x8(), + k, + _MM_FROUND_CUR_DIRECTION, + )) +} + +/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit integers with truncation, and +/// store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). 
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvttph_epi64)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvttph2qq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cvttph_epi64(k: __mmask8, a: __m128h) -> __m512i {
+    _mm512_mask_cvttph_epi64(_mm512_setzero_si512(), k, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit integers with truncation, and
+/// store the results in dst.
+///
+/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtt_roundph_epi64)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvttph2qq, SAE = 8))]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_cvtt_roundph_epi64<const SAE: i32>(a: __m128h) -> __m512i {
+    static_assert_sae!(SAE);
+    _mm512_mask_cvtt_roundph_epi64::<SAE>(_mm512_undefined_epi32(), 0xff, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit integers with truncation, and
+/// store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
+///
+/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtt_roundph_epi64)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvttph2qq, SAE = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_cvtt_roundph_epi64<const SAE: i32>(
+    src: __m512i,
+    k: __mmask8,
+    a: __m128h,
+) -> __m512i {
+    static_assert_sae!(SAE);
+    transmute(vcvttph2qq_512(a, src.as_i64x8(), k, SAE))
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit integers with truncation, and
+/// store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtt_roundph_epi64)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvttph2qq, SAE = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cvtt_roundph_epi64<const SAE: i32>(k: __mmask8, a: __m128h) -> __m512i {
+    static_assert_sae!(SAE);
+    _mm512_mask_cvtt_roundph_epi64::<SAE>(_mm512_setzero_si512(), k, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit unsigned integers with truncation, and
+/// store the results in dst.
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttph_epu64) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvttph2uqq))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_cvttph_epu64(a: __m128h) -> __m128i { + _mm_mask_cvttph_epu64(_mm_undefined_si128(), 0xff, a) +} + +/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit unsigned integers with truncation, and +/// store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvttph_epu64) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvttph2uqq))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_mask_cvttph_epu64(src: __m128i, k: __mmask8, a: __m128h) -> __m128i { + transmute(vcvttph2uqq_128(a, src.as_u64x2(), k)) +} + +/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit unsigned integers with truncation, and +/// store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvttph_epu64) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvttph2uqq))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_maskz_cvttph_epu64(k: __mmask8, a: __m128h) -> __m128i { + _mm_mask_cvttph_epu64(_mm_setzero_si128(), k, a) +} + +/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit unsigned integers with truncation, and +/// store the results in dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvttph_epu64) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvttph2uqq))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_cvttph_epu64(a: __m128h) -> __m256i { + _mm256_mask_cvttph_epu64(_mm256_undefined_si256(), 0xff, a) +} + +/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit unsigned integers with truncation, and +/// store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvttph_epu64) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvttph2uqq))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_mask_cvttph_epu64(src: __m256i, k: __mmask8, a: __m128h) -> __m256i { + transmute(vcvttph2uqq_256(a, src.as_u64x4(), k)) +} + +/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit unsigned integers with truncation, and +/// store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). 
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvttph_epu64)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvttph2uqq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm256_maskz_cvttph_epu64(k: __mmask8, a: __m128h) -> __m256i {
+    _mm256_mask_cvttph_epu64(_mm256_setzero_si256(), k, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit unsigned integers with truncation, and
+/// store the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvttph_epu64)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvttph2uqq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_cvttph_epu64(a: __m128h) -> __m512i {
+    _mm512_mask_cvttph_epu64(_mm512_undefined_epi32(), 0xff, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit unsigned integers with truncation, and
+/// store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvttph_epu64)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvttph2uqq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_cvttph_epu64(src: __m512i, k: __mmask8, a: __m128h) -> __m512i {
+    transmute(vcvttph2uqq_512(
+        a,
+        src.as_u64x8(),
+        k,
+        _MM_FROUND_CUR_DIRECTION,
+    ))
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit unsigned integers with truncation, and
+/// store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvttph_epu64)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvttph2uqq))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cvttph_epu64(k: __mmask8, a: __m128h) -> __m512i {
+    _mm512_mask_cvttph_epu64(_mm512_setzero_si512(), k, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit unsigned integers with truncation, and
+/// store the results in dst.
+///
+/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtt_roundph_epu64)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvttph2uqq, SAE = 8))]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_cvtt_roundph_epu64<const SAE: i32>(a: __m128h) -> __m512i {
+    static_assert_sae!(SAE);
+    _mm512_mask_cvtt_roundph_epu64::<SAE>(_mm512_undefined_epi32(), 0xff, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit unsigned integers with truncation, and
+/// store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
+///
+/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
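+///
+/// A minimal sketch (hypothetical `src`, `k`, and `a`):
+///
+/// ```ignore
+/// let r = _mm512_mask_cvtt_roundph_epu64::<_MM_FROUND_NO_EXC>(src, k, a);
+/// ```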
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtt_roundph_epu64)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvttph2uqq, SAE = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_cvtt_roundph_epu64<const SAE: i32>(
+    src: __m512i,
+    k: __mmask8,
+    a: __m128h,
+) -> __m512i {
+    static_assert_sae!(SAE);
+    transmute(vcvttph2uqq_512(a, src.as_u64x8(), k, SAE))
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed 64-bit unsigned integers with truncation, and
+/// store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
+///
+/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtt_roundph_epu64)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvttph2uqq, SAE = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cvtt_roundph_epu64<const SAE: i32>(k: __mmask8, a: __m128h) -> __m512i {
+    static_assert_sae!(SAE);
+    _mm512_mask_cvtt_roundph_epu64::<SAE>(_mm512_setzero_si512(), k, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed single-precision (32-bit)
+/// floating-point elements, and store the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtxph_ps)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtph2psx))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cvtxph_ps(a: __m128h) -> __m128 {
+    _mm_mask_cvtxph_ps(_mm_setzero_ps(), 0xff, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed single-precision (32-bit)
+/// floating-point elements, and store the results in dst using writemask k (elements are copied from src to
+/// dst when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtxph_ps)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtph2psx))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_cvtxph_ps(src: __m128, k: __mmask8, a: __m128h) -> __m128 {
+    vcvtph2psx_128(a, src, k)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed single-precision (32-bit)
+/// floating-point elements, and store the results in dst using zeromask k (elements are zeroed out when the
+/// corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtxph_ps)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtph2psx))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_cvtxph_ps(k: __mmask8, a: __m128h) -> __m128 {
+    _mm_mask_cvtxph_ps(_mm_setzero_ps(), k, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed single-precision (32-bit)
+/// floating-point elements, and store the results in dst.
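+///
+/// A minimal sketch of the widening conversion (assumes `_mm_set1_ph` from this
+/// feature set):
+///
+/// ```ignore
+/// let a = _mm_set1_ph(1.5);
+/// let r = _mm256_cvtxph_ps(a); // eight f32 lanes, each exactly 1.5
+/// ```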
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtxph_ps) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvtph2psx))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_cvtxph_ps(a: __m128h) -> __m256 { + _mm256_mask_cvtxph_ps(_mm256_setzero_ps(), 0xff, a) +} + +/// Convert packed half-precision (16-bit) floating-point elements in a to packed single-precision (32-bit) +/// floating-point elements, and store the results in dst using writemask k (elements are copied from src to +/// dst when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtxph_ps) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvtph2psx))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_mask_cvtxph_ps(src: __m256, k: __mmask8, a: __m128h) -> __m256 { + vcvtph2psx_256(a, src, k) +} + +/// Convert packed half-precision (16-bit) floating-point elements in a to packed single-precision (32-bit) +/// floating-point elements, and store the results in dst using zeromask k (elements are zeroed out when the +/// corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtxph_ps) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvtph2psx))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_maskz_cvtxph_ps(k: __mmask8, a: __m128h) -> __m256 { + _mm256_mask_cvtxph_ps(_mm256_setzero_ps(), k, a) +} + +/// Convert packed half-precision (16-bit) floating-point elements in a to packed single-precision (32-bit) +/// floating-point elements, and store the results in dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtxph_ps) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vcvtph2psx))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_cvtxph_ps(a: __m256h) -> __m512 { + _mm512_mask_cvtxph_ps(_mm512_setzero_ps(), 0xffff, a) +} + +/// Convert packed half-precision (16-bit) floating-point elements in a to packed single-precision (32-bit) +/// floating-point elements, and store the results in dst using writemask k (elements are copied from src to +/// dst when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtxph_ps) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vcvtph2psx))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_mask_cvtxph_ps(src: __m512, k: __mmask16, a: __m256h) -> __m512 { + vcvtph2psx_512(a, src, k, _MM_FROUND_CUR_DIRECTION) +} + +/// Convert packed half-precision (16-bit) floating-point elements in a to packed single-precision (32-bit) +/// floating-point elements, and store the results in dst using zeromask k (elements are zeroed out when the +/// corresponding mask bit is not set). 
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtxph_ps)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtph2psx))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cvtxph_ps(k: __mmask16, a: __m256h) -> __m512 {
+    _mm512_mask_cvtxph_ps(_mm512_setzero_ps(), k, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed single-precision (32-bit)
+/// floating-point elements, and store the results in dst.
+///
+/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtx_roundph_ps)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtph2psx, SAE = 8))]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_cvtx_roundph_ps<const SAE: i32>(a: __m256h) -> __m512 {
+    static_assert_sae!(SAE);
+    _mm512_mask_cvtx_roundph_ps::<SAE>(_mm512_setzero_ps(), 0xffff, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed single-precision (32-bit)
+/// floating-point elements, and store the results in dst using writemask k (elements are copied from src to
+/// dst when the corresponding mask bit is not set).
+///
+/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtx_roundph_ps)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtph2psx, SAE = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_cvtx_roundph_ps<const SAE: i32>(
+    src: __m512,
+    k: __mmask16,
+    a: __m256h,
+) -> __m512 {
+    static_assert_sae!(SAE);
+    vcvtph2psx_512(a, src, k, SAE)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed single-precision (32-bit)
+/// floating-point elements, and store the results in dst using zeromask k (elements are zeroed out when the
+/// corresponding mask bit is not set).
+///
+/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtx_roundph_ps)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtph2psx, SAE = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cvtx_roundph_ps<const SAE: i32>(k: __mmask16, a: __m256h) -> __m512 {
+    static_assert_sae!(SAE);
+    _mm512_mask_cvtx_roundph_ps::<SAE>(_mm512_setzero_ps(), k, a)
+}
+
+/// Convert the lower half-precision (16-bit) floating-point element in b to a single-precision (32-bit)
+/// floating-point element, store the result in the lower element of dst, and copy the upper 3 packed
+/// elements from a to the upper elements of dst.
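+///
+/// A minimal sketch of the lane behaviour (assumes `_mm_set_sh` from this feature set):
+///
+/// ```ignore
+/// let a = _mm_set1_ps(9.0);
+/// let b = _mm_set_sh(1.0);
+/// let r = _mm_cvtsh_ss(a, b); // lowest lane is 1.0, the upper three stay 9.0
+/// ```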
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsh_ss)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtsh2ss))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cvtsh_ss(a: __m128, b: __m128h) -> __m128 {
+    _mm_mask_cvtsh_ss(a, 0xff, a, b)
+}
+
+/// Convert the lower half-precision (16-bit) floating-point element in b to a single-precision (32-bit)
+/// floating-point element, store the result in the lower element of dst using writemask k (the element is
+/// copied from src to dst when mask bit 0 is not set), and copy the upper 3 packed elements from a to the
+/// upper elements of dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtsh_ss)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtsh2ss))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_cvtsh_ss(src: __m128, k: __mmask8, a: __m128, b: __m128h) -> __m128 {
+    vcvtsh2ss(a, b, src, k, _MM_FROUND_CUR_DIRECTION)
+}
+
+/// Convert the lower half-precision (16-bit) floating-point element in b to a single-precision (32-bit)
+/// floating-point element, store the result in the lower element of dst using zeromask k (the element is
+/// zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements
+/// of dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtsh_ss)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtsh2ss))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_cvtsh_ss(k: __mmask8, a: __m128, b: __m128h) -> __m128 {
+    _mm_mask_cvtsh_ss(_mm_setzero_ps(), k, a, b)
+}
+
+/// Convert the lower half-precision (16-bit) floating-point element in b to a single-precision (32-bit)
+/// floating-point element, store the result in the lower element of dst, and copy the upper 3 packed elements
+/// from a to the upper elements of dst.
+///
+/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundsh_ss)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtsh2ss, SAE = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cvt_roundsh_ss<const SAE: i32>(a: __m128, b: __m128h) -> __m128 {
+    static_assert_sae!(SAE);
+    _mm_mask_cvt_roundsh_ss::<SAE>(_mm_undefined_ps(), 0xff, a, b)
+}
+
+/// Convert the lower half-precision (16-bit) floating-point element in b to a single-precision (32-bit)
+/// floating-point element, store the result in the lower element of dst using writemask k (the element is
+/// copied from src to dst when mask bit 0 is not set), and copy the upper 3 packed elements from a to the
+/// upper elements of dst.
+///
+/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvt_roundsh_ss)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtsh2ss, SAE = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_cvt_roundsh_ss<const SAE: i32>(
+    src: __m128,
+    k: __mmask8,
+    a: __m128,
+    b: __m128h,
+) -> __m128 {
+    static_assert_sae!(SAE);
+    vcvtsh2ss(a, b, src, k, SAE)
+}
+
+/// Convert the lower half-precision (16-bit) floating-point element in b to a single-precision (32-bit)
+/// floating-point element, store the result in the lower element of dst using zeromask k (the element is
+/// zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements
+/// of dst.
+///
+/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvt_roundsh_ss)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtsh2ss, SAE = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_cvt_roundsh_ss<const SAE: i32>(
+    k: __mmask8,
+    a: __m128,
+    b: __m128h,
+) -> __m128 {
+    static_assert_sae!(SAE);
+    _mm_mask_cvt_roundsh_ss::<SAE>(_mm_setzero_ps(), k, a, b)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed double-precision (64-bit)
+/// floating-point elements, and store the results in dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtph_pd)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtph2pd))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cvtph_pd(a: __m128h) -> __m128d {
+    _mm_mask_cvtph_pd(_mm_setzero_pd(), 0xff, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed double-precision (64-bit)
+/// floating-point elements, and store the results in dst using writemask k (elements are copied from src to
+/// dst when the corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtph_pd)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtph2pd))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_cvtph_pd(src: __m128d, k: __mmask8, a: __m128h) -> __m128d {
+    vcvtph2pd_128(a, src, k)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed double-precision (64-bit)
+/// floating-point elements, and store the results in dst using zeromask k (elements are zeroed out when the
+/// corresponding mask bit is not set).
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtph_pd)
+#[inline]
+#[target_feature(enable = "avx512fp16,avx512vl")]
+#[cfg_attr(test, assert_instr(vcvtph2pd))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_cvtph_pd(k: __mmask8, a: __m128h) -> __m128d {
+    _mm_mask_cvtph_pd(_mm_setzero_pd(), k, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed double-precision (64-bit)
+/// floating-point elements, and store the results in dst.
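+///
+/// A minimal usage sketch (illustrative only, not part of Intel's documentation): only the
+/// 4 lowest `f16` lanes of `a` are widened into the 4 `f64` lanes of the result.
+///
+/// ```ignore
+/// let a = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 0.0, 0.0, 0.0, 0.0);
+/// let r = _mm256_cvtph_pd(a);
+/// // r == _mm256_setr_pd(1.0, 2.0, 3.0, 4.0)
+/// ```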
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtph_pd) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvtph2pd))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_cvtph_pd(a: __m128h) -> __m256d { + _mm256_mask_cvtph_pd(_mm256_setzero_pd(), 0xff, a) +} + +/// Convert packed half-precision (16-bit) floating-point elements in a to packed double-precision (64-bit) +/// floating-point elements, and store the results in dst using writemask k (elements are copied from src to +/// dst when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_cvtph_pd) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvtph2pd))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_mask_cvtph_pd(src: __m256d, k: __mmask8, a: __m128h) -> __m256d { + vcvtph2pd_256(a, src, k) +} + +/// Convert packed half-precision (16-bit) floating-point elements in a to packed double-precision (64-bit) +/// floating-point elements, and store the results in dst using zeromask k (elements are zeroed out when the +/// corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_cvtph_pd) +#[inline] +#[target_feature(enable = "avx512fp16,avx512vl")] +#[cfg_attr(test, assert_instr(vcvtph2pd))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_maskz_cvtph_pd(k: __mmask8, a: __m128h) -> __m256d { + _mm256_mask_cvtph_pd(_mm256_setzero_pd(), k, a) +} + +/// Convert packed half-precision (16-bit) floating-point elements in a to packed double-precision (64-bit) +/// floating-point elements, and store the results in dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtph_pd) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vcvtph2pd))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_cvtph_pd(a: __m128h) -> __m512d { + _mm512_mask_cvtph_pd(_mm512_setzero_pd(), 0xff, a) +} + +/// Convert packed half-precision (16-bit) floating-point elements in a to packed double-precision (64-bit) +/// floating-point elements, and store the results in dst using writemask k (elements are copied from src to +/// dst when the corresponding mask bit is not set). +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvtph_pd) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vcvtph2pd))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_mask_cvtph_pd(src: __m512d, k: __mmask8, a: __m128h) -> __m512d { + vcvtph2pd_512(a, src, k, _MM_FROUND_CUR_DIRECTION) +} + +/// Convert packed half-precision (16-bit) floating-point elements in a to packed double-precision (64-bit) +/// floating-point elements, and store the results in dst using zeromask k (elements are zeroed out when the +/// corresponding mask bit is not set). 
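+///
+/// A minimal usage sketch (illustrative only, not part of Intel's documentation):
+///
+/// ```ignore
+/// let a = _mm_set1_ph(2.0);
+/// // only the low 4 mask bits are set, so lanes 4..8 of the result are zeroed
+/// let r = _mm512_maskz_cvtph_pd(0b0000_1111, a);
+/// // r == _mm512_setr_pd(2.0, 2.0, 2.0, 2.0, 0.0, 0.0, 0.0, 0.0)
+/// ```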
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvtph_pd)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtph2pd))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cvtph_pd(k: __mmask8, a: __m128h) -> __m512d {
+    _mm512_mask_cvtph_pd(_mm512_setzero_pd(), k, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed double-precision (64-bit)
+/// floating-point elements, and store the results in dst.
+///
+/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvt_roundph_pd)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtph2pd, SAE = 8))]
+#[rustc_legacy_const_generics(1)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_cvt_roundph_pd<const SAE: i32>(a: __m128h) -> __m512d {
+    static_assert_sae!(SAE);
+    _mm512_mask_cvt_roundph_pd::<SAE>(_mm512_setzero_pd(), 0xff, a)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed double-precision (64-bit)
+/// floating-point elements, and store the results in dst using writemask k (elements are copied from src to
+/// dst when the corresponding mask bit is not set).
+///
+/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cvt_roundph_pd)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtph2pd, SAE = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_mask_cvt_roundph_pd<const SAE: i32>(
+    src: __m512d,
+    k: __mmask8,
+    a: __m128h,
+) -> __m512d {
+    static_assert_sae!(SAE);
+    vcvtph2pd_512(a, src, k, SAE)
+}
+
+/// Convert packed half-precision (16-bit) floating-point elements in a to packed double-precision (64-bit)
+/// floating-point elements, and store the results in dst using zeromask k (elements are zeroed out when the
+/// corresponding mask bit is not set).
+///
+/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_cvt_roundph_pd)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtph2pd, SAE = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm512_maskz_cvt_roundph_pd<const SAE: i32>(k: __mmask8, a: __m128h) -> __m512d {
+    static_assert_sae!(SAE);
+    _mm512_mask_cvt_roundph_pd::<SAE>(_mm512_setzero_pd(), k, a)
+}
+
+/// Convert the lower half-precision (16-bit) floating-point element in b to a double-precision (64-bit)
+/// floating-point element, store the result in the lower element of dst, and copy the upper element
+/// from a to the upper element of dst.
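+///
+/// A minimal usage sketch (illustrative only, not part of Intel's documentation):
+///
+/// ```ignore
+/// let a = _mm_set_pd(8.0, 1.0);
+/// let b = _mm_set_sh(3.0);
+/// let r = _mm_cvtsh_sd(a, b);
+/// // lower lane: 3.0 widened from f16; upper lane copied from `a`,
+/// // so r == _mm_set_pd(8.0, 3.0)
+/// ```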
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsh_sd)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtsh2sd))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cvtsh_sd(a: __m128d, b: __m128h) -> __m128d {
+    _mm_mask_cvtsh_sd(a, 0xff, a, b)
+}
+
+/// Convert the lower half-precision (16-bit) floating-point element in b to a double-precision (64-bit)
+/// floating-point element, store the result in the lower element of dst using writemask k (the element is
+/// copied from src to dst when mask bit 0 is not set), and copy the upper element from a to the upper element
+/// of dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvtsh_sd)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtsh2sd))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_cvtsh_sd(src: __m128d, k: __mmask8, a: __m128d, b: __m128h) -> __m128d {
+    vcvtsh2sd(a, b, src, k, _MM_FROUND_CUR_DIRECTION)
+}
+
+/// Convert the lower half-precision (16-bit) floating-point element in b to a double-precision (64-bit)
+/// floating-point element, store the result in the lower element of dst using zeromask k (the element is
+/// zeroed out when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvtsh_sd)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtsh2sd))]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_cvtsh_sd(k: __mmask8, a: __m128d, b: __m128h) -> __m128d {
+    _mm_mask_cvtsh_sd(_mm_setzero_pd(), k, a, b)
+}
+
+/// Convert the lower half-precision (16-bit) floating-point element in b to a double-precision (64-bit)
+/// floating-point element, store the result in the lower element of dst, and copy the upper element from a
+/// to the upper element of dst.
+///
+/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundsh_sd)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtsh2sd, SAE = 8))]
+#[rustc_legacy_const_generics(2)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_cvt_roundsh_sd<const SAE: i32>(a: __m128d, b: __m128h) -> __m128d {
+    static_assert_sae!(SAE);
+    _mm_mask_cvt_roundsh_sd::<SAE>(a, 0xff, a, b)
+}
+
+/// Convert the lower half-precision (16-bit) floating-point element in b to a double-precision (64-bit)
+/// floating-point element, store the result in the lower element of dst using writemask k (the element is
+/// copied from src to dst when mask bit 0 is not set), and copy the upper element from a to the upper element
+/// of dst.
+///
+/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
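+///
+/// A minimal usage sketch (illustrative only, not part of Intel's documentation; the values
+/// are hypothetical):
+///
+/// ```ignore
+/// let src = _mm_set_pd(0.0, 9.0);
+/// let a = _mm_set_pd(8.0, 1.0);
+/// let b = _mm_set_sh(3.0);
+/// // mask bit 0 is set, so the lower lane is `b`'s lowest f16 widened to f64
+/// let r = _mm_mask_cvt_roundsh_sd::<_MM_FROUND_NO_EXC>(src, 1, a, b);
+/// // r == _mm_set_pd(8.0, 3.0)
+/// ```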
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_cvt_roundsh_sd)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtsh2sd, SAE = 8))]
+#[rustc_legacy_const_generics(4)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_mask_cvt_roundsh_sd<const SAE: i32>(
+    src: __m128d,
+    k: __mmask8,
+    a: __m128d,
+    b: __m128h,
+) -> __m128d {
+    static_assert_sae!(SAE);
+    vcvtsh2sd(a, b, src, k, SAE)
+}
+
+/// Convert the lower half-precision (16-bit) floating-point element in b to a double-precision (64-bit)
+/// floating-point element, store the result in the lower element of dst using zeromask k (the element is
+/// zeroed out when mask bit 0 is not set), and copy the upper element from a to the upper element of dst.
+///
+/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter.
+///
+/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_cvt_roundsh_sd)
+#[inline]
+#[target_feature(enable = "avx512fp16")]
+#[cfg_attr(test, assert_instr(vcvtsh2sd, SAE = 8))]
+#[rustc_legacy_const_generics(3)]
+#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")]
+pub unsafe fn _mm_maskz_cvt_roundsh_sd<const SAE: i32>(
+    k: __mmask8,
+    a: __m128d,
+    b: __m128h,
+) -> __m128d {
+    static_assert_sae!(SAE);
+    _mm_mask_cvt_roundsh_sd::<SAE>(_mm_setzero_pd(), k, a, b)
+}
+
+#[allow(improper_ctypes)]
+extern "C" {
+    #[link_name = "llvm.x86.avx512fp16.mask.cmp.sh"]
+    fn vcmpsh(a: __m128h, b: __m128h, imm8: i32, mask: __mmask8, sae: i32) -> __mmask8;
+    #[link_name = "llvm.x86.avx512fp16.vcomi.sh"]
+    fn vcomish(a: __m128h, b: __m128h, imm8: i32, sae: i32) -> i32;
+
+    #[link_name = "llvm.x86.avx512fp16.add.ph.512"]
+    fn vaddph(a: __m512h, b: __m512h, rounding: i32) -> __m512h;
+    #[link_name = "llvm.x86.avx512fp16.sub.ph.512"]
+    fn vsubph(a: __m512h, b: __m512h, rounding: i32) -> __m512h;
+    #[link_name = "llvm.x86.avx512fp16.mul.ph.512"]
+    fn vmulph(a: __m512h, b: __m512h, rounding: i32) -> __m512h;
+    #[link_name = "llvm.x86.avx512fp16.div.ph.512"]
+    fn vdivph(a: __m512h, b: __m512h, rounding: i32) -> __m512h;
+
+    #[link_name = "llvm.x86.avx512fp16.mask.add.sh.round"]
+    fn vaddsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h;
+    #[link_name = "llvm.x86.avx512fp16.mask.sub.sh.round"]
+    fn vsubsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h;
+    #[link_name = "llvm.x86.avx512fp16.mask.mul.sh.round"]
+    fn vmulsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h;
+    #[link_name = "llvm.x86.avx512fp16.mask.div.sh.round"]
+    fn vdivsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h;
+
+    #[link_name = "llvm.x86.avx512fp16.mask.vfmul.cph.128"]
+    fn vfmulcph_128(a: __m128, b: __m128, src: __m128, k: __mmask8) -> __m128;
+    #[link_name = "llvm.x86.avx512fp16.mask.vfmul.cph.256"]
+    fn vfmulcph_256(a: __m256, b: __m256, src: __m256, k: __mmask8) -> __m256;
+    #[link_name = "llvm.x86.avx512fp16.mask.vfmul.cph.512"]
+    fn vfmulcph_512(a: __m512, b: __m512, src: __m512, k: __mmask16, rounding: i32) -> __m512;
+    #[link_name = "llvm.x86.avx512fp16.mask.vfmul.csh"]
+    fn vfmulcsh(a: __m128, b: __m128, src: __m128, k: __mmask8, rounding: i32) -> __m128;
+
+    #[link_name = "llvm.x86.avx512fp16.mask.vfcmul.cph.128"]
+    fn vfcmulcph_128(a: __m128, b: __m128, src: __m128, k: __mmask8) -> __m128;
+    #[link_name = "llvm.x86.avx512fp16.mask.vfcmul.cph.256"]
+    fn
vfcmulcph_256(a: __m256, b: __m256, src: __m256, k: __mmask8) -> __m256; + #[link_name = "llvm.x86.avx512fp16.mask.vfcmul.cph.512"] + fn vfcmulcph_512(a: __m512, b: __m512, src: __m512, k: __mmask16, rounding: i32) -> __m512; + #[link_name = "llvm.x86.avx512fp16.mask.vfcmul.csh"] + fn vfcmulcsh(a: __m128, b: __m128, src: __m128, k: __mmask8, rounding: i32) -> __m128; + + #[link_name = "llvm.x86.avx512fp16.mask.vfmadd.cph.128"] + fn vfmaddcph_mask3_128(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128; + #[link_name = "llvm.x86.avx512fp16.maskz.vfmadd.cph.128"] + fn vfmaddcph_maskz_128(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128; + #[link_name = "llvm.x86.avx512fp16.mask.vfmadd.cph.256"] + fn vfmaddcph_mask3_256(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256; + #[link_name = "llvm.x86.avx512fp16.maskz.vfmadd.cph.256"] + fn vfmaddcph_maskz_256(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256; + #[link_name = "llvm.x86.avx512fp16.mask.vfmadd.cph.512"] + fn vfmaddcph_mask3_512(a: __m512, b: __m512, c: __m512, k: __mmask16, rounding: i32) -> __m512; + #[link_name = "llvm.x86.avx512fp16.maskz.vfmadd.cph.512"] + fn vfmaddcph_maskz_512(a: __m512, b: __m512, c: __m512, k: __mmask16, rounding: i32) -> __m512; + #[link_name = "llvm.x86.avx512fp16.mask.vfmadd.csh"] + fn vfmaddcsh_mask(a: __m128, b: __m128, c: __m128, k: __mmask8, rounding: i32) -> __m128; + #[link_name = "llvm.x86.avx512fp16.maskz.vfmadd.csh"] + fn vfmaddcsh_maskz(a: __m128, b: __m128, c: __m128, k: __mmask8, rounding: i32) -> __m128; + + #[link_name = "llvm.x86.avx512fp16.mask.vfcmadd.cph.128"] + fn vfcmaddcph_mask3_128(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128; + #[link_name = "llvm.x86.avx512fp16.maskz.vfcmadd.cph.128"] + fn vfcmaddcph_maskz_128(a: __m128, b: __m128, c: __m128, k: __mmask8) -> __m128; + #[link_name = "llvm.x86.avx512fp16.mask.vfcmadd.cph.256"] + fn vfcmaddcph_mask3_256(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256; + #[link_name = "llvm.x86.avx512fp16.maskz.vfcmadd.cph.256"] + fn vfcmaddcph_maskz_256(a: __m256, b: __m256, c: __m256, k: __mmask8) -> __m256; + #[link_name = "llvm.x86.avx512fp16.mask.vfcmadd.cph.512"] + fn vfcmaddcph_mask3_512(a: __m512, b: __m512, c: __m512, k: __mmask16, rounding: i32) + -> __m512; + #[link_name = "llvm.x86.avx512fp16.maskz.vfcmadd.cph.512"] + fn vfcmaddcph_maskz_512(a: __m512, b: __m512, c: __m512, k: __mmask16, rounding: i32) + -> __m512; + #[link_name = "llvm.x86.avx512fp16.mask.vfcmadd.csh"] + fn vfcmaddcsh_mask(a: __m128, b: __m128, c: __m128, k: __mmask8, rounding: i32) -> __m128; + #[link_name = "llvm.x86.avx512fp16.maskz.vfcmadd.csh"] + fn vfcmaddcsh_maskz(a: __m128, b: __m128, c: __m128, k: __mmask8, rounding: i32) -> __m128; + + #[link_name = "llvm.x86.avx512fp16.vfmadd.ph.512"] + fn vfmaddph_512(a: __m512h, b: __m512h, c: __m512h, rounding: i32) -> __m512h; + #[link_name = "llvm.fma.f16"] + fn fmaf16(a: f16, b: f16, c: f16) -> f16; // TODO: use `crate::intrinsics::fmaf16` when it's available + #[link_name = "llvm.x86.avx512fp16.vfmadd.f16"] + fn vfmaddsh(a: f16, b: f16, c: f16, rounding: i32) -> f16; + + #[link_name = "llvm.x86.avx512fp16.vfmaddsub.ph.128"] + fn vfmaddsubph_128(a: __m128h, b: __m128h, c: __m128h) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.vfmaddsub.ph.256"] + fn vfmaddsubph_256(a: __m256h, b: __m256h, c: __m256h) -> __m256h; + #[link_name = "llvm.x86.avx512fp16.vfmaddsub.ph.512"] + fn vfmaddsubph_512(a: __m512h, b: __m512h, c: __m512h, rounding: i32) -> __m512h; + + #[link_name = 
"llvm.x86.avx512fp16.mask.rcp.ph.128"] + fn vrcpph_128(a: __m128h, src: __m128h, k: __mmask8) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.rcp.ph.256"] + fn vrcpph_256(a: __m256h, src: __m256h, k: __mmask16) -> __m256h; + #[link_name = "llvm.x86.avx512fp16.mask.rcp.ph.512"] + fn vrcpph_512(a: __m512h, src: __m512h, k: __mmask32) -> __m512h; + #[link_name = "llvm.x86.avx512fp16.mask.rcp.sh"] + fn vrcpsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8) -> __m128h; + + #[link_name = "llvm.x86.avx512fp16.mask.rsqrt.ph.128"] + fn vrsqrtph_128(a: __m128h, src: __m128h, k: __mmask8) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.rsqrt.ph.256"] + fn vrsqrtph_256(a: __m256h, src: __m256h, k: __mmask16) -> __m256h; + #[link_name = "llvm.x86.avx512fp16.mask.rsqrt.ph.512"] + fn vrsqrtph_512(a: __m512h, src: __m512h, k: __mmask32) -> __m512h; + #[link_name = "llvm.x86.avx512fp16.mask.rsqrt.sh"] + fn vrsqrtsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8) -> __m128h; + + #[link_name = "llvm.x86.avx512fp16.sqrt.ph.512"] + fn vsqrtph_512(a: __m512h, rounding: i32) -> __m512h; + #[link_name = "llvm.x86.avx512fp16.mask.sqrt.sh"] + fn vsqrtsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; + + #[link_name = "llvm.x86.avx512fp16.max.ph.128"] + fn vmaxph_128(a: __m128h, b: __m128h) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.max.ph.256"] + fn vmaxph_256(a: __m256h, b: __m256h) -> __m256h; + #[link_name = "llvm.x86.avx512fp16.max.ph.512"] + fn vmaxph_512(a: __m512h, b: __m512h, sae: i32) -> __m512h; + #[link_name = "llvm.x86.avx512fp16.mask.max.sh.round"] + fn vmaxsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, sae: i32) -> __m128h; + + #[link_name = "llvm.x86.avx512fp16.min.ph.128"] + fn vminph_128(a: __m128h, b: __m128h) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.min.ph.256"] + fn vminph_256(a: __m256h, b: __m256h) -> __m256h; + #[link_name = "llvm.x86.avx512fp16.min.ph.512"] + fn vminph_512(a: __m512h, b: __m512h, sae: i32) -> __m512h; + #[link_name = "llvm.x86.avx512fp16.mask.min.sh.round"] + fn vminsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, sae: i32) -> __m128h; + + #[link_name = "llvm.x86.avx512fp16.mask.getexp.ph.128"] + fn vgetexpph_128(a: __m128h, src: __m128h, k: __mmask8) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.getexp.ph.256"] + fn vgetexpph_256(a: __m256h, src: __m256h, k: __mmask16) -> __m256h; + #[link_name = "llvm.x86.avx512fp16.mask.getexp.ph.512"] + fn vgetexpph_512(a: __m512h, src: __m512h, k: __mmask32, sae: i32) -> __m512h; + #[link_name = "llvm.x86.avx512fp16.mask.getexp.sh"] + fn vgetexpsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, sae: i32) -> __m128h; + + #[link_name = "llvm.x86.avx512fp16.mask.getmant.ph.128"] + fn vgetmantph_128(a: __m128h, imm8: i32, src: __m128h, k: __mmask8) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.getmant.ph.256"] + fn vgetmantph_256(a: __m256h, imm8: i32, src: __m256h, k: __mmask16) -> __m256h; + #[link_name = "llvm.x86.avx512fp16.mask.getmant.ph.512"] + fn vgetmantph_512(a: __m512h, imm8: i32, src: __m512h, k: __mmask32, sae: i32) -> __m512h; + #[link_name = "llvm.x86.avx512fp16.mask.getmant.sh"] + fn vgetmantsh( + a: __m128h, b: __m128h, imm8: i32, src: __m128h, @@ -13238,4262 +16032,6648 @@ extern "C" { sae: i32, ) -> __m128h; - #[link_name = "llvm.x86.avx512fp16.mask.rndscale.ph.128"] - fn vrndscaleph_128(a: __m128h, imm8: i32, src: __m128h, k: __mmask8) -> __m128h; - #[link_name = "llvm.x86.avx512fp16.mask.rndscale.ph.256"] - fn 
vrndscaleph_256(a: __m256h, imm8: i32, src: __m256h, k: __mmask16) -> __m256h; - #[link_name = "llvm.x86.avx512fp16.mask.rndscale.ph.512"] - fn vrndscaleph_512(a: __m512h, imm8: i32, src: __m512h, k: __mmask32, sae: i32) -> __m512h; - #[link_name = "llvm.x86.avx512fp16.mask.rndscale.sh"] - fn vrndscalesh( - a: __m128h, - b: __m128h, - src: __m128h, - k: __mmask8, - imm8: i32, - sae: i32, - ) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.rndscale.ph.128"] + fn vrndscaleph_128(a: __m128h, imm8: i32, src: __m128h, k: __mmask8) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.rndscale.ph.256"] + fn vrndscaleph_256(a: __m256h, imm8: i32, src: __m256h, k: __mmask16) -> __m256h; + #[link_name = "llvm.x86.avx512fp16.mask.rndscale.ph.512"] + fn vrndscaleph_512(a: __m512h, imm8: i32, src: __m512h, k: __mmask32, sae: i32) -> __m512h; + #[link_name = "llvm.x86.avx512fp16.mask.rndscale.sh"] + fn vrndscalesh( + a: __m128h, + b: __m128h, + src: __m128h, + k: __mmask8, + imm8: i32, + sae: i32, + ) -> __m128h; + + #[link_name = "llvm.x86.avx512fp16.mask.scalef.ph.128"] + fn vscalefph_128(a: __m128h, b: __m128h, src: __m128h, k: __mmask8) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.scalef.ph.256"] + fn vscalefph_256(a: __m256h, b: __m256h, src: __m256h, k: __mmask16) -> __m256h; + #[link_name = "llvm.x86.avx512fp16.mask.scalef.ph.512"] + fn vscalefph_512(a: __m512h, b: __m512h, src: __m512h, k: __mmask32, rounding: i32) -> __m512h; + #[link_name = "llvm.x86.avx512fp16.mask.scalef.sh"] + fn vscalefsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; + + #[link_name = "llvm.x86.avx512fp16.mask.reduce.ph.128"] + fn vreduceph_128(a: __m128h, imm8: i32, src: __m128h, k: __mmask8) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.reduce.ph.256"] + fn vreduceph_256(a: __m256h, imm8: i32, src: __m256h, k: __mmask16) -> __m256h; + #[link_name = "llvm.x86.avx512fp16.mask.reduce.ph.512"] + fn vreduceph_512(a: __m512h, imm8: i32, src: __m512h, k: __mmask32, sae: i32) -> __m512h; + #[link_name = "llvm.x86.avx512fp16.mask.reduce.sh"] + fn vreducesh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, imm8: i32, sae: i32) + -> __m128h; + + #[link_name = "llvm.x86.avx512fp16.mask.fpclass.sh"] + fn vfpclasssh(a: __m128h, imm8: i32, k: __mmask8) -> __mmask8; + + #[link_name = "llvm.x86.avx512.sitofp.round.v8f16.v8i16"] + fn vcvtw2ph_128(a: i16x8, rounding: i32) -> __m128h; + #[link_name = "llvm.x86.avx512.sitofp.round.v16f16.v16i16"] + fn vcvtw2ph_256(a: i16x16, rounding: i32) -> __m256h; + #[link_name = "llvm.x86.avx512.sitofp.round.v32f16.v32i16"] + fn vcvtw2ph_512(a: i16x32, rounding: i32) -> __m512h; + #[link_name = "llvm.x86.avx512.uitofp.round.v8f16.v8u16"] + fn vcvtuw2ph_128(a: u16x8, rounding: i32) -> __m128h; + #[link_name = "llvm.x86.avx512.uitofp.round.v16f16.v16u16"] + fn vcvtuw2ph_256(a: u16x16, rounding: i32) -> __m256h; + #[link_name = "llvm.x86.avx512.uitofp.round.v32f16.v32u16"] + fn vcvtuw2ph_512(a: u16x32, rounding: i32) -> __m512h; + + #[link_name = "llvm.x86.avx512fp16.mask.vcvtdq2ph.128"] + fn vcvtdq2ph_128(a: i32x4, src: __m128h, k: __mmask8) -> __m128h; + #[link_name = "llvm.x86.avx512.sitofp.round.v8f16.v8i32"] + fn vcvtdq2ph_256(a: i32x8, rounding: i32) -> __m128h; + #[link_name = "llvm.x86.avx512.sitofp.round.v16f16.v16i32"] + fn vcvtdq2ph_512(a: i32x16, rounding: i32) -> __m256h; + #[link_name = "llvm.x86.avx512fp16.vcvtsi2sh"] + fn vcvtsi2sh(a: __m128h, b: i32, rounding: i32) -> __m128h; + #[link_name = 
"llvm.x86.avx512fp16.mask.vcvtudq2ph.128"] + fn vcvtudq2ph_128(a: u32x4, src: __m128h, k: __mmask8) -> __m128h; + #[link_name = "llvm.x86.avx512.uitofp.round.v8f16.v8u32"] + fn vcvtudq2ph_256(a: u32x8, rounding: i32) -> __m128h; + #[link_name = "llvm.x86.avx512.uitofp.round.v16f16.v16u32"] + fn vcvtudq2ph_512(a: u32x16, rounding: i32) -> __m256h; + #[link_name = "llvm.x86.avx512fp16.vcvtusi2sh"] + fn vcvtusi2sh(a: __m128h, b: u32, rounding: i32) -> __m128h; + + #[link_name = "llvm.x86.avx512fp16.mask.vcvtqq2ph.128"] + fn vcvtqq2ph_128(a: i64x2, src: __m128h, k: __mmask8) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.vcvtqq2ph.256"] + fn vcvtqq2ph_256(a: i64x4, src: __m128h, k: __mmask8) -> __m128h; + #[link_name = "llvm.x86.avx512.sitofp.round.v8f16.v8i64"] + fn vcvtqq2ph_512(a: i64x8, rounding: i32) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.vcvtuqq2ph.128"] + fn vcvtuqq2ph_128(a: u64x2, src: __m128h, k: __mmask8) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.vcvtuqq2ph.256"] + fn vcvtuqq2ph_256(a: u64x4, src: __m128h, k: __mmask8) -> __m128h; + #[link_name = "llvm.x86.avx512.uitofp.round.v8f16.v8u64"] + fn vcvtuqq2ph_512(a: u64x8, rounding: i32) -> __m128h; + + #[link_name = "llvm.x86.avx512fp16.mask.vcvtps2phx.128"] + fn vcvtps2phx_128(a: __m128, src: __m128h, k: __mmask8) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.vcvtps2phx.256"] + fn vcvtps2phx_256(a: __m256, src: __m128h, k: __mmask8) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.vcvtps2phx.512"] + fn vcvtps2phx_512(a: __m512, src: __m256h, k: __mmask16, rounding: i32) -> __m256h; + #[link_name = "llvm.x86.avx512fp16.mask.vcvtss2sh.round"] + fn vcvtss2sh(a: __m128h, b: __m128, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; + + #[link_name = "llvm.x86.avx512fp16.mask.vcvtpd2ph.128"] + fn vcvtpd2ph_128(a: __m128d, src: __m128h, k: __mmask8) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.vcvtpd2ph.256"] + fn vcvtpd2ph_256(a: __m256d, src: __m128h, k: __mmask8) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.vcvtpd2ph.512"] + fn vcvtpd2ph_512(a: __m512d, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.mask.vcvtsd2sh.round"] + fn vcvtsd2sh(a: __m128h, b: __m128d, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; + + #[link_name = "llvm.x86.avx512fp16.mask.vcvtph2w.128"] + fn vcvtph2w_128(a: __m128h, src: i16x8, k: __mmask8) -> i16x8; + #[link_name = "llvm.x86.avx512fp16.mask.vcvtph2w.256"] + fn vcvtph2w_256(a: __m256h, src: i16x16, k: __mmask16) -> i16x16; + #[link_name = "llvm.x86.avx512fp16.mask.vcvtph2w.512"] + fn vcvtph2w_512(a: __m512h, src: i16x32, k: __mmask32, rounding: i32) -> i16x32; + #[link_name = "llvm.x86.avx512fp16.mask.vcvtph2uw.128"] + fn vcvtph2uw_128(a: __m128h, src: u16x8, k: __mmask8) -> u16x8; + #[link_name = "llvm.x86.avx512fp16.mask.vcvtph2uw.256"] + fn vcvtph2uw_256(a: __m256h, src: u16x16, k: __mmask16) -> u16x16; + #[link_name = "llvm.x86.avx512fp16.mask.vcvtph2uw.512"] + fn vcvtph2uw_512(a: __m512h, src: u16x32, k: __mmask32, rounding: i32) -> u16x32; + + #[link_name = "llvm.x86.avx512fp16.mask.vcvttph2w.128"] + fn vcvttph2w_128(a: __m128h, src: i16x8, k: __mmask8) -> i16x8; + #[link_name = "llvm.x86.avx512fp16.mask.vcvttph2w.256"] + fn vcvttph2w_256(a: __m256h, src: i16x16, k: __mmask16) -> i16x16; + #[link_name = "llvm.x86.avx512fp16.mask.vcvttph2w.512"] + fn vcvttph2w_512(a: __m512h, src: i16x32, k: __mmask32, sae: i32) -> i16x32; + #[link_name = "llvm.x86.avx512fp16.mask.vcvttph2uw.128"] + fn 
vcvttph2uw_128(a: __m128h, src: u16x8, k: __mmask8) -> u16x8; + #[link_name = "llvm.x86.avx512fp16.mask.vcvttph2uw.256"] + fn vcvttph2uw_256(a: __m256h, src: u16x16, k: __mmask16) -> u16x16; + #[link_name = "llvm.x86.avx512fp16.mask.vcvttph2uw.512"] + fn vcvttph2uw_512(a: __m512h, src: u16x32, k: __mmask32, sae: i32) -> u16x32; + + #[link_name = "llvm.x86.avx512fp16.mask.vcvtph2dq.128"] + fn vcvtph2dq_128(a: __m128h, src: i32x4, k: __mmask8) -> i32x4; + #[link_name = "llvm.x86.avx512fp16.mask.vcvtph2dq.256"] + fn vcvtph2dq_256(a: __m128h, src: i32x8, k: __mmask8) -> i32x8; + #[link_name = "llvm.x86.avx512fp16.mask.vcvtph2dq.512"] + fn vcvtph2dq_512(a: __m256h, src: i32x16, k: __mmask16, rounding: i32) -> i32x16; + #[link_name = "llvm.x86.avx512fp16.vcvtsh2si32"] + fn vcvtsh2si32(a: __m128h, rounding: i32) -> i32; + #[link_name = "llvm.x86.avx512fp16.mask.vcvtph2udq.128"] + fn vcvtph2udq_128(a: __m128h, src: u32x4, k: __mmask8) -> u32x4; + #[link_name = "llvm.x86.avx512fp16.mask.vcvtph2udq.256"] + fn vcvtph2udq_256(a: __m128h, src: u32x8, k: __mmask8) -> u32x8; + #[link_name = "llvm.x86.avx512fp16.mask.vcvtph2udq.512"] + fn vcvtph2udq_512(a: __m256h, src: u32x16, k: __mmask16, rounding: i32) -> u32x16; + #[link_name = "llvm.x86.avx512fp16.vcvtsh2usi32"] + fn vcvtsh2usi32(a: __m128h, sae: i32) -> u32; + + #[link_name = "llvm.x86.avx512fp16.mask.vcvttph2dq.128"] + fn vcvttph2dq_128(a: __m128h, src: i32x4, k: __mmask8) -> i32x4; + #[link_name = "llvm.x86.avx512fp16.mask.vcvttph2dq.256"] + fn vcvttph2dq_256(a: __m128h, src: i32x8, k: __mmask8) -> i32x8; + #[link_name = "llvm.x86.avx512fp16.mask.vcvttph2dq.512"] + fn vcvttph2dq_512(a: __m256h, src: i32x16, k: __mmask16, sae: i32) -> i32x16; + #[link_name = "llvm.x86.avx512fp16.vcvttsh2si32"] + fn vcvttsh2si32(a: __m128h, sae: i32) -> i32; + #[link_name = "llvm.x86.avx512fp16.mask.vcvttph2udq.128"] + fn vcvttph2udq_128(a: __m128h, src: u32x4, k: __mmask8) -> u32x4; + #[link_name = "llvm.x86.avx512fp16.mask.vcvttph2udq.256"] + fn vcvttph2udq_256(a: __m128h, src: u32x8, k: __mmask8) -> u32x8; + #[link_name = "llvm.x86.avx512fp16.mask.vcvttph2udq.512"] + fn vcvttph2udq_512(a: __m256h, src: u32x16, k: __mmask16, sae: i32) -> u32x16; + #[link_name = "llvm.x86.avx512fp16.vcvttsh2usi32"] + fn vcvttsh2usi32(a: __m128h, sae: i32) -> u32; + + #[link_name = "llvm.x86.avx512fp16.mask.vcvtph2qq.128"] + fn vcvtph2qq_128(a: __m128h, src: i64x2, k: __mmask8) -> i64x2; + #[link_name = "llvm.x86.avx512fp16.mask.vcvtph2qq.256"] + fn vcvtph2qq_256(a: __m128h, src: i64x4, k: __mmask8) -> i64x4; + #[link_name = "llvm.x86.avx512fp16.mask.vcvtph2qq.512"] + fn vcvtph2qq_512(a: __m128h, src: i64x8, k: __mmask8, rounding: i32) -> i64x8; + #[link_name = "llvm.x86.avx512fp16.mask.vcvtph2uqq.128"] + fn vcvtph2uqq_128(a: __m128h, src: u64x2, k: __mmask8) -> u64x2; + #[link_name = "llvm.x86.avx512fp16.mask.vcvtph2uqq.256"] + fn vcvtph2uqq_256(a: __m128h, src: u64x4, k: __mmask8) -> u64x4; + #[link_name = "llvm.x86.avx512fp16.mask.vcvtph2uqq.512"] + fn vcvtph2uqq_512(a: __m128h, src: u64x8, k: __mmask8, rounding: i32) -> u64x8; + + #[link_name = "llvm.x86.avx512fp16.mask.vcvttph2qq.128"] + fn vcvttph2qq_128(a: __m128h, src: i64x2, k: __mmask8) -> i64x2; + #[link_name = "llvm.x86.avx512fp16.mask.vcvttph2qq.256"] + fn vcvttph2qq_256(a: __m128h, src: i64x4, k: __mmask8) -> i64x4; + #[link_name = "llvm.x86.avx512fp16.mask.vcvttph2qq.512"] + fn vcvttph2qq_512(a: __m128h, src: i64x8, k: __mmask8, sae: i32) -> i64x8; + #[link_name = "llvm.x86.avx512fp16.mask.vcvttph2uqq.128"] + fn 
vcvttph2uqq_128(a: __m128h, src: u64x2, k: __mmask8) -> u64x2; + #[link_name = "llvm.x86.avx512fp16.mask.vcvttph2uqq.256"] + fn vcvttph2uqq_256(a: __m128h, src: u64x4, k: __mmask8) -> u64x4; + #[link_name = "llvm.x86.avx512fp16.mask.vcvttph2uqq.512"] + fn vcvttph2uqq_512(a: __m128h, src: u64x8, k: __mmask8, sae: i32) -> u64x8; + + #[link_name = "llvm.x86.avx512fp16.mask.vcvtph2psx.128"] + fn vcvtph2psx_128(a: __m128h, src: __m128, k: __mmask8) -> __m128; + #[link_name = "llvm.x86.avx512fp16.mask.vcvtph2psx.256"] + fn vcvtph2psx_256(a: __m128h, src: __m256, k: __mmask8) -> __m256; + #[link_name = "llvm.x86.avx512fp16.mask.vcvtph2psx.512"] + fn vcvtph2psx_512(a: __m256h, src: __m512, k: __mmask16, sae: i32) -> __m512; + #[link_name = "llvm.x86.avx512fp16.mask.vcvtsh2ss.round"] + fn vcvtsh2ss(a: __m128, b: __m128h, src: __m128, k: __mmask8, sae: i32) -> __m128; + + #[link_name = "llvm.x86.avx512fp16.mask.vcvtph2pd.128"] + fn vcvtph2pd_128(a: __m128h, src: __m128d, k: __mmask8) -> __m128d; + #[link_name = "llvm.x86.avx512fp16.mask.vcvtph2pd.256"] + fn vcvtph2pd_256(a: __m128h, src: __m256d, k: __mmask8) -> __m256d; + #[link_name = "llvm.x86.avx512fp16.mask.vcvtph2pd.512"] + fn vcvtph2pd_512(a: __m128h, src: __m512d, k: __mmask8, sae: i32) -> __m512d; + #[link_name = "llvm.x86.avx512fp16.mask.vcvtsh2sd.round"] + fn vcvtsh2sd(a: __m128d, b: __m128h, src: __m128d, k: __mmask8, sae: i32) -> __m128d; + +} + +#[cfg(test)] +mod tests { + use crate::core_arch::x86::*; + use crate::mem::transmute; + use crate::ptr::{addr_of, addr_of_mut}; + use stdarch_test::simd_test; + + #[target_feature(enable = "avx512fp16")] + unsafe fn _mm_set1_pch(re: f16, im: f16) -> __m128h { + _mm_setr_ph(re, im, re, im, re, im, re, im) + } + + #[target_feature(enable = "avx512fp16")] + unsafe fn _mm256_set1_pch(re: f16, im: f16) -> __m256h { + _mm256_setr_ph( + re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, + ) + } + + #[target_feature(enable = "avx512fp16")] + unsafe fn _mm512_set1_pch(re: f16, im: f16) -> __m512h { + _mm512_setr_ph( + re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, + re, im, re, im, re, im, re, im, re, im, + ) + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_set_ph() { + let r = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let e = _mm_setr_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_set_ph() { + let r = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let e = _mm256_setr_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_set_ph() { + let r = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let e = _mm512_setr_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_set_sh() { + let r = _mm_set_sh(1.0); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn 
test_mm_set1_ph() { + let r = _mm_set1_ph(1.0); + let e = _mm_set_ph(1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_set1_ph() { + let r = _mm256_set1_ph(1.0); + let e = _mm256_set_ph( + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_set1_ph() { + let r = _mm512_set1_ph(1.0); + let e = _mm512_set_ph( + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_setr_ph() { + let r = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let e = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_setr_ph() { + let r = _mm256_setr_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let e = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_setr_ph() { + let r = _mm512_setr_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let e = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_setzero_ph() { + let r = _mm_setzero_ph(); + let e = _mm_set1_ph(0.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_setzero_ph() { + let r = _mm256_setzero_ph(); + let e = _mm256_set1_ph(0.0); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_setzero_ph() { + let r = _mm512_setzero_ph(); + let e = _mm512_set1_ph(0.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_castsi128_ph() { + let a = _mm_set1_epi16(0x3c00); + let r = _mm_castsi128_ph(a); + let e = _mm_set1_ph(1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castsi256_ph() { + let a = _mm256_set1_epi16(0x3c00); + let r = _mm256_castsi256_ph(a); + let e = _mm256_set1_ph(1.0); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castsi512_ph() { + let a = _mm512_set1_epi16(0x3c00); + let r = _mm512_castsi512_ph(a); + let e = _mm512_set1_ph(1.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_castph_si128() { + let a = _mm_set1_ph(1.0); + let r = _mm_castph_si128(a); + let e = _mm_set1_epi16(0x3c00); + assert_eq_m128i(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castph_si256() { + let a = _mm256_set1_ph(1.0); + let r = _mm256_castph_si256(a); + let e = _mm256_set1_epi16(0x3c00); + assert_eq_m256i(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph_si512() { + let a = _mm512_set1_ph(1.0); + let r = _mm512_castph_si512(a); + let e = 
_mm512_set1_epi16(0x3c00); + assert_eq_m512i(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_castps_ph() { + let a = _mm_castsi128_ps(_mm_set1_epi16(0x3c00)); + let r = _mm_castps_ph(a); + let e = _mm_set1_ph(1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castps_ph() { + let a = _mm256_castsi256_ps(_mm256_set1_epi16(0x3c00)); + let r = _mm256_castps_ph(a); + let e = _mm256_set1_ph(1.0); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castps_ph() { + let a = _mm512_castsi512_ps(_mm512_set1_epi16(0x3c00)); + let r = _mm512_castps_ph(a); + let e = _mm512_set1_ph(1.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_castph_ps() { + let a = _mm_castsi128_ph(_mm_set1_epi32(0x3f800000)); + let r = _mm_castph_ps(a); + let e = _mm_set1_ps(1.0); + assert_eq_m128(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castph_ps() { + let a = _mm256_castsi256_ph(_mm256_set1_epi32(0x3f800000)); + let r = _mm256_castph_ps(a); + let e = _mm256_set1_ps(1.0); + assert_eq_m256(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph_ps() { + let a = _mm512_castsi512_ph(_mm512_set1_epi32(0x3f800000)); + let r = _mm512_castph_ps(a); + let e = _mm512_set1_ps(1.0); + assert_eq_m512(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_castpd_ph() { + let a = _mm_castsi128_pd(_mm_set1_epi16(0x3c00)); + let r = _mm_castpd_ph(a); + let e = _mm_set1_ph(1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castpd_ph() { + let a = _mm256_castsi256_pd(_mm256_set1_epi16(0x3c00)); + let r = _mm256_castpd_ph(a); + let e = _mm256_set1_ph(1.0); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castpd_ph() { + let a = _mm512_castsi512_pd(_mm512_set1_epi16(0x3c00)); + let r = _mm512_castpd_ph(a); + let e = _mm512_set1_ph(1.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_castph_pd() { + let a = _mm_castsi128_ph(_mm_set1_epi64x(0x3ff0000000000000)); + let r = _mm_castph_pd(a); + let e = _mm_set1_pd(1.0); + assert_eq_m128d(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castph_pd() { + let a = _mm256_castsi256_ph(_mm256_set1_epi64x(0x3ff0000000000000)); + let r = _mm256_castph_pd(a); + let e = _mm256_set1_pd(1.0); + assert_eq_m256d(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph_pd() { + let a = _mm512_castsi512_ph(_mm512_set1_epi64(0x3ff0000000000000)); + let r = _mm512_castph_pd(a); + let e = _mm512_set1_pd(1.0); + assert_eq_m512d(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castph256_ph128() { + let a = _mm256_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., + ); + let r = _mm256_castph256_ph128(a); + let e = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph512_ph128() { + let a = _mm512_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., + ); + let r = _mm512_castph512_ph128(a); + let e = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph512_ph256() { + let a = 
_mm512_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., + ); + let r = _mm512_castph512_ph256(a); + let e = _mm256_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_castph128_ph256() { + let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + let r = _mm256_castph128_ph256(a); + assert_eq_m128h(_mm256_castph256_ph128(r), a); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph128_ph512() { + let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + let r = _mm512_castph128_ph512(a); + assert_eq_m128h(_mm512_castph512_ph128(r), a); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_castph256_ph512() { + let a = _mm256_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., + ); + let r = _mm512_castph256_ph512(a); + assert_eq_m256h(_mm512_castph512_ph256(r), a); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm256_zextph128_ph256() { + let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + let r = _mm256_zextph128_ph256(a); + let e = _mm256_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 0., 0., 0., 0., 0., 0., 0., 0., + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_zextph128_ph512() { + let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + let r = _mm512_zextph128_ph512(a); + let e = _mm512_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_zextph256_ph512() { + let a = _mm256_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., + ); + let r = _mm512_zextph256_ph512(a); + let e = _mm512_setr_ph( + 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_cmp_ph_mask() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(1.0, 2.0, 3.0, 4.0, -5.0, -6.0, -7.0, -8.0); + let r = _mm_cmp_ph_mask::<_CMP_EQ_OQ>(a, b); + assert_eq!(r, 0b11110000); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_cmp_ph_mask() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(1.0, 2.0, 3.0, 4.0, -5.0, -6.0, -7.0, -8.0); + let r = _mm_mask_cmp_ph_mask::<_CMP_EQ_OQ>(0b01010101, a, b); + assert_eq!(r, 0b01010000); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_cmp_ph_mask() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, -5.0, -6.0, -7.0, -8.0, 9.0, 10.0, 11.0, 12.0, -13.0, -14.0, -15.0, + -16.0, + ); + let r = _mm256_cmp_ph_mask::<_CMP_EQ_OQ>(a, b); + assert_eq!(r, 0b1111000011110000); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_cmp_ph_mask() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, -5.0, -6.0, -7.0, -8.0, 9.0, 10.0, 11.0, 12.0, -13.0, -14.0, -15.0, + -16.0, + ); + let r 
= _mm256_mask_cmp_ph_mask::<_CMP_EQ_OQ>(0b0101010101010101, a, b); + assert_eq!(r, 0b0101000001010000); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_cmp_ph_mask() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, -5.0, -6.0, -7.0, -8.0, 9.0, 10.0, 11.0, 12.0, -13.0, -14.0, -15.0, + -16.0, 17.0, 18.0, 19.0, 20.0, -21.0, -22.0, -23.0, -24.0, 25.0, 26.0, 27.0, 28.0, + -29.0, -30.0, -31.0, -32.0, + ); + let r = _mm512_cmp_ph_mask::<_CMP_EQ_OQ>(a, b); + assert_eq!(r, 0b11110000111100001111000011110000); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_cmp_ph_mask() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, -5.0, -6.0, -7.0, -8.0, 9.0, 10.0, 11.0, 12.0, -13.0, -14.0, -15.0, + -16.0, 17.0, 18.0, 19.0, 20.0, -21.0, -22.0, -23.0, -24.0, 25.0, 26.0, 27.0, 28.0, + -29.0, -30.0, -31.0, -32.0, + ); + let r = _mm512_mask_cmp_ph_mask::<_CMP_EQ_OQ>(0b01010101010101010101010101010101, a, b); + assert_eq!(r, 0b01010000010100000101000001010000); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_cmp_round_sh_mask() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_cmp_round_sh_mask::<_CMP_EQ_OQ, _MM_FROUND_NO_EXC>(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_cmp_round_sh_mask() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_mask_cmp_round_sh_mask::<_CMP_EQ_OQ, _MM_FROUND_NO_EXC>(0, a, b); + assert_eq!(r, 0); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_cmp_sh_mask() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_cmp_sh_mask::<_CMP_EQ_OQ>(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_cmp_sh_mask() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_mask_cmp_sh_mask::<_CMP_EQ_OQ>(0, a, b); + assert_eq!(r, 0); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comi_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_comi_round_sh::<_CMP_EQ_OQ, _MM_FROUND_NO_EXC>(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comi_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_comi_sh::<_CMP_EQ_OQ>(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comieq_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_comieq_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comige_sh() { + let a = _mm_set_sh(2.0); + let b = _mm_set_sh(1.0); + let r = _mm_comige_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comigt_sh() { + let a = _mm_set_sh(2.0); + let b = _mm_set_sh(1.0); + let r = _mm_comigt_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comile_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_comile_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comilt_sh() { + let 
a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_comilt_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_comineq_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_comineq_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_ucomieq_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(1.0); + let r = _mm_ucomieq_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_ucomige_sh() { + let a = _mm_set_sh(2.0); + let b = _mm_set_sh(1.0); + let r = _mm_ucomige_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_ucomigt_sh() { + let a = _mm_set_sh(2.0); + let b = _mm_set_sh(1.0); + let r = _mm_ucomigt_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_ucomile_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_ucomile_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_ucomilt_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_ucomilt_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_ucomineq_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_ucomineq_sh(a, b); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_load_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_load_ph(addr_of!(a).cast()); + assert_eq_m128h(a, b); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_load_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_load_ph(addr_of!(a).cast()); + assert_eq_m256h(a, b); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_load_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_load_ph(addr_of!(a).cast()); + assert_eq_m512h(a, b); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_load_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_load_sh(addr_of!(a).cast()); + assert_eq_m128h(a, b); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_load_sh() { + let a = _mm_set_sh(1.0); + let src = _mm_set_sh(2.); + let b = _mm_mask_load_sh(src, 1, addr_of!(a).cast()); + assert_eq_m128h(a, b); + let b = _mm_mask_load_sh(src, 0, addr_of!(a).cast()); + assert_eq_m128h(src, b); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_load_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_maskz_load_sh(1, addr_of!(a).cast()); + assert_eq_m128h(a, b); + let b = _mm_maskz_load_sh(0, addr_of!(a).cast()); + assert_eq_m128h(_mm_setzero_ph(), b); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_loadu_ph() { + let array = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]; + let r = _mm_loadu_ph(array.as_ptr()); + let e = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_loadu_ph() { + let array = [ + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ]; + let r = _mm256_loadu_ph(array.as_ptr()); + let e = _mm256_setr_ph( + 1.0, 
2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_loadu_ph() { + let array = [ + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ]; + let r = _mm512_loadu_ph(array.as_ptr()); + let e = _mm512_setr_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_move_sh() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_sh(9.0); + let r = _mm_move_sh(a, b); + let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 9.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_move_sh() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_sh(9.0); + let src = _mm_set_sh(10.0); + let r = _mm_mask_move_sh(src, 0, a, b); + let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 10.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_move_sh() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_sh(9.0); + let r = _mm_maskz_move_sh(0, a, b); + let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 0.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_store_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let mut b = _mm_setzero_ph(); + _mm_store_ph(addr_of_mut!(b).cast(), a); + assert_eq_m128h(a, b); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_store_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let mut b = _mm256_setzero_ph(); + _mm256_store_ph(addr_of_mut!(b).cast(), a); + assert_eq_m256h(a, b); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_store_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let mut b = _mm512_setzero_ph(); + _mm512_store_ph(addr_of_mut!(b).cast(), a); + assert_eq_m512h(a, b); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_store_sh() { + let a = _mm_set_sh(1.0); + let mut b = _mm_setzero_ph(); + _mm_store_sh(addr_of_mut!(b).cast(), a); + assert_eq_m128h(a, b); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_store_sh() { + let a = _mm_set_sh(1.0); + let mut b = _mm_setzero_ph(); + _mm_mask_store_sh(addr_of_mut!(b).cast(), 0, a); + assert_eq_m128h(_mm_setzero_ph(), b); + _mm_mask_store_sh(addr_of_mut!(b).cast(), 1, a); + assert_eq_m128h(a, b); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_storeu_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let mut array = [0.0; 8]; + _mm_storeu_ph(array.as_mut_ptr(), a); + assert_eq_m128h(a, _mm_loadu_ph(array.as_ptr())); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_storeu_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); 
+ let mut array = [0.0; 16]; + _mm256_storeu_ph(array.as_mut_ptr(), a); + assert_eq_m256h(a, _mm256_loadu_ph(array.as_ptr())); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_storeu_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let mut array = [0.0; 32]; + _mm512_storeu_ph(array.as_mut_ptr(), a); + assert_eq_m512h(a, _mm512_loadu_ph(array.as_ptr())); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_add_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let r = _mm_add_ph(a, b); + let e = _mm_set1_ph(9.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_add_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm_mask_add_ph(src, 0b01010101, a, b); + let e = _mm_set_ph(10., 9., 12., 9., 14., 9., 16., 9.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_add_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let r = _mm_maskz_add_ph(0b01010101, a, b); + let e = _mm_set_ph(0., 9., 0., 9., 0., 9., 0., 9.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_add_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let r = _mm256_add_ph(a, b); + let e = _mm256_set1_ph(17.0); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_add_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let src = _mm256_set_ph( + 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., + ); + let r = _mm256_mask_add_ph(src, 0b0101010101010101, a, b); + let e = _mm256_set_ph( + 18., 17., 20., 17., 22., 17., 24., 17., 26., 17., 28., 17., 30., 17., 32., 17., + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_add_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let r = _mm256_maskz_add_ph(0b0101010101010101, a, b); + let e = _mm256_set_ph( + 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_add_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 
21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_add_ph(a, b); + let e = _mm512_set1_ph(33.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_add_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let src = _mm512_set_ph( + 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., + 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + ); + let r = _mm512_mask_add_ph(src, 0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 34., 33., 36., 33., 38., 33., 40., 33., 42., 33., 44., 33., 46., 33., 48., 33., 50., + 33., 52., 33., 54., 33., 56., 33., 58., 33., 60., 33., 62., 33., 64., 33., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_add_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_maskz_add_ph(0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., + 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_add_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_add_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set1_ph(33.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_add_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let src = _mm512_set_ph( + 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., + 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + ); + let r = _mm512_mask_add_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b01010101010101010101010101010101, + 
a, + b, + ); + let e = _mm512_set_ph( + 34., 33., 36., 33., 38., 33., 40., 33., 42., 33., 44., 33., 46., 33., 48., 33., 50., + 33., 52., 33., 54., 33., 56., 33., 58., 33., 60., 33., 62., 33., 64., 33., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_add_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_maskz_add_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., + 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_add_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_set_sh(3.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_add_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let src = _mm_set_sh(4.0); + let r = _mm_mask_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, + ); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 1, a, b, + ); + let e = _mm_set_sh(3.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_add_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = + _mm_maskz_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = + _mm_maskz_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_set_sh(3.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_add_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_add_sh(a, b); + let e = _mm_set_sh(3.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_add_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let src = _mm_set_sh(4.0); + let r = _mm_mask_add_sh(src, 0, a, b); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_add_sh(src, 1, a, b); + let e = _mm_set_sh(3.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_add_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_maskz_add_sh(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = _mm_maskz_add_sh(1, a, b); + let e = _mm_set_sh(3.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_sub_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let r = _mm_sub_ph(a, b); + let e = _mm_set_ph(-7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = 
"avx512fp16,avx512vl")] + unsafe fn test_mm_mask_sub_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm_mask_sub_ph(src, 0b01010101, a, b); + let e = _mm_set_ph(10., -5., 12., -1., 14., 3., 16., 7.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_sub_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let r = _mm_maskz_sub_ph(0b01010101, a, b); + let e = _mm_set_ph(0., -5., 0., -1., 0., 3., 0., 7.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_sub_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let r = _mm256_sub_ph(a, b); + let e = _mm256_set_ph( + -15.0, -13.0, -11.0, -9.0, -7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, + 15.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_sub_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let src = _mm256_set_ph( + 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., + ); + let r = _mm256_mask_sub_ph(src, 0b0101010101010101, a, b); + let e = _mm256_set_ph( + 18., -13., 20., -9., 22., -5., 24., -1., 26., 3., 28., 7., 30., 11., 32., 15., + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_sub_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let r = _mm256_maskz_sub_ph(0b0101010101010101, a, b); + let e = _mm256_set_ph( + 0., -13., 0., -9., 0., -5., 0., -1., 0., 3., 0., 7., 0., 11., 0., 15., + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_sub_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_sub_ph(a, b); + let e = _mm512_set_ph( + -31.0, -29.0, -27.0, -25.0, -23.0, -21.0, -19.0, -17.0, -15.0, -13.0, -11.0, -9.0, + -7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, + 23.0, 25.0, 27.0, 29.0, 31.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_sub_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 
27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let src = _mm512_set_ph( + 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., + 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + ); + let r = _mm512_mask_sub_ph(src, 0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 34., -29., 36., -25., 38., -21., 40., -17., 42., -13., 44., -9., 46., -5., 48., -1., + 50., 3., 52., 7., 54., 11., 56., 15., 58., 19., 60., 23., 62., 27., 64., 31., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_sub_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_maskz_sub_ph(0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 0., -29., 0., -25., 0., -21., 0., -17., 0., -13., 0., -9., 0., -5., 0., -1., 0., 3., + 0., 7., 0., 11., 0., 15., 0., 19., 0., 23., 0., 27., 0., 31., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_sub_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_sub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set_ph( + -31.0, -29.0, -27.0, -25.0, -23.0, -21.0, -19.0, -17.0, -15.0, -13.0, -11.0, -9.0, + -7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, + 23.0, 25.0, 27.0, 29.0, 31.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_sub_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let src = _mm512_set_ph( + 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., + 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + ); + let r = _mm512_mask_sub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 34., -29., 36., -25., 38., -21., 40., -17., 42., -13., 44., -9., 46., -5., 48., -1., + 50., 3., 52., 7., 54., 11., 56., 15., 58., 19., 60., 23., 62., 27., 64., 31., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_sub_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 
4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_maskz_sub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 0., -29., 0., -25., 0., -21., 0., -17., 0., -13., 0., -9., 0., -5., 0., -1., 0., 3., + 0., 7., 0., 11., 0., 15., 0., 19., 0., 23., 0., 27., 0., 31., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_sub_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_set_sh(-1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_sub_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let src = _mm_set_sh(4.0); + let r = _mm_mask_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, + ); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 1, a, b, + ); + let e = _mm_set_sh(-1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_sub_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = + _mm_maskz_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = + _mm_maskz_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_set_sh(-1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_sub_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_sub_sh(a, b); + let e = _mm_set_sh(-1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_sub_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let src = _mm_set_sh(4.0); + let r = _mm_mask_sub_sh(src, 0, a, b); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_sub_sh(src, 1, a, b); + let e = _mm_set_sh(-1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_sub_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_maskz_sub_sh(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = _mm_maskz_sub_sh(1, a, b); + let e = _mm_set_sh(-1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mul_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let r = _mm_mul_ph(a, b); + let e = _mm_set_ph(8.0, 14.0, 18.0, 20.0, 20.0, 18.0, 14.0, 8.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_mul_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm_mask_mul_ph(src, 0b01010101, a, b); + let e = _mm_set_ph(10., 14., 12., 20., 14., 18., 16., 8.); + 
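+        // Mask bit i selects `a[i] * b[i]`; cleared bits pass `src[i]`
+        // through unchanged. Note that `_mm_set_ph` takes its arguments from
+        // the highest element down, so mask bit 0 governs the *last* argument.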
assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_mul_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + let r = _mm_maskz_mul_ph(0b01010101, a, b); + let e = _mm_set_ph(0., 14., 0., 20., 0., 18., 0., 8.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mul_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let r = _mm256_mul_ph(a, b); + let e = _mm256_set_ph( + 16.0, 30.0, 42.0, 52.0, 60.0, 66.0, 70.0, 72.0, 72.0, 70.0, 66.0, 60.0, 52.0, 42.0, + 30.0, 16.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_mul_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let src = _mm256_set_ph( + 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., + ); + let r = _mm256_mask_mul_ph(src, 0b0101010101010101, a, b); + let e = _mm256_set_ph( + 18., 30., 20., 52., 22., 66., 24., 72., 26., 70., 28., 60., 30., 42., 32., 16., + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_mul_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let b = _mm256_set_ph( + 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + ); + let r = _mm256_maskz_mul_ph(0b0101010101010101, a, b); + let e = _mm256_set_ph( + 0., 30., 0., 52., 0., 66., 0., 72., 0., 70., 0., 60., 0., 42., 0., 16., + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mul_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_mul_ph(a, b); + let e = _mm512_set_ph( + 32.0, 62.0, 90.0, 116.0, 140.0, 162.0, 182.0, 200.0, 216.0, 230.0, 242.0, 252.0, 260.0, + 266.0, 270.0, 272.0, 272.0, 270.0, 266.0, 260.0, 252.0, 242.0, 230.0, 216.0, 200.0, + 182.0, 162.0, 140.0, 116.0, 90.0, 62.0, 32.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_mul_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let src = _mm512_set_ph( + 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., + 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 
62., 63., 64., 65., + ); + let r = _mm512_mask_mul_ph(src, 0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 34., 62., 36., 116., 38., 162., 40., 200., 42., 230., 44., 252., 46., 266., 48., 272., + 50., 270., 52., 260., 54., 242., 56., 216., 58., 182., 60., 140., 62., 90., 64., 32., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_mul_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_maskz_mul_ph(0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 0., 62., 0., 116., 0., 162., 0., 200., 0., 230., 0., 252., 0., 266., 0., 272., 0., + 270., 0., 260., 0., 242., 0., 216., 0., 182., 0., 140., 0., 90., 0., 32., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mul_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_mul_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set_ph( + 32.0, 62.0, 90.0, 116.0, 140.0, 162.0, 182.0, 200.0, 216.0, 230.0, 242.0, 252.0, 260.0, + 266.0, 270.0, 272.0, 272.0, 270.0, 266.0, 260.0, 252.0, 242.0, 230.0, 216.0, 200.0, + 182.0, 162.0, 140.0, 116.0, 90.0, 62.0, 32.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_mul_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let src = _mm512_set_ph( + 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., + 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + ); + let r = _mm512_mask_mul_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 34., 62., 36., 116., 38., 162., 40., 200., 42., 230., 44., 252., 46., 266., 48., 272., + 50., 270., 52., 260., 54., 242., 56., 216., 58., 182., 60., 140., 62., 90., 64., 32., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_mul_round_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 
19.0, + 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, + 3.0, 2.0, 1.0, + ); + let r = _mm512_maskz_mul_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 0., 62., 0., 116., 0., 162., 0., 200., 0., 230., 0., 252., 0., 266., 0., 272., 0., + 270., 0., 260., 0., 242., 0., 216., 0., 182., 0., 140., 0., 90., 0., 32., + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mul_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_set_sh(2.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_mul_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let src = _mm_set_sh(4.0); + let r = _mm_mask_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, + ); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 1, a, b, + ); + let e = _mm_set_sh(2.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_mul_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = + _mm_maskz_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = + _mm_maskz_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_set_sh(2.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mul_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_mul_sh(a, b); + let e = _mm_set_sh(2.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_mul_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let src = _mm_set_sh(4.0); + let r = _mm_mask_mul_sh(src, 0, a, b); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_mul_sh(src, 1, a, b); + let e = _mm_set_sh(2.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_mul_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_maskz_mul_sh(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = _mm_maskz_mul_sh(1, a, b); + let e = _mm_set_sh(2.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_div_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let r = _mm_div_ph(a, b); + let e = _mm_set1_ph(0.5); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_div_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let src = _mm_set_ph(4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0); + let r = _mm_mask_div_ph(src, 0b01010101, a, b); + let e = _mm_set_ph(4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_div_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let r = _mm_maskz_div_ph(0b01010101, a, b); + let e = _mm_set_ph(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_div_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let r = 
_mm256_div_ph(a, b); + let e = _mm256_set1_ph(0.5); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_div_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let src = _mm256_set_ph( + 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, + 19.0, + ); + let r = _mm256_mask_div_ph(src, 0b0101010101010101, a, b); + let e = _mm256_set_ph( + 4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5, 12.0, 0.5, 14.0, 0.5, 16.0, 0.5, 18.0, 0.5, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_div_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let r = _mm256_maskz_div_ph(0b0101010101010101, a, b); + let e = _mm256_set_ph( + 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_div_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let r = _mm512_div_ph(a, b); + let e = _mm512_set1_ph(0.5); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_div_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let src = _mm512_set_ph( + 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, + 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, + 33.0, 34.0, 35.0, + ); + let r = _mm512_mask_div_ph(src, 0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5, 12.0, 0.5, 14.0, 0.5, 16.0, 0.5, 18.0, 0.5, + 20.0, 0.5, 22.0, 0.5, 24.0, 0.5, 26.0, 0.5, 28.0, 0.5, 30.0, 0.5, 32.0, 0.5, 34.0, 0.5, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_div_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let r = _mm512_maskz_div_ph(0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, + 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_div_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let r = _mm512_div_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set1_ph(0.5); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_div_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let src = _mm512_set_ph( + 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, + 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, + 33.0, 34.0, 35.0, + ); + let r = _mm512_mask_div_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5, 12.0, 0.5, 14.0, 0.5, 16.0, 0.5, 18.0, 0.5, + 20.0, 0.5, 22.0, 0.5, 24.0, 0.5, 26.0, 0.5, 28.0, 0.5, 30.0, 0.5, 32.0, 0.5, 34.0, 0.5, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_div_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let r = _mm512_maskz_div_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 
0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, + 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_div_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_set_sh(0.5); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_div_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let src = _mm_set_sh(4.0); + let r = _mm_mask_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, + ); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 1, a, b, + ); + let e = _mm_set_sh(0.5); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_div_round_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = + _mm_maskz_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = + _mm_maskz_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_set_sh(0.5); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_div_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_div_sh(a, b); + let e = _mm_set_sh(0.5); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_div_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let src = _mm_set_sh(4.0); + let r = _mm_mask_div_sh(src, 0, a, b); + let e = _mm_set_sh(4.0); + assert_eq_m128h(r, e); + let r = _mm_mask_div_sh(src, 1, a, b); + let e = _mm_set_sh(0.5); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_div_sh() { + let a = _mm_set_sh(1.0); + let b = _mm_set_sh(2.0); + let r = _mm_maskz_div_sh(0, a, b); + let e = _mm_set_sh(0.0); + assert_eq_m128h(r, e); + let r = _mm_maskz_div_sh(1, a, b); + let e = _mm_set_sh(0.5); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 1.0); + let r = _mm_mul_pch(a, b); + let e = _mm_set1_pch(-1.0, 0.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_mul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 1.0); + let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0); + let r = _mm_mask_mul_pch(src, 0b0101, a, b); + let e = _mm_setr_ph(-1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0); + assert_eq_m128h(r, e); + } - #[link_name = "llvm.x86.avx512fp16.mask.scalef.ph.128"] - fn vscalefph_128(a: __m128h, b: __m128h, src: __m128h, k: __mmask8) -> __m128h; - #[link_name = "llvm.x86.avx512fp16.mask.scalef.ph.256"] - fn vscalefph_256(a: __m256h, b: __m256h, src: __m256h, k: __mmask16) -> __m256h; - #[link_name = "llvm.x86.avx512fp16.mask.scalef.ph.512"] - fn vscalefph_512(a: __m512h, b: __m512h, src: __m512h, k: __mmask32, rounding: i32) -> __m512h; - #[link_name = "llvm.x86.avx512fp16.mask.scalef.sh"] - fn vscalefsh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; + #[simd_test(enable 
= "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_mul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 1.0); + let r = _mm_maskz_mul_pch(0b0101, a, b); + let e = _mm_setr_ph(-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0); + assert_eq_m128h(r, e); + } - #[link_name = "llvm.x86.avx512fp16.mask.reduce.ph.128"] - fn vreduceph_128(a: __m128h, imm8: i32, src: __m128h, k: __mmask8) -> __m128h; - #[link_name = "llvm.x86.avx512fp16.mask.reduce.ph.256"] - fn vreduceph_256(a: __m256h, imm8: i32, src: __m256h, k: __mmask16) -> __m256h; - #[link_name = "llvm.x86.avx512fp16.mask.reduce.ph.512"] - fn vreduceph_512(a: __m512h, imm8: i32, src: __m512h, k: __mmask32, sae: i32) -> __m512h; - #[link_name = "llvm.x86.avx512fp16.mask.reduce.sh"] - fn vreducesh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, imm8: i32, sae: i32) - -> __m128h; + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 1.0); + let r = _mm256_mul_pch(a, b); + let e = _mm256_set1_pch(-1.0, 0.0); + assert_eq_m256h(r, e); + } - #[link_name = "llvm.x86.avx512fp16.mask.fpclass.sh"] - fn vfpclasssh(a: __m128h, imm8: i32, k: __mmask8) -> __mmask8; + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_mul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 1.0); + let src = _mm256_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + ); + let r = _mm256_mask_mul_pch(src, 0b01010101, a, b); + let e = _mm256_setr_ph( + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + ); + assert_eq_m256h(r, e); + } - #[link_name = "llvm.x86.avx512.sitofp.round.v8f16.v8i16"] - fn vcvtw2ph_128(a: i16x8, rounding: i32) -> __m128h; - #[link_name = "llvm.x86.avx512.sitofp.round.v16f16.v16i16"] - fn vcvtw2ph_256(a: i16x16, rounding: i32) -> __m256h; - #[link_name = "llvm.x86.avx512.sitofp.round.v32f16.v32i16"] - fn vcvtw2ph_512(a: i16x32, rounding: i32) -> __m512h; - #[link_name = "llvm.x86.avx512.uitofp.round.v8f16.v8u16"] - fn vcvtuw2ph_128(a: u16x8, rounding: i32) -> __m128h; - #[link_name = "llvm.x86.avx512.uitofp.round.v16f16.v16u16"] - fn vcvtuw2ph_256(a: u16x16, rounding: i32) -> __m256h; - #[link_name = "llvm.x86.avx512.uitofp.round.v32f16.v32u16"] - fn vcvtuw2ph_512(a: u16x32, rounding: i32) -> __m512h; + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_mul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 1.0); + let r = _mm256_maskz_mul_pch(0b01010101, a, b); + let e = _mm256_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + ); + assert_eq_m256h(r, e); + } - #[link_name = "llvm.x86.avx512fp16.mask.vcvtdq2ph.128"] - fn vcvtdq2ph_128(a: i32x4, src: __m128h, k: __mmask8) -> __m128h; - #[link_name = "llvm.x86.avx512.sitofp.round.v8f16.v8i32"] - fn vcvtdq2ph_256(a: i32x8, rounding: i32) -> __m128h; - #[link_name = "llvm.x86.avx512.sitofp.round.v16f16.v16i32"] - fn vcvtdq2ph_512(a: i32x16, rounding: i32) -> __m256h; - #[link_name = "llvm.x86.avx512fp16.vcvtsi2sh"] - fn vcvtsi2sh(a: __m128h, b: i32, rounding: i32) -> __m128h; - #[link_name = "llvm.x86.avx512fp16.mask.vcvtudq2ph.128"] - fn vcvtudq2ph_128(a: u32x4, src: __m128h, k: __mmask8) -> __m128h; - #[link_name = "llvm.x86.avx512.uitofp.round.v8f16.v8u32"] - fn vcvtudq2ph_256(a: u32x8, rounding: i32) -> __m128h; - #[link_name = 
"llvm.x86.avx512.uitofp.round.v16f16.v16u32"] - fn vcvtudq2ph_512(a: u32x16, rounding: i32) -> __m256h; - #[link_name = "llvm.x86.avx512fp16.vcvtusi2sh"] - fn vcvtusi2sh(a: __m128h, b: u32, rounding: i32) -> __m128h; + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let r = _mm512_mul_pch(a, b); + let e = _mm512_set1_pch(-1.0, 0.0); + assert_eq_m512h(r, e); + } - #[link_name = "llvm.x86.avx512fp16.mask.vcvtqq2ph.128"] - fn vcvtqq2ph_128(a: i64x2, src: __m128h, k: __mmask8) -> __m128h; - #[link_name = "llvm.x86.avx512fp16.mask.vcvtqq2ph.256"] - fn vcvtqq2ph_256(a: i64x4, src: __m128h, k: __mmask8) -> __m128h; - #[link_name = "llvm.x86.avx512.sitofp.round.v8f16.v8i64"] - fn vcvtqq2ph_512(a: i64x8, rounding: i32) -> __m128h; - #[link_name = "llvm.x86.avx512fp16.mask.vcvtuqq2ph.128"] - fn vcvtuqq2ph_128(a: u64x2, src: __m128h, k: __mmask8) -> __m128h; - #[link_name = "llvm.x86.avx512fp16.mask.vcvtuqq2ph.256"] - fn vcvtuqq2ph_256(a: u64x4, src: __m128h, k: __mmask8) -> __m128h; - #[link_name = "llvm.x86.avx512.uitofp.round.v8f16.v8u64"] - fn vcvtuqq2ph_512(a: u64x8, rounding: i32) -> __m128h; + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_mul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let src = _mm512_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, + 32.0, 33.0, + ); + let r = _mm512_mask_mul_pch(src, 0b0101010101010101, a, b); + let e = _mm512_setr_ph( + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, + 33.0, + ); + assert_eq_m512h(r, e); + } - #[link_name = "llvm.x86.avx512fp16.mask.vcvtps2phx.128"] - fn vcvtps2phx_128(a: __m128, src: __m128h, k: __mmask8) -> __m128h; - #[link_name = "llvm.x86.avx512fp16.mask.vcvtps2phx.256"] - fn vcvtps2phx_256(a: __m256, src: __m128h, k: __mmask8) -> __m128h; - #[link_name = "llvm.x86.avx512fp16.mask.vcvtps2phx.512"] - fn vcvtps2phx_512(a: __m512, src: __m256h, k: __mmask16, rounding: i32) -> __m256h; - #[link_name = "llvm.x86.avx512fp16.mask.vcvtss2sh.round"] - fn vcvtss2sh(a: __m128h, b: __m128, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_mul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let r = _mm512_maskz_mul_pch(0b0101010101010101, a, b); + let e = _mm512_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + ); + assert_eq_m512h(r, e); + } - #[link_name = "llvm.x86.avx512fp16.mask.vcvtpd2ph.128"] - fn vcvtpd2ph_128(a: __m128d, src: __m128h, k: __mmask8) -> __m128h; - #[link_name = "llvm.x86.avx512fp16.mask.vcvtpd2ph.256"] - fn vcvtpd2ph_256(a: __m256d, src: __m128h, k: __mmask8) -> __m128h; - #[link_name = "llvm.x86.avx512fp16.mask.vcvtpd2ph.512"] - fn vcvtpd2ph_512(a: __m512d, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; - #[link_name = "llvm.x86.avx512fp16.mask.vcvtsd2sh.round"] - fn vcvtsd2sh(a: __m128h, b: __m128d, src: __m128h, k: __mmask8, rounding: i32) -> __m128h; + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + 
let b = _mm512_set1_pch(0.0, 1.0); + let r = _mm512_mul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set1_pch(-1.0, 0.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_mul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let src = _mm512_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, + 32.0, 33.0, + ); + let r = _mm512_mask_mul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b0101010101010101, + a, + b, + ); + let e = _mm512_setr_ph( + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, + 33.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_mul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let r = _mm512_maskz_mul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b0101010101010101, + a, + b, + ); + let e = _mm512_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let r = _mm_mul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_mul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); + let r = _mm_mask_mul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, + ); + let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_mul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let r = + _mm_maskz_mul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); + } -} + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let r = _mm_mul_sch(a, b); + let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); + } -#[cfg(test)] -mod tests { - use crate::core_arch::x86::*; - use crate::mem::transmute; - use crate::ptr::{addr_of, addr_of_mut}; - use stdarch_test::simd_test; + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_mul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); + let r = _mm_mask_mul_sch(src, 
0, a, b); + let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); + } - #[target_feature(enable = "avx512fp16")] - unsafe fn _mm_set1_pch(re: f16, im: f16) -> __m128h { - _mm_setr_ph(re, im, re, im, re, im, re, im) + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_mul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let r = _mm_maskz_mul_sch(0, a, b); + let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } - #[target_feature(enable = "avx512fp16")] - unsafe fn _mm256_set1_pch(re: f16, im: f16) -> __m256h { - _mm256_setr_ph( - re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, - ) + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_fmul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 1.0); + let r = _mm_fmul_pch(a, b); + let e = _mm_set1_pch(-1.0, 0.0); + assert_eq_m128h(r, e); } - #[target_feature(enable = "avx512fp16")] - unsafe fn _mm512_set1_pch(re: f16, im: f16) -> __m512h { - _mm512_setr_ph( - re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, - re, im, re, im, re, im, re, im, re, im, - ) + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_fmul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 1.0); + let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0); + let r = _mm_mask_fmul_pch(src, 0b0101, a, b); + let e = _mm_setr_ph(-1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_set_ph() { - let r = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let e = _mm_setr_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_fmul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 1.0); + let r = _mm_maskz_fmul_pch(0b0101, a, b); + let e = _mm_setr_ph(-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_set_ph() { - let r = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_fmul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 1.0); + let r = _mm256_fmul_pch(a, b); + let e = _mm256_set1_pch(-1.0, 0.0); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_fmul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 1.0); + let src = _mm256_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, ); + let r = _mm256_mask_fmul_pch(src, 0b01010101, a, b); let e = _mm256_setr_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_fmul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 1.0); + let r = _mm256_maskz_fmul_pch(0b01010101, a, b); + let e = _mm256_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_set_ph() { 
- let r = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, + unsafe fn test_mm512_fmul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let r = _mm512_fmul_pch(a, b); + let e = _mm512_set1_pch(-1.0, 0.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_fmul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let src = _mm512_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, + 32.0, 33.0, ); + let r = _mm512_mask_fmul_pch(src, 0b0101010101010101, a, b); let e = _mm512_setr_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, + 33.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_set_sh() { - let r = _mm_set_sh(1.0); - let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0); - assert_eq_m128h(r, e); + unsafe fn test_mm512_maskz_fmul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let r = _mm512_maskz_fmul_pch(0b0101010101010101, a, b); + let e = _mm512_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_set1_ph() { - let r = _mm_set1_ph(1.0); - let e = _mm_set_ph(1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0); - assert_eq_m128h(r, e); + unsafe fn test_mm512_fmul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let r = _mm512_fmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set1_pch(-1.0, 0.0); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_set1_ph() { - let r = _mm256_set1_ph(1.0); - let e = _mm256_set_ph( - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + unsafe fn test_mm512_mask_fmul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let src = _mm512_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, + 32.0, 33.0, ); - assert_eq_m256h(r, e); + let r = _mm512_mask_fmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b0101010101010101, + a, + b, + ); + let e = _mm512_setr_ph( + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, + 33.0, + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_set1_ph() { - let r = _mm512_set1_ph(1.0); - let e = _mm512_set_ph( - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 
1.0, 1.0, 1.0, 1.0, 1.0, + unsafe fn test_mm512_maskz_fmul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 1.0); + let r = _mm512_maskz_fmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b0101010101010101, + a, + b, + ); + let e = _mm512_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_setr_ph() { - let r = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let e = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); + unsafe fn test_mm_fmul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let r = _mm_fmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_setr_ph() { - let r = _mm256_setr_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let e = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + unsafe fn test_mm_mask_fmul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); + let r = _mm_mask_fmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, ); - assert_eq_m256h(r, e); + let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_setr_ph() { - let r = _mm512_setr_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let e = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_setzero_ph() { - let r = _mm_setzero_ph(); - let e = _mm_set1_ph(0.0); + unsafe fn test_mm_maskz_fmul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let r = + _mm_maskz_fmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_setzero_ph() { - let r = _mm256_setzero_ph(); - let e = _mm256_set1_ph(0.0); - assert_eq_m256h(r, e); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_fmul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let r = _mm_fmul_sch(a, b); + let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_setzero_ph() { - let r = _mm512_setzero_ph(); - let e = _mm512_set1_ph(0.0); - assert_eq_m512h(r, e); + unsafe fn test_mm_mask_fmul_sch() { + 
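+        // `_sch` intrinsics operate on the lowest complex pair only: with a
+        // zero mask bit the low (re, im) pair comes from `src`, while the six
+        // remaining `f16` elements are copied from `a`.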
let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); + let r = _mm_mask_fmul_sch(src, 0, a, b); + let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_castsi128_ph() { - let a = _mm_set1_epi16(0x3c00); - let r = _mm_castsi128_ph(a); - let e = _mm_set1_ph(1.0); + unsafe fn test_mm_maskz_fmul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let r = _mm_maskz_fmul_sch(0, a, b); + let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castsi256_ph() { - let a = _mm256_set1_epi16(0x3c00); - let r = _mm256_castsi256_ph(a); - let e = _mm256_set1_ph(1.0); - assert_eq_m256h(r, e); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_cmul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, -1.0); + let r = _mm_cmul_pch(a, b); + let e = _mm_set1_pch(-1.0, 0.0); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castsi512_ph() { - let a = _mm512_set1_epi16(0x3c00); - let r = _mm512_castsi512_ph(a); - let e = _mm512_set1_ph(1.0); - assert_eq_m512h(r, e); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_cmul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, -1.0); + let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0); + let r = _mm_mask_cmul_pch(src, 0b0101, a, b); + let e = _mm_setr_ph(-1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_castph_si128() { - let a = _mm_set1_ph(1.0); - let r = _mm_castph_si128(a); - let e = _mm_set1_epi16(0x3c00); - assert_eq_m128i(r, e); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_cmul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, -1.0); + let r = _mm_maskz_cmul_pch(0b0101, a, b); + let e = _mm_setr_ph(-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castph_si256() { - let a = _mm256_set1_ph(1.0); - let r = _mm256_castph_si256(a); - let e = _mm256_set1_epi16(0x3c00); - assert_eq_m256i(r, e); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_cmul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, -1.0); + let r = _mm256_cmul_pch(a, b); + let e = _mm256_set1_pch(-1.0, 0.0); + assert_eq_m256h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castph_si512() { - let a = _mm512_set1_ph(1.0); - let r = _mm512_castph_si512(a); - let e = _mm512_set1_epi16(0x3c00); - assert_eq_m512i(r, e); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_cmul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, -1.0); + let src = _mm256_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + ); + let r = _mm256_mask_cmul_pch(src, 0b01010101, a, b); + let e = _mm256_setr_ph( + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + ); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_cmul_pch() { + let a = 
_mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, -1.0); + let r = _mm256_maskz_cmul_pch(0b01010101, a, b); + let e = _mm256_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + ); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_castps_ph() { - let a = _mm_castsi128_ps(_mm_set1_epi16(0x3c00)); - let r = _mm_castps_ph(a); - let e = _mm_set1_ph(1.0); - assert_eq_m128h(r, e); + unsafe fn test_mm512_cmul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, -1.0); + let r = _mm512_cmul_pch(a, b); + let e = _mm512_set1_pch(-1.0, 0.0); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castps_ph() { - let a = _mm256_castsi256_ps(_mm256_set1_epi16(0x3c00)); - let r = _mm256_castps_ph(a); - let e = _mm256_set1_ph(1.0); - assert_eq_m256h(r, e); + unsafe fn test_mm512_mask_cmul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, -1.0); + let src = _mm512_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, + 32.0, 33.0, + ); + let r = _mm512_mask_cmul_pch(src, 0b0101010101010101, a, b); + let e = _mm512_setr_ph( + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, + 33.0, + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castps_ph() { - let a = _mm512_castsi512_ps(_mm512_set1_epi16(0x3c00)); - let r = _mm512_castps_ph(a); - let e = _mm512_set1_ph(1.0); + unsafe fn test_mm512_maskz_cmul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, -1.0); + let r = _mm512_maskz_cmul_pch(0b0101010101010101, a, b); + let e = _mm512_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_castph_ps() { - let a = _mm_castsi128_ph(_mm_set1_epi32(0x3f800000)); - let r = _mm_castph_ps(a); - let e = _mm_set1_ps(1.0); - assert_eq_m128(r, e); + unsafe fn test_mm512_cmul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, -1.0); + let r = _mm512_cmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set1_pch(-1.0, 0.0); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castph_ps() { - let a = _mm256_castsi256_ph(_mm256_set1_epi32(0x3f800000)); - let r = _mm256_castph_ps(a); - let e = _mm256_set1_ps(1.0); - assert_eq_m256(r, e); + unsafe fn test_mm512_mask_cmul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, -1.0); + let src = _mm512_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, + 32.0, 33.0, + ); + let r = _mm512_mask_cmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b0101010101010101, + a, + b, + ); + let e = _mm512_setr_ph( + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, + 33.0, + ); + assert_eq_m512h(r, 
e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castph_ps() { - let a = _mm512_castsi512_ph(_mm512_set1_epi32(0x3f800000)); - let r = _mm512_castph_ps(a); - let e = _mm512_set1_ps(1.0); - assert_eq_m512(r, e); + unsafe fn test_mm512_maskz_cmul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, -1.0); + let r = _mm512_maskz_cmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b0101010101010101, + a, + b, + ); + let e = _mm512_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_castpd_ph() { - let a = _mm_castsi128_pd(_mm_set1_epi16(0x3c00)); - let r = _mm_castpd_ph(a); - let e = _mm_set1_ph(1.0); + unsafe fn test_mm_cmul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let r = _mm_cmul_sch(a, b); + let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castpd_ph() { - let a = _mm256_castsi256_pd(_mm256_set1_epi16(0x3c00)); - let r = _mm256_castpd_ph(a); - let e = _mm256_set1_ph(1.0); - assert_eq_m256h(r, e); + unsafe fn test_mm_mask_cmul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); + let r = _mm_mask_cmul_sch(src, 0, a, b); + let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castpd_ph() { - let a = _mm512_castsi512_pd(_mm512_set1_epi16(0x3c00)); - let r = _mm512_castpd_ph(a); - let e = _mm512_set1_ph(1.0); - assert_eq_m512h(r, e); + unsafe fn test_mm_maskz_cmul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let r = _mm_maskz_cmul_sch(0, a, b); + let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_castph_pd() { - let a = _mm_castsi128_ph(_mm_set1_epi64x(0x3ff0000000000000)); - let r = _mm_castph_pd(a); - let e = _mm_set1_pd(1.0); - assert_eq_m128d(r, e); + unsafe fn test_mm_cmul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let r = _mm_cmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castph_pd() { - let a = _mm256_castsi256_ph(_mm256_set1_epi64x(0x3ff0000000000000)); - let r = _mm256_castph_pd(a); - let e = _mm256_set1_pd(1.0); - assert_eq_m256d(r, e); + unsafe fn test_mm_mask_cmul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); + let r = _mm_mask_cmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, + ); + let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } 
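+
+    // Reference arithmetic for the `cmul`/`fcmul` expectations: each
+    // adjacent pair of `f16` lanes is one complex number (re, im), and `a`
+    // is multiplied by the complex conjugate of `b`. A minimal sketch of
+    // the math the expected values follow (plain-`f32` pseudo-helpers for
+    // illustration only, not part of this crate):
+    //
+    //     fn mul_c(a: (f32, f32), b: (f32, f32)) -> (f32, f32) {
+    //         (a.0 * b.0 - a.1 * b.1, a.0 * b.1 + a.1 * b.0)
+    //     }
+    //     fn cmul_c(a: (f32, f32), b: (f32, f32)) -> (f32, f32) {
+    //         mul_c(a, (b.0, -b.1)) // conjugate of b
+    //     }
+    //
+    // e.g. cmul_c((0.0, 1.0), (0.0, -1.0)) == (-1.0, 0.0), which is the
+    // broadcast pair asserted in these tests.
+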
#[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castph_pd() { - let a = _mm512_castsi512_ph(_mm512_set1_epi64(0x3ff0000000000000)); - let r = _mm512_castph_pd(a); - let e = _mm512_set1_pd(1.0); - assert_eq_m512d(r, e); + unsafe fn test_mm_maskz_cmul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let r = + _mm_maskz_cmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castph256_ph128() { - let a = _mm256_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., - ); - let r = _mm256_castph256_ph128(a); - let e = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_fcmul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, -1.0); + let r = _mm_fcmul_pch(a, b); + let e = _mm_set1_pch(-1.0, 0.0); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castph512_ph128() { - let a = _mm512_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., - 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., - ); - let r = _mm512_castph512_ph128(a); - let e = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_fcmul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, -1.0); + let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0); + let r = _mm_mask_fcmul_pch(src, 0b0101, a, b); + let e = _mm_setr_ph(-1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castph512_ph256() { - let a = _mm512_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., - 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_fcmul_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, -1.0); + let r = _mm_maskz_fcmul_pch(0b0101, a, b); + let e = _mm_setr_ph(-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_fcmul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, -1.0); + let r = _mm256_fcmul_pch(a, b); + let e = _mm256_set1_pch(-1.0, 0.0); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_fcmul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, -1.0); + let src = _mm256_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, ); - let r = _mm512_castph512_ph256(a); + let r = _mm256_mask_fcmul_pch(src, 0b01010101, a, b); let e = _mm256_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, ); assert_eq_m256h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_castph128_ph256() { - let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); - let r = _mm256_castph128_ph256(a); - assert_eq_m128h(_mm256_castph256_ph128(r), a); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn 
test_mm256_maskz_fcmul_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, -1.0); + let r = _mm256_maskz_fcmul_pch(0b01010101, a, b); + let e = _mm256_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + ); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castph128_ph512() { - let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); - let r = _mm512_castph128_ph512(a); - assert_eq_m128h(_mm512_castph512_ph128(r), a); + unsafe fn test_mm512_fcmul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, -1.0); + let r = _mm512_fcmul_pch(a, b); + let e = _mm512_set1_pch(-1.0, 0.0); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_castph256_ph512() { - let a = _mm256_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., + unsafe fn test_mm512_mask_fcmul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, -1.0); + let src = _mm512_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, + 32.0, 33.0, ); - let r = _mm512_castph256_ph512(a); - assert_eq_m256h(_mm512_castph512_ph256(r), a); + let r = _mm512_mask_fcmul_pch(src, 0b0101010101010101, a, b); + let e = _mm512_setr_ph( + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, + 33.0, + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm256_zextph128_ph256() { - let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); - let r = _mm256_zextph128_ph256(a); - let e = _mm256_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 0., 0., 0., 0., 0., 0., 0., 0., + unsafe fn test_mm512_maskz_fcmul_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, -1.0); + let r = _mm512_maskz_fcmul_pch(0b0101010101010101, a, b); + let e = _mm512_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, ); - assert_eq_m256h(r, e); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_zextph128_ph512() { - let a = _mm_setr_ph(1., 2., 3., 4., 5., 6., 7., 8.); - let r = _mm512_zextph128_ph512(a); - let e = _mm512_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., - 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., - ); + unsafe fn test_mm512_fcmul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, -1.0); + let r = _mm512_fcmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set1_pch(-1.0, 0.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_zextph256_ph512() { - let a = _mm256_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., + unsafe fn test_mm512_mask_fcmul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, -1.0); + let src = _mm512_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, + 32.0, 33.0, + ); + let r = _mm512_mask_fcmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | 
_MM_FROUND_NO_EXC }>( + src, + 0b0101010101010101, + a, + b, ); - let r = _mm512_zextph256_ph512(a); let e = _mm512_setr_ph( - 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 0., 0., 0., 0., - 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, + 33.0, ); assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_cmp_ph_mask() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(1.0, 2.0, 3.0, 4.0, -5.0, -6.0, -7.0, -8.0); - let r = _mm_cmp_ph_mask::<_CMP_EQ_OQ>(a, b); - assert_eq!(r, 0b11110000); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_cmp_ph_mask() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(1.0, 2.0, 3.0, 4.0, -5.0, -6.0, -7.0, -8.0); - let r = _mm_mask_cmp_ph_mask::<_CMP_EQ_OQ>(0b01010101, a, b); - assert_eq!(r, 0b01010000); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_cmp_ph_mask() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_fcmul_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, -1.0); + let r = _mm512_maskz_fcmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b0101010101010101, + a, + b, ); - let b = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, -5.0, -6.0, -7.0, -8.0, 9.0, 10.0, 11.0, 12.0, -13.0, -14.0, -15.0, - -16.0, + let e = _mm512_setr_ph( + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, ); - let r = _mm256_cmp_ph_mask::<_CMP_EQ_OQ>(a, b); - assert_eq!(r, 0b1111000011110000); + assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_cmp_ph_mask() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let b = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, -5.0, -6.0, -7.0, -8.0, 9.0, 10.0, 11.0, 12.0, -13.0, -14.0, -15.0, - -16.0, - ); - let r = _mm256_mask_cmp_ph_mask::<_CMP_EQ_OQ>(0b0101010101010101, a, b); - assert_eq!(r, 0b0101000001010000); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_fcmul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let r = _mm_fcmul_sch(a, b); + let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_cmp_ph_mask() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, -5.0, -6.0, -7.0, -8.0, 9.0, 10.0, 11.0, 12.0, -13.0, -14.0, -15.0, - -16.0, 17.0, 18.0, 19.0, 20.0, -21.0, -22.0, -23.0, -24.0, 25.0, 26.0, 27.0, 28.0, - -29.0, -30.0, -31.0, -32.0, - ); - let r = _mm512_cmp_ph_mask::<_CMP_EQ_OQ>(a, b); - assert_eq!(r, 0b11110000111100001111000011110000); + unsafe fn test_mm_mask_fcmul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 
-1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); + let r = _mm_mask_fcmul_sch(src, 0, a, b); + let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_cmp_ph_mask() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, -5.0, -6.0, -7.0, -8.0, 9.0, 10.0, 11.0, 12.0, -13.0, -14.0, -15.0, - -16.0, 17.0, 18.0, 19.0, 20.0, -21.0, -22.0, -23.0, -24.0, 25.0, 26.0, 27.0, 28.0, - -29.0, -30.0, -31.0, -32.0, - ); - let r = _mm512_mask_cmp_ph_mask::<_CMP_EQ_OQ>(0b01010101010101010101010101010101, a, b); - assert_eq!(r, 0b01010000010100000101000001010000); + unsafe fn test_mm_maskz_fcmul_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let r = _mm_maskz_fcmul_sch(0, a, b); + let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_cmp_round_sh_mask() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = _mm_cmp_round_sh_mask::<_CMP_EQ_OQ, _MM_FROUND_NO_EXC>(a, b); - assert_eq!(r, 1); + unsafe fn test_mm_fcmul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let r = _mm_fcmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_cmp_round_sh_mask() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = _mm_mask_cmp_round_sh_mask::<_CMP_EQ_OQ, _MM_FROUND_NO_EXC>(0, a, b); - assert_eq!(r, 0); + unsafe fn test_mm_mask_fcmul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); + let r = _mm_mask_fcmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, + ); + let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_cmp_sh_mask() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = _mm_cmp_sh_mask::<_CMP_EQ_OQ>(a, b); - assert_eq!(r, 1); + unsafe fn test_mm_maskz_fcmul_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); + let r = + _mm_maskz_fcmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_cmp_sh_mask() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = _mm_mask_cmp_sh_mask::<_CMP_EQ_OQ>(0, a, b); - assert_eq!(r, 0); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_abs_ph() { + let a = _mm_set_ph(-1.0, 0.0, 1.0, -2.0, 3.0, -4.0, 5.0, -6.0); + let r = _mm_abs_ph(a); + let e = _mm_set_ph(1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0); + assert_eq_m128h(r, e); } - 
#[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comi_round_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = _mm_comi_round_sh::<_CMP_EQ_OQ, _MM_FROUND_NO_EXC>(a, b); - assert_eq!(r, 1); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_abs_ph() { + let a = _mm256_set_ph( + -1.0, 0.0, 1.0, -2.0, 3.0, -4.0, 5.0, -6.0, 7.0, -8.0, 9.0, -10.0, 11.0, -12.0, 13.0, + -14.0, + ); + let r = _mm256_abs_ph(a); + let e = _mm256_set_ph( + 1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, + ); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comi_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = _mm_comi_sh::<_CMP_EQ_OQ>(a, b); - assert_eq!(r, 1); + unsafe fn test_mm512_abs_ph() { + let a = _mm512_set_ph( + -1.0, 0.0, 1.0, -2.0, 3.0, -4.0, 5.0, -6.0, 7.0, -8.0, 9.0, -10.0, 11.0, -12.0, 13.0, + -14.0, 15.0, -16.0, 17.0, -18.0, 19.0, -20.0, 21.0, -22.0, 23.0, -24.0, 25.0, -26.0, + 27.0, -28.0, 29.0, -30.0, + ); + let r = _mm512_abs_ph(a); + let e = _mm512_set_ph( + 1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, + 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, + 29.0, 30.0, + ); + assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comieq_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = _mm_comieq_sh(a, b); - assert_eq!(r, 1); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_conj_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let r = _mm_conj_pch(a); + let e = _mm_set1_pch(0.0, -1.0); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comige_sh() { - let a = _mm_set_sh(2.0); - let b = _mm_set_sh(1.0); - let r = _mm_comige_sh(a, b); - assert_eq!(r, 1); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_conj_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0); + let r = _mm_mask_conj_pch(src, 0b0101, a); + let e = _mm_setr_ph(0.0, -1.0, 4.0, 5.0, 0.0, -1.0, 8.0, 9.0); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comigt_sh() { - let a = _mm_set_sh(2.0); - let b = _mm_set_sh(1.0); - let r = _mm_comigt_sh(a, b); - assert_eq!(r, 1); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_conj_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let r = _mm_maskz_conj_pch(0b0101, a); + let e = _mm_setr_ph(0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comile_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_comile_sh(a, b); - assert_eq!(r, 1); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_conj_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let r = _mm256_conj_pch(a); + let e = _mm256_set1_pch(0.0, -1.0); + assert_eq_m256h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comilt_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_comilt_sh(a, b); - assert_eq!(r, 1); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_conj_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let src = _mm256_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + ); + let r = _mm256_mask_conj_pch(src, 0b01010101, a); + let e = _mm256_setr_ph( + 0.0, -1.0, 4.0, 5.0, 0.0, 
-1.0, 8.0, 9.0, 0.0, -1.0, 12.0, 13.0, 0.0, -1.0, 16.0, 17.0, + ); + assert_eq_m256h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_comineq_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_comineq_sh(a, b); - assert_eq!(r, 1); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_conj_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let r = _mm256_maskz_conj_pch(0b01010101, a); + let e = _mm256_setr_ph( + 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, + ); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_ucomieq_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(1.0); - let r = _mm_ucomieq_sh(a, b); - assert_eq!(r, 1); + unsafe fn test_mm512_conj_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let r = _mm512_conj_pch(a); + let e = _mm512_set1_pch(0.0, -1.0); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_ucomige_sh() { - let a = _mm_set_sh(2.0); - let b = _mm_set_sh(1.0); - let r = _mm_ucomige_sh(a, b); - assert_eq!(r, 1); + unsafe fn test_mm512_mask_conj_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let src = _mm512_setr_ph( + 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, + 32.0, 33.0, + ); + let r = _mm512_mask_conj_pch(src, 0b0101010101010101, a); + let e = _mm512_setr_ph( + 0.0, -1.0, 4.0, 5.0, 0.0, -1.0, 8.0, 9.0, 0.0, -1.0, 12.0, 13.0, 0.0, -1.0, 16.0, 17.0, + 0.0, -1.0, 20.0, 21.0, 0.0, -1.0, 24.0, 25.0, 0.0, -1.0, 28.0, 29.0, 0.0, -1.0, 32.0, + 33.0, + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_ucomigt_sh() { - let a = _mm_set_sh(2.0); - let b = _mm_set_sh(1.0); - let r = _mm_ucomigt_sh(a, b); - assert_eq!(r, 1); + unsafe fn test_mm512_maskz_conj_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let r = _mm512_maskz_conj_pch(0b0101010101010101, a); + let e = _mm512_setr_ph( + 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, + 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, + ); + assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_ucomile_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_ucomile_sh(a, b); - assert_eq!(r, 1); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_fmadd_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 2.0); + let c = _mm_set1_pch(0.0, 3.0); + let r = _mm_fmadd_pch(a, b, c); + let e = _mm_set1_pch(-2.0, 3.0); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_ucomilt_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_ucomilt_sh(a, b); - assert_eq!(r, 1); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_fmadd_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 2.0); + let c = _mm_set1_pch(0.0, 3.0); + let r = _mm_mask_fmadd_pch(a, 0b0101, b, c); + let e = _mm_setr_ph(-2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask3_fmadd_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 2.0); + let c = _mm_set1_pch(0.0, 3.0); + let r = _mm_mask3_fmadd_pch(a, b, c, 0b0101); + let e = _mm_setr_ph(-2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0); + assert_eq_m128h(r, e); 
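+        // `mask3` variants take masked-off lanes from `c`, which is why the
+        // unselected pairs in `e` above are (0.0, 3.0).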
+ } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_fmadd_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 2.0); + let c = _mm_set1_pch(0.0, 3.0); + let r = _mm_maskz_fmadd_pch(0b0101, a, b, c); + let e = _mm_setr_ph(-2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_ucomineq_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_ucomineq_sh(a, b); - assert_eq!(r, 1); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_fmadd_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 2.0); + let c = _mm256_set1_pch(0.0, 3.0); + let r = _mm256_fmadd_pch(a, b, c); + let e = _mm256_set1_pch(-2.0, 3.0); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_load_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_load_ph(addr_of!(a).cast()); - assert_eq_m128h(a, b); + unsafe fn test_mm256_mask_fmadd_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 2.0); + let c = _mm256_set1_pch(0.0, 3.0); + let r = _mm256_mask_fmadd_pch(a, 0b01010101, b, c); + let e = _mm256_setr_ph( + -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, + ); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_load_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + unsafe fn test_mm256_mask3_fmadd_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 2.0); + let c = _mm256_set1_pch(0.0, 3.0); + let r = _mm256_mask3_fmadd_pch(a, b, c, 0b01010101); + let e = _mm256_setr_ph( + -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, ); - let b = _mm256_load_ph(addr_of!(a).cast()); - assert_eq_m256h(a, b); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_fmadd_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 2.0); + let c = _mm256_set1_pch(0.0, 3.0); + let r = _mm256_maskz_fmadd_pch(0b01010101, a, b, c); + let e = _mm256_setr_ph( + -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, + ); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_load_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, + unsafe fn test_mm512_fmadd_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_fmadd_pch(a, b, c); + let e = _mm512_set1_pch(-2.0, 3.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_fmadd_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_mask_fmadd_pch(a, 0b0101010101010101, b, c); + let e = _mm512_setr_ph( + -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, + -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, ); - let b = _mm512_load_ph(addr_of!(a).cast()); - assert_eq_m512h(a, b); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_load_sh() { - 
let a = _mm_set_sh(1.0); - let b = _mm_load_sh(addr_of!(a).cast()); - assert_eq_m128h(a, b); + unsafe fn test_mm512_mask3_fmadd_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_mask3_fmadd_pch(a, b, c, 0b0101010101010101); + let e = _mm512_setr_ph( + -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, + -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_load_sh() { - let a = _mm_set_sh(1.0); - let src = _mm_set_sh(2.); - let b = _mm_mask_load_sh(src, 1, addr_of!(a).cast()); - assert_eq_m128h(a, b); - let b = _mm_mask_load_sh(src, 0, addr_of!(a).cast()); - assert_eq_m128h(src, b); + unsafe fn test_mm512_maskz_fmadd_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_maskz_fmadd_pch(0b0101010101010101, a, b, c); + let e = _mm512_setr_ph( + -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, + -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_load_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_maskz_load_sh(1, addr_of!(a).cast()); - assert_eq_m128h(a, b); - let b = _mm_maskz_load_sh(0, addr_of!(a).cast()); - assert_eq_m128h(_mm_setzero_ph(), b); + unsafe fn test_mm512_fmadd_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = + _mm512_fmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm512_set1_pch(-2.0, 3.0); + assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_loadu_ph() { - let array = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]; - let r = _mm_loadu_ph(array.as_ptr()); - let e = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - assert_eq_m128h(r, e); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_fmadd_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_mask_fmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, + 0b0101010101010101, + b, + c, + ); + let e = _mm512_setr_ph( + -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, + -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, + ); + assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_loadu_ph() { - let array = [ - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ]; - let r = _mm256_loadu_ph(array.as_ptr()); - let e = _mm256_setr_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask3_fmadd_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_mask3_fmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, + b, + c, + 0b0101010101010101, ); - assert_eq_m256h(r, e); + let e = _mm512_setr_ph( + -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, + -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, 
-2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_loadu_ph() { - let array = [ - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ]; - let r = _mm512_loadu_ph(array.as_ptr()); + unsafe fn test_mm512_maskz_fmadd_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_maskz_fmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b0101010101010101, + a, + b, + c, + ); let e = _mm512_setr_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, + -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, + -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_move_sh() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_sh(9.0); - let r = _mm_move_sh(a, b); - let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 9.0); + unsafe fn test_mm_fmadd_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_fmadd_sch(a, b, c); + let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_move_sh() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_sh(9.0); - let src = _mm_set_sh(10.0); - let r = _mm_mask_move_sh(src, 0, a, b); - let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 10.0); + unsafe fn test_mm_mask_fmadd_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_mask_fmadd_sch(a, 0, b, c); + let e = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); + let r = _mm_mask_fmadd_sch(a, 1, b, c); + let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_move_sh() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_sh(9.0); - let r = _mm_maskz_move_sh(0, a, b); - let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 0.0); + unsafe fn test_mm_mask3_fmadd_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_mask3_fmadd_sch(a, b, c, 0); + let e = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + assert_eq_m128h(r, e); + let r = _mm_mask3_fmadd_sch(a, b, c, 1); + let e = _mm_setr_ph(-2.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_store_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let mut b = _mm_setzero_ph(); - _mm_store_ph(addr_of_mut!(b).cast(), a); - assert_eq_m128h(a, b); + #[simd_test(enable = 
"avx512fp16")] + unsafe fn test_mm_maskz_fmadd_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_maskz_fmadd_sch(0, a, b, c); + let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); + let r = _mm_maskz_fmadd_sch(1, a, b, c); + let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_store_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let mut b = _mm256_setzero_ph(); - _mm256_store_ph(addr_of_mut!(b).cast(), a); - assert_eq_m256h(a, b); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_fmadd_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_store_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, + unsafe fn test_mm_mask_fmadd_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_mask_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, 0, b, c, ); - let mut b = _mm512_setzero_ph(); - _mm512_store_ph(addr_of_mut!(b).cast(), a); - assert_eq_m512h(a, b); + let e = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); + let r = _mm_mask_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, 1, b, c, + ); + let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_store_sh() { - let a = _mm_set_sh(1.0); - let mut b = _mm_setzero_ph(); - _mm_store_sh(addr_of_mut!(b).cast(), a); - assert_eq_m128h(a, b); + unsafe fn test_mm_mask3_fmadd_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_mask3_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, b, c, 0, + ); + let e = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + assert_eq_m128h(r, e); + let r = _mm_mask3_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, b, c, 1, + ); + let e = _mm_setr_ph(-2.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_store_sh() { - let a = _mm_set_sh(1.0); - let mut b = _mm_setzero_ph(); - _mm_mask_store_sh(addr_of_mut!(b).cast(), 0, a); - assert_eq_m128h(_mm_setzero_ph(), b); - _mm_mask_store_sh(addr_of_mut!(b).cast(), 1, a); - assert_eq_m128h(a, b); + unsafe fn test_mm_maskz_fmadd_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 
4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_maskz_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0, a, b, c, + ); + let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); + let r = _mm_maskz_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 1, a, b, c, + ); + let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_storeu_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let mut array = [0.0; 8]; - _mm_storeu_ph(array.as_mut_ptr(), a); - assert_eq_m128h(a, _mm_loadu_ph(array.as_ptr())); + unsafe fn test_mm_fcmadd_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 2.0); + let c = _mm_set1_pch(0.0, 3.0); + let r = _mm_fcmadd_pch(a, b, c); + let e = _mm_set1_pch(2.0, 3.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_storeu_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let mut array = [0.0; 16]; - _mm256_storeu_ph(array.as_mut_ptr(), a); - assert_eq_m256h(a, _mm256_loadu_ph(array.as_ptr())); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_storeu_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let mut array = [0.0; 32]; - _mm512_storeu_ph(array.as_mut_ptr(), a); - assert_eq_m512h(a, _mm512_loadu_ph(array.as_ptr())); + unsafe fn test_mm_mask_fcmadd_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 2.0); + let c = _mm_set1_pch(0.0, 3.0); + let r = _mm_mask_fcmadd_pch(a, 0b0101, b, c); + let e = _mm_setr_ph(2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_add_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - let r = _mm_add_ph(a, b); - let e = _mm_set1_ph(9.0); + unsafe fn test_mm_mask3_fcmadd_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 2.0); + let c = _mm_set1_pch(0.0, 3.0); + let r = _mm_mask3_fcmadd_pch(a, b, c, 0b0101); + let e = _mm_setr_ph(2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_add_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let r = _mm_mask_add_ph(src, 0b01010101, a, b); - let e = _mm_set_ph(10., 9., 12., 9., 14., 9., 16., 9.); + unsafe fn test_mm_maskz_fcmadd_pch() { + let a = _mm_set1_pch(0.0, 1.0); + let b = _mm_set1_pch(0.0, 2.0); + let c = _mm_set1_pch(0.0, 3.0); + let r = _mm_maskz_fcmadd_pch(0b0101, a, b, c); + let e = _mm_setr_ph(2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_add_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - let r = _mm_maskz_add_ph(0b01010101, a, b); - let e = _mm_set_ph(0., 
9., 0., 9., 0., 9., 0., 9.); - assert_eq_m128h(r, e); + unsafe fn test_mm256_fcmadd_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 2.0); + let c = _mm256_set1_pch(0.0, 3.0); + let r = _mm256_fcmadd_pch(a, b, c); + let e = _mm256_set1_pch(2.0, 3.0); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_add_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let b = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, + unsafe fn test_mm256_mask_fcmadd_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 2.0); + let c = _mm256_set1_pch(0.0, 3.0); + let r = _mm256_mask_fcmadd_pch(a, 0b01010101, b, c); + let e = _mm256_setr_ph( + 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, ); - let r = _mm256_add_ph(a, b); - let e = _mm256_set1_ph(17.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_add_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let b = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, - ); - let src = _mm256_set_ph( - 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., - ); - let r = _mm256_mask_add_ph(src, 0b0101010101010101, a, b); - let e = _mm256_set_ph( - 18., 17., 20., 17., 22., 17., 24., 17., 26., 17., 28., 17., 30., 17., 32., 17., + unsafe fn test_mm256_mask3_fcmadd_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 2.0); + let c = _mm256_set1_pch(0.0, 3.0); + let r = _mm256_mask3_fcmadd_pch(a, b, c, 0b01010101); + let e = _mm256_setr_ph( + 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_add_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let b = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, - ); - let r = _mm256_maskz_add_ph(0b0101010101010101, a, b); - let e = _mm256_set_ph( - 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., 0., 17., + unsafe fn test_mm256_maskz_fcmadd_pch() { + let a = _mm256_set1_pch(0.0, 1.0); + let b = _mm256_set1_pch(0.0, 2.0); + let c = _mm256_set1_pch(0.0, 3.0); + let r = _mm256_maskz_fcmadd_pch(0b01010101, a, b, c); + let e = _mm256_setr_ph( + 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_add_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let r = _mm512_add_ph(a, b); - let e = _mm512_set1_ph(33.0); + unsafe fn test_mm512_fcmadd_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_fcmadd_pch(a, 
b, c); + let e = _mm512_set1_pch(2.0, 3.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_add_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, + unsafe fn test_mm512_mask_fcmadd_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_mask_fcmadd_pch(a, 0b0101010101010101, b, c); + let e = _mm512_setr_ph( + 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, + 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, ); - let src = _mm512_set_ph( - 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., - 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask3_fcmadd_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_mask3_fcmadd_pch(a, b, c, 0b0101010101010101); + let e = _mm512_setr_ph( + 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, + 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, ); - let r = _mm512_mask_add_ph(src, 0b01010101010101010101010101010101, a, b); - let e = _mm512_set_ph( - 34., 33., 36., 33., 38., 33., 40., 33., 42., 33., 44., 33., 46., 33., 48., 33., 50., - 33., 52., 33., 54., 33., 56., 33., 58., 33., 60., 33., 62., 33., 64., 33., + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_fcmadd_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = _mm512_maskz_fcmadd_pch(0b0101010101010101, a, b, c); + let e = _mm512_setr_ph( + 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, + 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_add_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let r = _mm512_maskz_add_ph(0b01010101010101010101010101010101, a, b); - let e = _mm512_set_ph( - 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., - 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., - ); + unsafe fn test_mm512_fcmadd_round_pch() { + let a = _mm512_set1_pch(0.0, 1.0); + let b = _mm512_set1_pch(0.0, 2.0); + let c = _mm512_set1_pch(0.0, 3.0); + let r = + _mm512_fcmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm512_set1_pch(2.0, 3.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - 
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, 2.0);
+        let c = _mm512_set1_pch(0.0, 3.0);
+        let r =
+            _mm512_fcmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c);
+        let e = _mm512_set1_pch(2.0, 3.0);
         assert_eq_m512h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_add_round_ph() {
-        let a = _mm512_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
-            31.0, 32.0,
+    unsafe fn test_mm512_mask_fcmadd_round_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, 2.0);
+        let c = _mm512_set1_pch(0.0, 3.0);
+        let r = _mm512_mask_fcmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            a,
+            0b0101010101010101,
+            b,
+            c,
         );
-        let b = _mm512_set_ph(
-            32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0,
-            18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0,
-            3.0, 2.0, 1.0,
+        let e = _mm512_setr_ph(
+            2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0,
+            3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0,
         );
-        let r = _mm512_add_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
-        let e = _mm512_set1_ph(33.0);
         assert_eq_m512h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask_add_round_ph() {
-        let a = _mm512_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
-            31.0, 32.0,
-        );
-        let b = _mm512_set_ph(
-            32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0,
-            18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0,
-            3.0, 2.0, 1.0,
-        );
-        let src = _mm512_set_ph(
-            34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50.,
-            51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65.,
-        );
-        let r = _mm512_mask_add_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            src,
-            0b01010101010101010101010101010101,
+    unsafe fn test_mm512_mask3_fcmadd_round_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, 2.0);
+        let c = _mm512_set1_pch(0.0, 3.0);
+        let r = _mm512_mask3_fcmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             a,
             b,
+            c,
+            0b0101010101010101,
         );
-        let e = _mm512_set_ph(
-            34., 33., 36., 33., 38., 33., 40., 33., 42., 33., 44., 33., 46., 33., 48., 33., 50.,
-            33., 52., 33., 54., 33., 56., 33., 58., 33., 60., 33., 62., 33., 64., 33.,
+        let e = _mm512_setr_ph(
+            2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0,
+            3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0,
         );
         assert_eq_m512h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_maskz_add_round_ph() {
-        let a = _mm512_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
-            31.0, 32.0,
-        );
-        let b = _mm512_set_ph(
-            32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0,
-            18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0,
-            3.0, 2.0, 1.0,
-        );
-        let r = _mm512_maskz_add_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+    unsafe fn test_mm512_maskz_fcmadd_round_pch() {
+        let a = _mm512_set1_pch(0.0, 1.0);
+        let b = _mm512_set1_pch(0.0, 2.0);
+        let c = _mm512_set1_pch(0.0, 3.0);
+        let r = _mm512_maskz_fcmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            0b0101010101010101,
             a,
             b,
+            c,
         );
-        let e = _mm512_set_ph(
-            0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0.,
-            33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33., 0., 33.,
+        let e = _mm512_setr_ph(
+            2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0,
+            3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0,
         );
         assert_eq_m512h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_add_round_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let r = _mm_add_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
-        let e = _mm_set_sh(3.0);
+    unsafe fn test_mm_fcmadd_sch() {
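+        // Scalar complex FMA: only the low complex pair (lanes 0 and 1) is computed
+        // as a * conj(b) + c; the upper six f16 lanes are copied over from a.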
"avx512fp16")] - unsafe fn test_mm_mask_add_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let src = _mm_set_sh(4.0); - let r = _mm_mask_add_sh(src, 0, a, b); - let e = _mm_set_sh(4.0); + unsafe fn test_mm_fcmadd_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); - let r = _mm_mask_add_sh(src, 1, a, b); - let e = _mm_set_sh(3.0); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_fcmadd_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_mask_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, 0, b, c, + ); + let e = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); + let r = _mm_mask_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, 1, b, c, + ); + let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_add_sh() { - let a = _mm_set_sh(1.0); - let b = _mm_set_sh(2.0); - let r = _mm_maskz_add_sh(0, a, b); - let e = _mm_set_sh(0.0); + unsafe fn test_mm_mask3_fcmadd_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_mask3_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, b, c, 0, + ); + let e = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); assert_eq_m128h(r, e); - let r = _mm_maskz_add_sh(1, a, b); - let e = _mm_set_sh(3.0); + let r = _mm_mask3_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, b, c, 1, + ); + let e = _mm_setr_ph(2.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_fcmadd_round_sch() { + let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); + let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_maskz_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0, a, b, c, + ); + let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); + let r = _mm_maskz_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 1, a, b, c, + ); + let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_fmadd_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_fmadd_ph(a, b, c); + let e = _mm_set1_ph(5.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_fmadd_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_mask_fmadd_ph(a, 0b01010101, b, c); + let e = _mm_set_ph(1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0); assert_eq_m128h(r, e); } #[simd_test(enable = 
"avx512fp16,avx512vl")] - unsafe fn test_mm_sub_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - let r = _mm_sub_ph(a, b); - let e = _mm_set_ph(-7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0); + unsafe fn test_mm_mask3_fmadd_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_mask3_fmadd_ph(a, b, c, 0b01010101); + let e = _mm_set_ph(3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_sub_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let r = _mm_mask_sub_ph(src, 0b01010101, a, b); - let e = _mm_set_ph(10., -5., 12., -1., 14., 3., 16., 7.); + unsafe fn test_mm_maskz_fmadd_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_maskz_fmadd_ph(0b01010101, a, b, c); + let e = _mm_set_ph(0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_sub_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0); - let r = _mm_maskz_sub_ph(0b01010101, a, b); - let e = _mm_set_ph(0., -5., 0., -1., 0., 3., 0., 7.); - assert_eq_m128h(r, e); + unsafe fn test_mm256_fmadd_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_fmadd_ph(a, b, c); + let e = _mm256_set1_ph(5.0); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_sub_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let b = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, - ); - let r = _mm256_sub_ph(a, b); + unsafe fn test_mm256_mask_fmadd_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_mask_fmadd_ph(a, 0b0101010101010101, b, c); let e = _mm256_set_ph( - -15.0, -13.0, -11.0, -9.0, -7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, - 15.0, + 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_sub_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let b = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, - ); - let src = _mm256_set_ph( - 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., - ); - let r = _mm256_mask_sub_ph(src, 0b0101010101010101, a, b); + unsafe fn test_mm256_mask3_fmadd_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_mask3_fmadd_ph(a, b, c, 0b0101010101010101); let e = _mm256_set_ph( - 18., -13., 20., -9., 22., -5., 24., -1., 26., 3., 28., 7., 30., 11., 32., 15., + 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_sub_ph() { - let a = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 
7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - let b = _mm256_set_ph( - 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, - ); - let r = _mm256_maskz_sub_ph(0b0101010101010101, a, b); + unsafe fn test_mm256_maskz_fmadd_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_maskz_fmadd_ph(0b0101010101010101, a, b, c); let e = _mm256_set_ph( - 0., -13., 0., -9., 0., -5., 0., -1., 0., 3., 0., 7., 0., 11., 0., 15., + 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_sub_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let r = _mm512_sub_ph(a, b); + unsafe fn test_mm512_fmadd_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_fmadd_ph(a, b, c); + let e = _mm512_set1_ph(5.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_fmadd_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask_fmadd_ph(a, 0b01010101010101010101010101010101, b, c); let e = _mm512_set_ph( - -31.0, -29.0, -27.0, -25.0, -23.0, -21.0, -19.0, -17.0, -15.0, -13.0, -11.0, -9.0, - -7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, - 23.0, 25.0, 27.0, 29.0, 31.0, + 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, + 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_sub_ph() { - let a = _mm512_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, - ); - let b = _mm512_set_ph( - 32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0, - 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, - 3.0, 2.0, 1.0, - ); - let src = _mm512_set_ph( - 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., - 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., - ); - let r = _mm512_mask_sub_ph(src, 0b01010101010101010101010101010101, a, b); + unsafe fn test_mm512_mask3_fmadd_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask3_fmadd_ph(a, b, c, 0b01010101010101010101010101010101); let e = _mm512_set_ph( - 34., -29., 36., -25., 38., -21., 40., -17., 42., -13., 44., -9., 46., -5., 48., -1., - 50., 3., 52., 7., 54., 11., 56., 15., 58., 19., 60., 23., 62., 27., 64., 31., + 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, + 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_sub_ph() { - let a = 
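+        // mask_ variants merge with the first operand: lanes whose mask bit is zero
+        // keep the value from `a` (1.0); lanes with a set bit get a * b + c (5.0).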
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_mask_fmadd_ph(a, 0b01010101010101010101010101010101, b, c);
         let e = _mm512_set_ph(
-            -31.0, -29.0, -27.0, -25.0, -23.0, -21.0, -19.0, -17.0, -15.0, -13.0, -11.0, -9.0,
-            -7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0,
-            23.0, 25.0, 27.0, 29.0, 31.0,
+            1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0,
+            5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0,
         );
         assert_eq_m512h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask_sub_ph() {
-        let a = _mm512_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
-            31.0, 32.0,
-        );
-        let b = _mm512_set_ph(
-            32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0,
-            18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0,
-            3.0, 2.0, 1.0,
-        );
-        let src = _mm512_set_ph(
-            34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50.,
-            51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65.,
-        );
-        let r = _mm512_mask_sub_ph(src, 0b01010101010101010101010101010101, a, b);
+    unsafe fn test_mm512_mask3_fmadd_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_mask3_fmadd_ph(a, b, c, 0b01010101010101010101010101010101);
         let e = _mm512_set_ph(
-            34., -29., 36., -25., 38., -21., 40., -17., 42., -13., 44., -9., 46., -5., 48., -1.,
-            50., 3., 52., 7., 54., 11., 56., 15., 58., 19., 60., 23., 62., 27., 64., 31.,
+            3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0,
+            5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0,
         );
         assert_eq_m512h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_maskz_sub_ph() {
-        let a = _mm512_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
-            31.0, 32.0,
-        );
-        let b = _mm512_set_ph(
-            32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0,
-            18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0,
-            3.0, 2.0, 1.0,
-        );
-        let r = _mm512_maskz_sub_ph(0b01010101010101010101010101010101, a, b);
+    unsafe fn test_mm512_maskz_fmadd_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_maskz_fmadd_ph(0b01010101010101010101010101010101, a, b, c);
         let e = _mm512_set_ph(
-            0., -29., 0., -25., 0., -21., 0., -17., 0., -13., 0., -9., 0., -5., 0., -1., 0., 3.,
-            0., 7., 0., 11., 0., 15., 0., 19., 0., 23., 0., 27., 0., 31.,
+            0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0,
+            5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0,
         );
         assert_eq_m512h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_sub_round_ph() {
-        let a = _mm512_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
-            31.0, 32.0,
-        );
-        let b = _mm512_set_ph(
-            32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0,
-            18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0,
-            3.0, 2.0, 1.0,
+    unsafe fn test_mm512_fmadd_round_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_fmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c);
+        let e = _mm512_set1_ph(5.0);
+        assert_eq_m512h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm512_mask_fmadd_round_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_mask_fmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            a,
+            0b01010101010101010101010101010101,
+            b,
+            c,
         );
-        let r = _mm512_sub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
         let e = _mm512_set_ph(
-            -31.0, -29.0, -27.0, -25.0, -23.0, -21.0, -19.0, -17.0, -15.0, -13.0, -11.0, -9.0,
-            -7.0, -5.0, -3.0, -1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0,
-            23.0, 25.0, 27.0, 29.0, 31.0,
+            1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0,
+            5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0,
         );
         assert_eq_m512h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask_sub_round_ph() {
-        let a = _mm512_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
-            31.0, 32.0,
-        );
-        let b = _mm512_set_ph(
-            32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0,
-            18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0,
-            3.0, 2.0, 1.0,
-        );
-        let src = _mm512_set_ph(
-            34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50.,
-            51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65.,
-        );
-        let r = _mm512_mask_sub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            src,
-            0b01010101010101010101010101010101,
+    unsafe fn test_mm512_mask3_fmadd_round_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_mask3_fmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             a,
             b,
+            c,
+            0b01010101010101010101010101010101,
         );
         let e = _mm512_set_ph(
-            34., -29., 36., -25., 38., -21., 40., -17., 42., -13., 44., -9., 46., -5., 48., -1.,
-            50., 3., 52., 7., 54., 11., 56., 15., 58., 19., 60., 23., 62., 27., 64., 31.,
+            3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0,
+            5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0,
         );
         assert_eq_m512h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_maskz_sub_round_ph() {
-        let a = _mm512_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
-            31.0, 32.0,
-        );
-        let b = _mm512_set_ph(
-            32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0,
-            18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0,
-            3.0, 2.0, 1.0,
-        );
-        let r = _mm512_maskz_sub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+    unsafe fn test_mm512_maskz_fmadd_round_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_maskz_fmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             0b01010101010101010101010101010101,
             a,
             b,
+            c,
         );
         let e = _mm512_set_ph(
-            0., -29., 0., -25., 0., -21., 0., -17., 0., -13., 0., -9., 0., -5., 0., -1., 0., 3.,
-            0., 7., 0., 11., 0., 15., 0., 19., 0., 23., 0., 27., 0., 31.,
+            0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0,
+            5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0,
        );
         assert_eq_m512h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_sub_round_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let r = _mm_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
-        let e = _mm_set_sh(-1.0);
+    unsafe fn test_mm_fmadd_sh() {
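+        // Scalar fmadd: only lane 0 computes a * b + c (5.0); lanes 1-7 are copied from a.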
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_fmadd_sh(a, b, c);
+        let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.);
+        assert_eq_m128h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm_mask_fmadd_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_mask_fmadd_sh(a, 0, b, c);
+        let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        assert_eq_m128h(r, e);
+        let r = _mm_mask_fmadd_sh(a, 1, b, c);
+        let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask_sub_round_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let src = _mm_set_sh(4.0);
-        let r = _mm_mask_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            src, 0, a, b,
-        );
-        let e = _mm_set_sh(4.0);
+    unsafe fn test_mm_mask3_fmadd_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_mask3_fmadd_sh(a, b, c, 0);
+        let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
         assert_eq_m128h(r, e);
-        let r = _mm_mask_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            src, 1, a, b,
-        );
-        let e = _mm_set_sh(-1.0);
+        let r = _mm_mask3_fmadd_sh(a, b, c, 1);
+        let e = _mm_setr_ph(5.0, 30., 31., 32., 33., 34., 35., 36.);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_maskz_sub_round_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let r =
-            _mm_maskz_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b);
-        let e = _mm_set_sh(0.0);
+    unsafe fn test_mm_maskz_fmadd_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_maskz_fmadd_sh(0, a, b, c);
+        let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
-        let r =
-            _mm_maskz_sub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b);
-        let e = _mm_set_sh(-1.0);
+        let r = _mm_maskz_fmadd_sh(1, a, b, c);
+        let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_sub_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let r = _mm_sub_sh(a, b);
-        let e = _mm_set_sh(-1.0);
+    unsafe fn test_mm_fmadd_round_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c);
+        let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask_sub_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let src = _mm_set_sh(4.0);
-        let r = _mm_mask_sub_sh(src, 0, a, b);
-        let e = _mm_set_sh(4.0);
+    unsafe fn test_mm_mask_fmadd_round_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_mask_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            a, 0, b, c,
+        );
+        let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
-        let r = _mm_mask_sub_sh(src, 1, a, b);
-        let e = _mm_set_sh(-1.0);
+        let r = _mm_mask_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            a, 1, b, c,
+        );
+        let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_maskz_sub_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let r = _mm_maskz_sub_sh(0, a, b);
-        let e = _mm_set_sh(0.0);
+    unsafe fn test_mm_mask3_fmadd_round_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_mask3_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            a, b, c, 0,
+        );
+        let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
         assert_eq_m128h(r, e);
-        let r = _mm_maskz_sub_sh(1, a, b);
-        let e = _mm_set_sh(-1.0);
+        let r = _mm_mask3_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            a, b, c, 1,
+        );
+        let e = _mm_setr_ph(5.0, 30., 31., 32., 33., 34., 35., 36.);
+        assert_eq_m128h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm_maskz_fmadd_round_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_maskz_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            0, a, b, c,
+        );
+        let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.);
+        assert_eq_m128h(r, e);
+        let r = _mm_maskz_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            1, a, b, c,
+        );
+        let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_mul_ph() {
-        let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0);
-        let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0);
-        let r = _mm_mul_ph(a, b);
-        let e = _mm_set_ph(8.0, 14.0, 18.0, 20.0, 20.0, 18.0, 14.0, 8.0);
+    unsafe fn test_mm_fmsub_ph() {
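+        // fmsub negates the addend: each lane computes a * b - c, here 1.0 * 2.0 - 3.0 == -1.0.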
+        let a = _mm_set1_ph(1.0);
+        let b = _mm_set1_ph(2.0);
+        let c = _mm_set1_ph(3.0);
+        let r = _mm_fmsub_ph(a, b, c);
+        let e = _mm_set1_ph(-1.0);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_mask_mul_ph() {
-        let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0);
-        let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0);
-        let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.);
-        let r = _mm_mask_mul_ph(src, 0b01010101, a, b);
-        let e = _mm_set_ph(10., 14., 12., 20., 14., 18., 16., 8.);
+    unsafe fn test_mm_mask_fmsub_ph() {
+        let a = _mm_set1_ph(1.0);
+        let b = _mm_set1_ph(2.0);
+        let c = _mm_set1_ph(3.0);
+        let r = _mm_mask_fmsub_ph(a, 0b01010101, b, c);
+        let e = _mm_set_ph(1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_maskz_mul_ph() {
-        let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0);
-        let b = _mm_set_ph(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0);
-        let r = _mm_maskz_mul_ph(0b01010101, a, b);
-        let e = _mm_set_ph(0., 14., 0., 20., 0., 18., 0., 8.);
+    unsafe fn test_mm_mask3_fmsub_ph() {
+        let a = _mm_set1_ph(1.0);
+        let b = _mm_set1_ph(2.0);
+        let c = _mm_set1_ph(3.0);
+        let r = _mm_mask3_fmsub_ph(a, b, c, 0b01010101);
+        let e = _mm_set_ph(3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_mul_ph() {
-        let a = _mm256_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-        );
-        let b = _mm256_set_ph(
-            16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0,
-        );
-        let r = _mm256_mul_ph(a, b);
+    unsafe fn test_mm_maskz_fmsub_ph() {
+        let a = _mm_set1_ph(1.0);
+        let b = _mm_set1_ph(2.0);
+        let c = _mm_set1_ph(3.0);
+        let r = _mm_maskz_fmsub_ph(0b01010101, a, b, c);
+        let e = _mm_set_ph(0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0);
+        assert_eq_m128h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm256_fmsub_ph() {
+        let a = _mm256_set1_ph(1.0);
+        let b = _mm256_set1_ph(2.0);
+        let c = _mm256_set1_ph(3.0);
+        let r = _mm256_fmsub_ph(a, b, c);
+        let e = _mm256_set1_ph(-1.0);
+        assert_eq_m256h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm256_mask_fmsub_ph() {
+        let a = _mm256_set1_ph(1.0);
+        let b = _mm256_set1_ph(2.0);
+        let c = _mm256_set1_ph(3.0);
+        let r = _mm256_mask_fmsub_ph(a, 0b0101010101010101, b, c);
         let e = _mm256_set_ph(
-            16.0, 30.0, 42.0, 52.0, 60.0, 66.0, 70.0, 72.0, 72.0, 70.0, 66.0, 60.0, 52.0, 42.0,
-            30.0, 16.0,
+            1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0,
         );
         assert_eq_m256h(r, e);
     }

     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_mask_mul_ph() {
-        let a = _mm256_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-        );
-        let b = _mm256_set_ph(
-            16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0,
-        );
-        let src = _mm256_set_ph(
-            18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33.,
-        );
-        let r = _mm256_mask_mul_ph(src, 0b0101010101010101, a, b);
+    unsafe fn test_mm256_mask3_fmsub_ph() {
+        let a = _mm256_set1_ph(1.0);
+        let b = _mm256_set1_ph(2.0);
+        let c = _mm256_set1_ph(3.0);
+        let r = _mm256_mask3_fmsub_ph(a, b, c, 0b0101010101010101);
         let e = _mm256_set_ph(
-            18., 30., 20., 52., 22., 66., 24., 72., 26., 70., 28., 60., 30., 42., 32., 16.,
+            3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0,
         );
         assert_eq_m256h(r, e);
     }

     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_maskz_mul_ph() {
-        let a = _mm256_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-        );
-        let b = _mm256_set_ph(
-            16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0,
-        );
-        let r = _mm256_maskz_mul_ph(0b0101010101010101, a, b);
+    unsafe fn test_mm256_maskz_fmsub_ph() {
+        let a = _mm256_set1_ph(1.0);
+        let b = _mm256_set1_ph(2.0);
+        let c = _mm256_set1_ph(3.0);
+        let r = _mm256_maskz_fmsub_ph(0b0101010101010101, a, b, c);
         let e = _mm256_set_ph(
-            0., 30., 0., 52., 0., 66., 0., 72., 0., 70., 0., 60., 0., 42., 0., 16.,
+            0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0,
         );
         assert_eq_m256h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mul_ph() {
-        let a = _mm512_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
-            31.0, 32.0,
-        );
-        let b = _mm512_set_ph(
-            32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0,
-            18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0,
-            3.0, 2.0, 1.0,
-        );
-        let r = _mm512_mul_ph(a, b);
+    unsafe fn test_mm512_fmsub_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_fmsub_ph(a, b, c);
+        let e = _mm512_set1_ph(-1.0);
+        assert_eq_m512h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm512_mask_fmsub_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_mask_fmsub_ph(a, 0b01010101010101010101010101010101, b, c);
         let e = _mm512_set_ph(
-            32.0, 62.0, 90.0, 116.0, 140.0, 162.0, 182.0, 200.0, 216.0, 230.0, 242.0, 252.0, 260.0,
-            266.0, 270.0, 272.0, 272.0, 270.0, 266.0, 260.0, 252.0, 242.0, 230.0, 216.0, 200.0,
-            182.0, 162.0, 140.0, 116.0, 90.0, 62.0, 32.0,
+            1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0,
+            1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0,
         );
         assert_eq_m512h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask_mul_ph() {
-        let a = _mm512_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
-            31.0, 32.0,
-        );
-        let b = _mm512_set_ph(
-            32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0,
-            18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0,
-            3.0, 2.0, 1.0,
-        );
-        let src = _mm512_set_ph(
-            34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50.,
-            51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65.,
-        );
-        let r = _mm512_mask_mul_ph(src, 0b01010101010101010101010101010101, a, b);
+    unsafe fn test_mm512_mask3_fmsub_ph() {
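+        // mask3_ variants merge with the third operand: masked-off lanes keep `c` (3.0)
+        // rather than `a`.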
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_mask3_fmsub_ph(a, b, c, 0b01010101010101010101010101010101);
         let e = _mm512_set_ph(
-            34., 62., 36., 116., 38., 162., 40., 200., 42., 230., 44., 252., 46., 266., 48., 272.,
-            50., 270., 52., 260., 54., 242., 56., 216., 58., 182., 60., 140., 62., 90., 64., 32.,
+            3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0,
+            3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0,
         );
         assert_eq_m512h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_maskz_mul_ph() {
-        let a = _mm512_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
-            31.0, 32.0,
-        );
-        let b = _mm512_set_ph(
-            32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0,
-            18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0,
-            3.0, 2.0, 1.0,
-        );
-        let r = _mm512_maskz_mul_ph(0b01010101010101010101010101010101, a, b);
+    unsafe fn test_mm512_maskz_fmsub_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_maskz_fmsub_ph(0b01010101010101010101010101010101, a, b, c);
         let e = _mm512_set_ph(
-            0., 62., 0., 116., 0., 162., 0., 200., 0., 230., 0., 252., 0., 266., 0., 272., 0.,
-            270., 0., 260., 0., 242., 0., 216., 0., 182., 0., 140., 0., 90., 0., 32.,
+            0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0,
+            0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0,
         );
         assert_eq_m512h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mul_round_ph() {
-        let a = _mm512_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
-            31.0, 32.0,
-        );
-        let b = _mm512_set_ph(
-            32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0,
-            18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0,
-            3.0, 2.0, 1.0,
+    unsafe fn test_mm512_fmsub_round_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_fmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c);
+        let e = _mm512_set1_ph(-1.0);
+        assert_eq_m512h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm512_mask_fmsub_round_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_mask_fmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            a,
+            0b01010101010101010101010101010101,
+            b,
+            c,
         );
-        let r = _mm512_mul_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
         let e = _mm512_set_ph(
-            32.0, 62.0, 90.0, 116.0, 140.0, 162.0, 182.0, 200.0, 216.0, 230.0, 242.0, 252.0, 260.0,
-            266.0, 270.0, 272.0, 272.0, 270.0, 266.0, 260.0, 252.0, 242.0, 230.0, 216.0, 200.0,
-            182.0, 162.0, 140.0, 116.0, 90.0, 62.0, 32.0,
+            1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0,
+            1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0,
         );
         assert_eq_m512h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask_mul_round_ph() {
-        let a = _mm512_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
-            31.0, 32.0,
-        );
-        let b = _mm512_set_ph(
-            32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0,
-            18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0,
-            3.0, 2.0, 1.0,
-        );
-        let src = _mm512_set_ph(
-            34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50.,
-            51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65.,
-        );
-        let r = _mm512_mask_mul_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            src,
-            0b01010101010101010101010101010101,
+    unsafe fn test_mm512_mask3_fmsub_round_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_mask3_fmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             a,
             b,
+            c,
+            0b01010101010101010101010101010101,
         );
         let e = _mm512_set_ph(
-            34., 62., 36., 116., 38., 162., 40., 200., 42., 230., 44., 252., 46., 266., 48., 272.,
-            50., 270., 52., 260., 54., 242., 56., 216., 58., 182., 60., 140., 62., 90., 64., 32.,
+            3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0,
+            3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0,
         );
         assert_eq_m512h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_maskz_mul_round_ph() {
-        let a = _mm512_set_ph(
-            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-            17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
-            31.0, 32.0,
-        );
-        let b = _mm512_set_ph(
-            32.0, 31.0, 30.0, 29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0, 19.0,
-            18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0,
-            3.0, 2.0, 1.0,
-        );
-        let r = _mm512_maskz_mul_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+    unsafe fn test_mm512_maskz_fmsub_round_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_maskz_fmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             0b01010101010101010101010101010101,
             a,
             b,
+            c,
         );
         let e = _mm512_set_ph(
-            0., 62., 0., 116., 0., 162., 0., 200., 0., 230., 0., 252., 0., 266., 0., 272., 0.,
-            270., 0., 260., 0., 242., 0., 216., 0., 182., 0., 140., 0., 90., 0., 32.,
+            0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0,
+            0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0,
         );
         assert_eq_m512h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mul_round_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let r = _mm_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
-        let e = _mm_set_sh(2.0);
+    unsafe fn test_mm_fmsub_sh() {
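+        // Scalar fmsub: lane 0 becomes a * b - c == -1.0; lanes 1-7 are copied from a.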
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_fmsub_sh(a, b, c);
+        let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask_mul_round_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let src = _mm_set_sh(4.0);
-        let r = _mm_mask_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            src, 0, a, b,
-        );
-        let e = _mm_set_sh(4.0);
+    unsafe fn test_mm_mask_fmsub_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_mask_fmsub_sh(a, 0, b, c);
+        let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
-        let r = _mm_mask_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            src, 1, a, b,
-        );
-        let e = _mm_set_sh(2.0);
+        let r = _mm_mask_fmsub_sh(a, 1, b, c);
+        let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_maskz_mul_round_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let r =
-            _mm_maskz_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b);
-        let e = _mm_set_sh(0.0);
+    unsafe fn test_mm_mask3_fmsub_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_mask3_fmsub_sh(a, b, c, 0);
+        let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
         assert_eq_m128h(r, e);
-        let r =
-            _mm_maskz_mul_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b);
-        let e = _mm_set_sh(2.0);
+        let r = _mm_mask3_fmsub_sh(a, b, c, 1);
+        let e = _mm_setr_ph(-1.0, 30., 31., 32., 33., 34., 35., 36.);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mul_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let r = _mm_mul_sh(a, b);
-        let e = _mm_set_sh(2.0);
+    unsafe fn test_mm_maskz_fmsub_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_maskz_fmsub_sh(0, a, b, c);
+        let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.);
+        assert_eq_m128h(r, e);
+        let r = _mm_maskz_fmsub_sh(1, a, b, c);
+        let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask_mul_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let src = _mm_set_sh(4.0);
-        let r = _mm_mask_mul_sh(src, 0, a, b);
-        let e = _mm_set_sh(4.0);
+    unsafe fn test_mm_fmsub_round_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c);
+        let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
-        let r = _mm_mask_mul_sh(src, 1, a, b);
-        let e = _mm_set_sh(2.0);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm_mask_fmsub_round_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_mask_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            a, 0, b, c,
+        );
+        let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        assert_eq_m128h(r, e);
+        let r = _mm_mask_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            a, 1, b, c,
+        );
+        let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.);
+        assert_eq_m128h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm_mask3_fmsub_round_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_mask3_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            a, b, c, 0,
+        );
+        let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        assert_eq_m128h(r, e);
+        let r = _mm_mask3_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            a, b, c, 1,
+        );
+        let e = _mm_setr_ph(-1.0, 30., 31., 32., 33., 34., 35., 36.);
+        assert_eq_m128h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm_maskz_fmsub_round_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_maskz_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            0, a, b, c,
+        );
+        let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.);
+        assert_eq_m128h(r, e);
+        let r = _mm_maskz_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            1, a, b, c,
+        );
+        let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
     }

-    #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_maskz_mul_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let r = _mm_maskz_mul_sh(0, a, b);
-        let e = _mm_set_sh(0.0);
-        assert_eq_m128h(r, e);
-        let r = _mm_maskz_mul_sh(1, a, b);
-        let e = _mm_set_sh(2.0);
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm_fnmadd_ph() {
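+        // fnmadd negates the product: each lane computes -(a * b) + c,
+        // here -(1.0 * 2.0) + 3.0 == 1.0.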
+        let a = _mm_set1_ph(1.0);
+        let b = _mm_set1_ph(2.0);
+        let c = _mm_set1_ph(3.0);
+        let r = _mm_fnmadd_ph(a, b, c);
+        let e = _mm_set1_ph(1.0);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_div_ph() {
+    unsafe fn test_mm_mask_fnmadd_ph() {
         let a = _mm_set1_ph(1.0);
         let b = _mm_set1_ph(2.0);
-        let r = _mm_div_ph(a, b);
-        let e = _mm_set1_ph(0.5);
+        let c = _mm_set1_ph(3.0);
+        let r = _mm_mask_fnmadd_ph(a, 0b01010101, b, c);
+        let e = _mm_set_ph(1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_mask_div_ph() {
+    unsafe fn test_mm_mask3_fnmadd_ph() {
         let a = _mm_set1_ph(1.0);
         let b = _mm_set1_ph(2.0);
-        let src = _mm_set_ph(4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0);
-        let r = _mm_mask_div_ph(src, 0b01010101, a, b);
-        let e = _mm_set_ph(4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5);
+        let c = _mm_set1_ph(3.0);
+        let r = _mm_mask3_fnmadd_ph(a, b, c, 0b01010101);
+        let e = _mm_set_ph(3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm_maskz_div_ph() {
+    unsafe fn test_mm_maskz_fnmadd_ph() {
         let a = _mm_set1_ph(1.0);
         let b = _mm_set1_ph(2.0);
-        let r = _mm_maskz_div_ph(0b01010101, a, b);
-        let e = _mm_set_ph(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5);
+        let c = _mm_set1_ph(3.0);
+        let r = _mm_maskz_fnmadd_ph(0b01010101, a, b, c);
+        let e = _mm_set_ph(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_div_ph() {
+    unsafe fn test_mm256_fnmadd_ph() {
         let a = _mm256_set1_ph(1.0);
         let b = _mm256_set1_ph(2.0);
-        let r = _mm256_div_ph(a, b);
-        let e = _mm256_set1_ph(0.5);
+        let c = _mm256_set1_ph(3.0);
+        let r = _mm256_fnmadd_ph(a, b, c);
+        let e = _mm256_set1_ph(1.0);
         assert_eq_m256h(r, e);
     }

     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_mask_div_ph() {
+    unsafe fn test_mm256_mask_fnmadd_ph() {
         let a = _mm256_set1_ph(1.0);
         let b = _mm256_set1_ph(2.0);
-        let src = _mm256_set_ph(
-            4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0,
-            19.0,
+        let c = _mm256_set1_ph(3.0);
+        let r = _mm256_mask_fnmadd_ph(a, 0b0101010101010101, b, c);
+        let e = _mm256_set_ph(
+            1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
         );
-        let r = _mm256_mask_div_ph(src, 0b0101010101010101, a, b);
+        assert_eq_m256h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm256_mask3_fnmadd_ph() {
+        let a = _mm256_set1_ph(1.0);
+        let b = _mm256_set1_ph(2.0);
+        let c = _mm256_set1_ph(3.0);
+        let r = _mm256_mask3_fnmadd_ph(a, b, c, 0b0101010101010101);
         let e = _mm256_set_ph(
-            4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5, 12.0, 0.5, 14.0, 0.5, 16.0, 0.5, 18.0, 0.5,
+            3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0,
         );
         assert_eq_m256h(r, e);
     }

     #[simd_test(enable = "avx512fp16,avx512vl")]
-    unsafe fn test_mm256_maskz_div_ph() {
+    unsafe fn test_mm256_maskz_fnmadd_ph() {
         let a = _mm256_set1_ph(1.0);
         let b = _mm256_set1_ph(2.0);
-        let r = _mm256_maskz_div_ph(0b0101010101010101, a, b);
+        let c = _mm256_set1_ph(3.0);
+        let r = _mm256_maskz_fnmadd_ph(0b0101010101010101, a, b, c);
         let e = _mm256_set_ph(
-            0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5,
+            0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0,
         );
         assert_eq_m256h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_div_ph() {
+    unsafe fn test_mm512_fnmadd_ph() {
         let a = _mm512_set1_ph(1.0);
         let b = _mm512_set1_ph(2.0);
-        let r = _mm512_div_ph(a, b);
-        let e = _mm512_set1_ph(0.5);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_fnmadd_ph(a, b, c);
+        let e = _mm512_set1_ph(1.0);
         assert_eq_m512h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask_div_ph() {
+    unsafe fn test_mm512_mask_fnmadd_ph() {
         let a = _mm512_set1_ph(1.0);
         let b = _mm512_set1_ph(2.0);
-        let src = _mm512_set_ph(
-            4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0,
-            19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0,
-            33.0, 34.0, 35.0,
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_mask_fnmadd_ph(a, 0b01010101010101010101010101010101, b, c);
+        let e = _mm512_set_ph(
+            1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
+            1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
         );
-        let r = _mm512_mask_div_ph(src, 0b01010101010101010101010101010101, a, b);
+        assert_eq_m512h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm512_mask3_fnmadd_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_mask3_fnmadd_ph(a, b, c, 0b01010101010101010101010101010101);
         let e = _mm512_set_ph(
-            4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5, 12.0, 0.5, 14.0, 0.5, 16.0, 0.5, 18.0, 0.5,
-            20.0, 0.5, 22.0, 0.5, 24.0, 0.5, 26.0, 0.5, 28.0, 0.5, 30.0, 0.5, 32.0, 0.5, 34.0, 0.5,
+            3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0,
+            1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0,
         );
         assert_eq_m512h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_maskz_div_ph() {
+    unsafe fn test_mm512_maskz_fnmadd_ph() {
         let a = _mm512_set1_ph(1.0);
         let b = _mm512_set1_ph(2.0);
-        let r = _mm512_maskz_div_ph(0b01010101010101010101010101010101, a, b);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_maskz_fnmadd_ph(0b01010101010101010101010101010101, a, b, c);
         let e = _mm512_set_ph(
-            0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0,
-            0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5,
+            0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0,
+            1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0,
        );
         assert_eq_m512h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_div_round_ph() {
+    unsafe fn test_mm512_fnmadd_round_ph() {
         let a = _mm512_set1_ph(1.0);
         let b = _mm512_set1_ph(2.0);
-        let r = _mm512_div_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
-        let e = _mm512_set1_ph(0.5);
+        let c = _mm512_set1_ph(3.0);
+        let r =
+            _mm512_fnmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c);
+        let e = _mm512_set1_ph(1.0);
         assert_eq_m512h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_mask_div_round_ph() {
+    unsafe fn test_mm512_mask_fnmadd_round_ph() {
         let a = _mm512_set1_ph(1.0);
         let b = _mm512_set1_ph(2.0);
-        let src = _mm512_set_ph(
-            4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0,
-            19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0,
-            33.0, 34.0, 35.0,
-        );
-        let r = _mm512_mask_div_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            src,
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_mask_fnmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            a,
             0b01010101010101010101010101010101,
+            b,
+            c,
+        );
+        let e = _mm512_set_ph(
+            1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
+            1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
+        );
+        assert_eq_m512h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm512_mask3_fnmadd_round_ph() {
+        let a = _mm512_set1_ph(1.0);
+        let b = _mm512_set1_ph(2.0);
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_mask3_fnmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             a,
             b,
+            c,
+            0b01010101010101010101010101010101,
         );
         let e = _mm512_set_ph(
-            4.0, 0.5, 6.0, 0.5, 8.0, 0.5, 10.0, 0.5, 12.0, 0.5, 14.0, 0.5, 16.0, 0.5, 18.0, 0.5,
-            20.0, 0.5, 22.0, 0.5, 24.0, 0.5, 26.0, 0.5, 28.0, 0.5, 30.0, 0.5, 32.0, 0.5, 34.0, 0.5,
+            3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0,
+            1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0,
         );
         assert_eq_m512h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm512_maskz_div_round_ph() {
+    unsafe fn test_mm512_maskz_fnmadd_round_ph() {
         let a = _mm512_set1_ph(1.0);
         let b = _mm512_set1_ph(2.0);
-        let r = _mm512_maskz_div_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+        let c = _mm512_set1_ph(3.0);
+        let r = _mm512_maskz_fnmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
             0b01010101010101010101010101010101,
             a,
             b,
+            c,
         );
         let e = _mm512_set_ph(
-            0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0,
-            0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5,
+            0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0,
+            1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0,
         );
         assert_eq_m512h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_div_round_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let r = _mm_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b);
-        let e = _mm_set_sh(0.5);
+    unsafe fn test_mm_fnmadd_sh() {
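+        // Scalar fnmadd: lane 0 computes -(a * b) + c == 1.0; lanes 1-7 are copied from a.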
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_fnmadd_sh(a, b, c);
+        let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask_div_round_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let src = _mm_set_sh(4.0);
-        let r = _mm_mask_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            src, 0, a, b,
-        );
-        let e = _mm_set_sh(4.0);
+    unsafe fn test_mm_mask_fnmadd_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_mask_fnmadd_sh(a, 0, b, c);
+        let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
-        let r = _mm_mask_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
-            src, 1, a, b,
-        );
-        let e = _mm_set_sh(0.5);
+        let r = _mm_mask_fnmadd_sh(a, 1, b, c);
+        let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_maskz_div_round_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let r =
-            _mm_maskz_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b);
-        let e = _mm_set_sh(0.0);
+    unsafe fn test_mm_mask3_fnmadd_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_mask3_fnmadd_sh(a, b, c, 0);
+        let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
         assert_eq_m128h(r, e);
-        let r =
-            _mm_maskz_div_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b);
-        let e = _mm_set_sh(0.5);
+        let r = _mm_mask3_fnmadd_sh(a, b, c, 1);
+        let e = _mm_setr_ph(1.0, 30., 31., 32., 33., 34., 35., 36.);
+        assert_eq_m128h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm_maskz_fnmadd_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_maskz_fnmadd_sh(0, a, b, c);
+        let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.);
+        assert_eq_m128h(r, e);
+        let r = _mm_maskz_fnmadd_sh(1, a, b, c);
+        let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        assert_eq_m128h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm_fnmadd_round_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c);
+        let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        assert_eq_m128h(r, e);
+    }
+
+    #[simd_test(enable = "avx512fp16")]
+    unsafe fn test_mm_mask_fnmadd_round_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_mask_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            a, 0, b, c,
+        );
+        let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        assert_eq_m128h(r, e);
+        let r = _mm_mask_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            a, 1, b, c,
+        );
+        let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_div_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let r = _mm_div_sh(a, b);
-        let e = _mm_set_sh(0.5);
+    unsafe fn test_mm_mask3_fnmadd_round_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_mask3_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            a, b, c, 0,
+        );
+        let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        assert_eq_m128h(r, e);
+        let r = _mm_mask3_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            a, b, c, 1,
+        );
+        let e = _mm_setr_ph(1.0, 30., 31., 32., 33., 34., 35., 36.);
         assert_eq_m128h(r, e);
     }

     #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_mask_div_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let src = _mm_set_sh(4.0);
-        let r = _mm_mask_div_sh(src, 0, a, b);
-        let e = _mm_set_sh(4.0);
+    unsafe fn test_mm_maskz_fnmadd_round_sh() {
+        let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
+        let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.);
+        let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.);
+        let r = _mm_maskz_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            0, a, b, c,
+        );
+        let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
-        let r = _mm_mask_div_sh(src, 1, a, b);
-        let e = _mm_set_sh(0.5);
+        let r = _mm_maskz_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(
+            1, a, b, c,
+        );
+        let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.);
         assert_eq_m128h(r, e);
     }

-    #[simd_test(enable = "avx512fp16")]
-    unsafe fn test_mm_maskz_div_sh() {
-        let a = _mm_set_sh(1.0);
-        let b = _mm_set_sh(2.0);
-        let r = _mm_maskz_div_sh(0, a, b);
-        let e = _mm_set_sh(0.0);
-        assert_eq_m128h(r, e);
-        let r = _mm_maskz_div_sh(1, a, b);
-        let e = _mm_set_sh(0.5);
+    #[simd_test(enable = "avx512fp16,avx512vl")]
+    unsafe fn test_mm_fnmsub_ph() {
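+        // fnmsub negates both product and addend: each lane computes -(a * b) - c == -5.0.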
c = _mm_set1_ph(3.0); + let r = _mm_fnmsub_ph(a, b, c); + let e = _mm_set1_ph(-5.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mul_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, 1.0); - let r = _mm_mul_pch(a, b); - let e = _mm_set1_pch(-1.0, 0.0); + unsafe fn test_mm_mask_fnmsub_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_mask_fnmsub_ph(a, 0b01010101, b, c); + let e = _mm_set_ph(1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_mul_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, 1.0); - let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0); - let r = _mm_mask_mul_pch(src, 0b0101, a, b); - let e = _mm_setr_ph(-1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0); + unsafe fn test_mm_mask3_fnmsub_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_mask3_fnmsub_ph(a, b, c, 0b01010101); + let e = _mm_set_ph(3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_mul_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, 1.0); - let r = _mm_maskz_mul_pch(0b0101, a, b); - let e = _mm_setr_ph(-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0); + unsafe fn test_mm_maskz_fnmsub_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_maskz_fnmsub_ph(0b01010101, a, b, c); + let e = _mm_set_ph(0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mul_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 1.0); - let r = _mm256_mul_pch(a, b); - let e = _mm256_set1_pch(-1.0, 0.0); + unsafe fn test_mm256_fnmsub_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_fnmsub_ph(a, b, c); + let e = _mm256_set1_ph(-5.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_mul_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 1.0); - let src = _mm256_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + unsafe fn test_mm256_mask_fnmsub_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_mask_fnmsub_ph(a, 0b0101010101010101, b, c); + let e = _mm256_set_ph( + 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, ); - let r = _mm256_mask_mul_pch(src, 0b01010101, a, b); - let e = _mm256_setr_ph( - -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask3_fnmsub_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_mask3_fnmsub_ph(a, b, c, 0b0101010101010101); + let e = _mm256_set_ph( + 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_mul_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 1.0); - let r = _mm256_maskz_mul_pch(0b01010101, a, b); - let e = 
_mm256_setr_ph( - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + unsafe fn test_mm256_maskz_fnmsub_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_maskz_fnmsub_ph(0b0101010101010101, a, b, c); + let e = _mm256_set_ph( + 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mul_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let r = _mm512_mul_pch(a, b); - let e = _mm512_set1_pch(-1.0, 0.0); + unsafe fn test_mm512_fnmsub_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_fnmsub_ph(a, b, c); + let e = _mm512_set1_ph(-5.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_mul_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let src = _mm512_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, - 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, - 32.0, 33.0, + unsafe fn test_mm512_mask_fnmsub_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask_fnmsub_ph(a, 0b01010101010101010101010101010101, b, c); + let e = _mm512_set_ph( + 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, + 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, ); - let r = _mm512_mask_mul_pch(src, 0b0101010101010101, a, b); - let e = _mm512_setr_ph( - -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, - -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, - 33.0, + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask3_fnmsub_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask3_fnmsub_ph(a, b, c, 0b01010101010101010101010101010101); + let e = _mm512_set_ph( + 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, + 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_mul_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let r = _mm512_maskz_mul_pch(0b0101010101010101, a, b); - let e = _mm512_setr_ph( - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + unsafe fn test_mm512_maskz_fnmsub_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_maskz_fnmsub_ph(0b01010101010101010101010101010101, a, b, c); + let e = _mm512_set_ph( + 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, + 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mul_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let r = _mm512_mul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC 
}>(a, b); - let e = _mm512_set1_pch(-1.0, 0.0); + unsafe fn test_mm512_fnmsub_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = + _mm512_fnmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm512_set1_ph(-5.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_mul_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let src = _mm512_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, - 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, - 32.0, 33.0, + unsafe fn test_mm512_mask_fnmsub_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask_fnmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, + 0b01010101010101010101010101010101, + b, + c, ); - let r = _mm512_mask_mul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, - 0b0101010101010101, + let e = _mm512_set_ph( + 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, + 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask3_fnmsub_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask3_fnmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, b, + c, + 0b01010101010101010101010101010101, ); - let e = _mm512_setr_ph( - -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, - -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, - 33.0, + let e = _mm512_set_ph( + 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, + 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_mul_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let r = _mm512_maskz_mul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b0101010101010101, + unsafe fn test_mm512_maskz_fnmsub_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_maskz_fnmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101010101010101010101010101, a, b, + c, ); - let e = _mm512_setr_ph( - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + let e = _mm512_set_ph( + 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, + 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mul_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let r = _mm_mul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_fnmsub_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 
13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_fnmsub_sh(a, b, c); + let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_mul_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); - let r = _mm_mask_mul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 0, a, b, - ); - let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_mask_fnmsub_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_fnmsub_sh(a, 0, b, c); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + let r = _mm_mask_fnmsub_sh(a, 1, b, c); + let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_mul_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let r = - _mm_maskz_mul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); - let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_mask3_fnmsub_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask3_fnmsub_sh(a, b, c, 0); + let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + assert_eq_m128h(r, e); + let r = _mm_mask3_fnmsub_sh(a, b, c, 1); + let e = _mm_setr_ph(-5.0, 30., 31., 32., 33., 34., 35., 36.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mul_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let r = _mm_mul_sch(a, b); - let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_maskz_fnmsub_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_maskz_fnmsub_sh(0, a, b, c); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + let r = _mm_maskz_fnmsub_sh(1, a, b, c); + let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_mul_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); - let r = _mm_mask_mul_sch(src, 0, a, b); - let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_fnmsub_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC 
}>(a, b, c); + let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_mul_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let r = _mm_maskz_mul_sch(0, a, b); - let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_mask_fnmsub_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, 0, b, c, + ); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + let r = _mm_mask_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, 1, b, c, + ); + let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask3_fnmsub_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask3_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, b, c, 0, + ); + let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + assert_eq_m128h(r, e); + let r = _mm_mask3_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + a, b, c, 1, + ); + let e = _mm_setr_ph(-5.0, 30., 31., 32., 33., 34., 35., 36.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_fnmsub_round_sh() { + let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); + let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_maskz_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0, a, b, c, + ); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + let r = _mm_maskz_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 1, a, b, c, + ); + let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_fmul_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, 1.0); - let r = _mm_fmul_pch(a, b); - let e = _mm_set1_pch(-1.0, 0.0); + unsafe fn test_mm_fmaddsub_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_fmaddsub_ph(a, b, c); + let e = _mm_set_ph(5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fmul_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, 1.0); - let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0); - let r = _mm_mask_fmul_pch(src, 0b0101, a, b); - let e = _mm_setr_ph(-1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0); + unsafe fn test_mm_mask_fmaddsub_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_mask_fmaddsub_ph(a, 0b00110011, b, c); + let e = _mm_set_ph(1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_fmul_pch() { - let a = _mm_set1_pch(0.0, 1.0); 
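+    // Lane convention assumed by the fmaddsub/fmsubadd tests around here (per
+    // Intel's definition of these intrinsics): `fmaddsub` computes `a * b - c`
+    // in even-indexed lanes and `a * b + c` in odd-indexed lanes; `fmsubadd`
+    // swaps the two. With a = 1.0, b = 2.0 and c = 3.0 the lanes therefore
+    // alternate between -1.0 and 5.0. Recall that `_mm_set_ph` lists lanes
+    // from highest to lowest (lane 0 is the *last* argument), while
+    // `_mm_setr_ph` lists them from lowest to highest.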
- let b = _mm_set1_pch(0.0, 1.0); - let r = _mm_maskz_fmul_pch(0b0101, a, b); - let e = _mm_setr_ph(-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0); + unsafe fn test_mm_mask3_fmaddsub_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_mask3_fmaddsub_ph(a, b, c, 0b00110011); + let e = _mm_set_ph(3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_fmul_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 1.0); - let r = _mm256_fmul_pch(a, b); - let e = _mm256_set1_pch(-1.0, 0.0); + unsafe fn test_mm_maskz_fmaddsub_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_maskz_fmaddsub_ph(0b00110011, a, b, c); + let e = _mm_set_ph(0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_fmaddsub_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_fmaddsub_ph(a, b, c); + let e = _mm256_set_ph( + 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, + ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_fmul_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 1.0); - let src = _mm256_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, + unsafe fn test_mm256_mask_fmaddsub_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_mask_fmaddsub_ph(a, 0b0011001100110011, b, c); + let e = _mm256_set_ph( + 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, ); - let r = _mm256_mask_fmul_pch(src, 0b01010101, a, b); - let e = _mm256_setr_ph( - -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask3_fmaddsub_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_mask3_fmaddsub_ph(a, b, c, 0b0011001100110011); + let e = _mm256_set_ph( + 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_fmul_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 1.0); - let r = _mm256_maskz_fmul_pch(0b01010101, a, b); - let e = _mm256_setr_ph( - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + unsafe fn test_mm256_maskz_fmaddsub_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_maskz_fmaddsub_ph(0b0011001100110011, a, b, c); + let e = _mm256_set_ph( + 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmul_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let r = _mm512_fmul_pch(a, b); - let e = _mm512_set1_pch(-1.0, 0.0); + unsafe fn test_mm512_fmaddsub_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_fmaddsub_ph(a, b, c); + let e = _mm512_set_ph( + 5.0, -1.0, 
5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, + 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_fmaddsub_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask_fmaddsub_ph(a, 0b00110011001100110011001100110011, b, c); + let e = _mm512_set_ph( + 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, + 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, + ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fmul_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let src = _mm512_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, - 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, - 32.0, 33.0, - ); - let r = _mm512_mask_fmul_pch(src, 0b0101010101010101, a, b); - let e = _mm512_setr_ph( - -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, - -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, - 33.0, + unsafe fn test_mm512_mask3_fmaddsub_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask3_fmaddsub_ph(a, b, c, 0b00110011001100110011001100110011); + let e = _mm512_set_ph( + 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, + 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmul_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let r = _mm512_maskz_fmul_pch(0b0101010101010101, a, b); - let e = _mm512_setr_ph( - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + unsafe fn test_mm512_maskz_fmaddsub_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_maskz_fmaddsub_ph(0b00110011001100110011001100110011, a, b, c); + let e = _mm512_set_ph( + 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, + 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmul_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let r = _mm512_fmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm512_set1_pch(-1.0, 0.0); + unsafe fn test_mm512_fmaddsub_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = + _mm512_fmaddsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm512_set_ph( + 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, + 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, + ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fmul_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let src = 
_mm512_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, - 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, - 32.0, 33.0, - ); - let r = _mm512_mask_fmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, - 0b0101010101010101, + unsafe fn test_mm512_mask_fmaddsub_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask_fmaddsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, + 0b00110011001100110011001100110011, b, + c, ); - let e = _mm512_setr_ph( - -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, - -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, - 33.0, + let e = _mm512_set_ph( + 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, + 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmul_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 1.0); - let r = _mm512_maskz_fmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b0101010101010101, + unsafe fn test_mm512_mask3_fmaddsub_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask3_fmaddsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, b, + c, + 0b00110011001100110011001100110011, ); - let e = _mm512_setr_ph( - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + let e = _mm512_set_ph( + 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, + 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fmul_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let r = _mm_fmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fmul_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); - let r = _mm_mask_fmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 0, a, b, + unsafe fn test_mm512_maskz_fmaddsub_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_maskz_fmaddsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b00110011001100110011001100110011, + a, + b, + c, ); - let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fmul_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let r = - _mm_maskz_fmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); - let e = _mm_setr_ph(0.0, 
0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); + let e = _mm512_set_ph( + 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, + 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, + ); + assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fmul_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let r = _mm_fmul_sch(a, b); - let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_fmsubadd_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_fmsubadd_ph(a, b, c); + let e = _mm_set_ph(-1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fmul_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); - let r = _mm_mask_fmul_sch(src, 0, a, b); - let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_fmsubadd_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_mask_fmsubadd_ph(a, 0b00110011, b, c); + let e = _mm_set_ph(1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fmul_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 1.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let r = _mm_maskz_fmul_sch(0, a, b); - let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask3_fmsubadd_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_mask3_fmsubadd_ph(a, b, c, 0b00110011); + let e = _mm_set_ph(3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_cmul_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, -1.0); - let r = _mm_cmul_pch(a, b); - let e = _mm_set1_pch(-1.0, 0.0); + unsafe fn test_mm_maskz_fmsubadd_ph() { + let a = _mm_set1_ph(1.0); + let b = _mm_set1_ph(2.0); + let c = _mm_set1_ph(3.0); + let r = _mm_maskz_fmsubadd_ph(0b00110011, a, b, c); + let e = _mm_set_ph(0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_cmul_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, -1.0); - let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0); - let r = _mm_mask_cmul_pch(src, 0b0101, a, b); - let e = _mm_setr_ph(-1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0); - assert_eq_m128h(r, e); + unsafe fn test_mm256_fmsubadd_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_fmsubadd_ph(a, b, c); + let e = _mm256_set_ph( + -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, + ); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_cmul_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, -1.0); - let r = _mm_maskz_cmul_pch(0b0101, a, 
b); - let e = _mm_setr_ph(-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0); - assert_eq_m128h(r, e); + unsafe fn test_mm256_mask_fmsubadd_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_mask_fmsubadd_ph(a, 0b0011001100110011, b, c); + let e = _mm256_set_ph( + 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, + ); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_cmul_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, -1.0); - let r = _mm256_cmul_pch(a, b); - let e = _mm256_set1_pch(-1.0, 0.0); + unsafe fn test_mm256_mask3_fmsubadd_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_mask3_fmsubadd_ph(a, b, c, 0b0011001100110011); + let e = _mm256_set_ph( + 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, + ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_cmul_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, -1.0); - let src = _mm256_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, - ); - let r = _mm256_mask_cmul_pch(src, 0b01010101, a, b); - let e = _mm256_setr_ph( - -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, + unsafe fn test_mm256_maskz_fmsubadd_ph() { + let a = _mm256_set1_ph(1.0); + let b = _mm256_set1_ph(2.0); + let c = _mm256_set1_ph(3.0); + let r = _mm256_maskz_fmsubadd_ph(0b0011001100110011, a, b, c); + let e = _mm256_set_ph( + 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, ); assert_eq_m256h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_cmul_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, -1.0); - let r = _mm256_maskz_cmul_pch(0b01010101, a, b); - let e = _mm256_setr_ph( - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_fmsubadd_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_fmsubadd_ph(a, b, c); + let e = _mm512_set_ph( + -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, + -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, ); - assert_eq_m256h(r, e); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_cmul_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, -1.0); - let r = _mm512_cmul_pch(a, b); - let e = _mm512_set1_pch(-1.0, 0.0); + unsafe fn test_mm512_mask_fmsubadd_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask_fmsubadd_ph(a, 0b00110011001100110011001100110011, b, c); + let e = _mm512_set_ph( + 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, + 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, + ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_cmul_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, -1.0); - let src = _mm512_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, - 18.0, 19.0, 20.0, 
21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, - 32.0, 33.0, - ); - let r = _mm512_mask_cmul_pch(src, 0b0101010101010101, a, b); - let e = _mm512_setr_ph( - -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, - -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, - 33.0, + unsafe fn test_mm512_mask3_fmsubadd_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask3_fmsubadd_ph(a, b, c, 0b00110011001100110011001100110011); + let e = _mm512_set_ph( + 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, + 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_cmul_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, -1.0); - let r = _mm512_maskz_cmul_pch(0b0101010101010101, a, b); - let e = _mm512_setr_ph( - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + unsafe fn test_mm512_maskz_fmsubadd_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_maskz_fmsubadd_ph(0b00110011001100110011001100110011, a, b, c); + let e = _mm512_set_ph( + 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, + 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_cmul_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, -1.0); - let r = _mm512_cmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm512_set1_pch(-1.0, 0.0); + unsafe fn test_mm512_fmsubadd_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = + _mm512_fmsubadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + let e = _mm512_set_ph( + -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, + -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, + ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_cmul_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, -1.0); - let src = _mm512_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, - 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, - 32.0, 33.0, - ); - let r = _mm512_mask_cmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, - 0b0101010101010101, + unsafe fn test_mm512_mask_fmsubadd_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask_fmsubadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, + 0b00110011001100110011001100110011, b, + c, ); - let e = _mm512_setr_ph( - -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, - -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, - 33.0, + let e = _mm512_set_ph( + 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, + 1.0, 1.0, -1.0, 5.0, 1.0, 
1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_cmul_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, -1.0); - let r = _mm512_maskz_cmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b0101010101010101, + unsafe fn test_mm512_mask3_fmsubadd_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_mask3_fmsubadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, b, + c, + 0b00110011001100110011001100110011, ); - let e = _mm512_setr_ph( - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + let e = _mm512_set_ph( + 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, + 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_cmul_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); - let r = _mm_cmul_sch(a, b); - let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_cmul_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); - let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); - let r = _mm_mask_cmul_sch(src, 0, a, b); - let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_cmul_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); - let r = _mm_maskz_cmul_sch(0, a, b); - let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_cmul_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); - let r = _mm_cmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_cmul_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); - let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); - let r = _mm_mask_cmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 0, a, b, + unsafe fn test_mm512_maskz_fmsubadd_round_ph() { + let a = _mm512_set1_ph(1.0); + let b = _mm512_set1_ph(2.0); + let c = _mm512_set1_ph(3.0); + let r = _mm512_maskz_fmsubadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b00110011001100110011001100110011, + a, + b, + c, ); - let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_cmul_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, 
-13.0); - let r = - _mm_maskz_cmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); - let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); + let e = _mm512_set_ph( + 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, + 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_fcmul_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, -1.0); - let r = _mm_fcmul_pch(a, b); - let e = _mm_set1_pch(-1.0, 0.0); + unsafe fn test_mm_rcp_ph() { + let a = _mm_set1_ph(2.0); + let r = _mm_rcp_ph(a); + let e = _mm_set1_ph(0.5); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fcmul_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, -1.0); - let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0); - let r = _mm_mask_fcmul_pch(src, 0b0101, a, b); - let e = _mm_setr_ph(-1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0); + unsafe fn test_mm_mask_rcp_ph() { + let a = _mm_set1_ph(2.0); + let src = _mm_set1_ph(1.0); + let r = _mm_mask_rcp_ph(src, 0b01010101, a); + let e = _mm_set_ph(1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_fcmul_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, -1.0); - let r = _mm_maskz_fcmul_pch(0b0101, a, b); - let e = _mm_setr_ph(-1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0); + unsafe fn test_mm_maskz_rcp_ph() { + let a = _mm_set1_ph(2.0); + let r = _mm_maskz_rcp_ph(0b01010101, a); + let e = _mm_set_ph(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_fcmul_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, -1.0); - let r = _mm256_fcmul_pch(a, b); - let e = _mm256_set1_pch(-1.0, 0.0); - assert_eq_m256h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_fcmul_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, -1.0); - let src = _mm256_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, - ); - let r = _mm256_mask_fcmul_pch(src, 0b01010101, a, b); - let e = _mm256_setr_ph( - -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, - ); - assert_eq_m256h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_fcmul_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, -1.0); - let r = _mm256_maskz_fcmul_pch(0b01010101, a, b); - let e = _mm256_setr_ph( - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, - ); - assert_eq_m256h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fcmul_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, -1.0); - let r = _mm512_fcmul_pch(a, b); - let e = _mm512_set1_pch(-1.0, 0.0); - assert_eq_m512h(r, e); + unsafe fn test_mm256_rcp_ph() { + let a = _mm256_set1_ph(2.0); + let r = _mm256_rcp_ph(a); + let e = _mm256_set1_ph(0.5); + assert_eq_m256h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fcmul_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, -1.0); - let src = _mm512_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 
12.0, 13.0, 14.0, 15.0, 16.0, 17.0, - 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, - 32.0, 33.0, - ); - let r = _mm512_mask_fcmul_pch(src, 0b0101010101010101, a, b); - let e = _mm512_setr_ph( - -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, - -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, - 33.0, + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_rcp_ph() { + let a = _mm256_set1_ph(2.0); + let src = _mm256_set1_ph(1.0); + let r = _mm256_mask_rcp_ph(src, 0b0101010101010101, a); + let e = _mm256_set_ph( + 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, ); - assert_eq_m512h(r, e); + assert_eq_m256h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fcmul_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, -1.0); - let r = _mm512_maskz_fcmul_pch(0b0101010101010101, a, b); - let e = _mm512_setr_ph( - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_rcp_ph() { + let a = _mm256_set1_ph(2.0); + let r = _mm256_maskz_rcp_ph(0b0101010101010101, a); + let e = _mm256_set_ph( + 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, ); - assert_eq_m512h(r, e); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fcmul_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, -1.0); - let r = _mm512_fcmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm512_set1_pch(-1.0, 0.0); + unsafe fn test_mm512_rcp_ph() { + let a = _mm512_set1_ph(2.0); + let r = _mm512_rcp_ph(a); + let e = _mm512_set1_ph(0.5); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fcmul_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, -1.0); - let src = _mm512_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, - 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, - 32.0, 33.0, - ); - let r = _mm512_mask_fcmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, - 0b0101010101010101, - a, - b, - ); - let e = _mm512_setr_ph( - -1.0, 0.0, 4.0, 5.0, -1.0, 0.0, 8.0, 9.0, -1.0, 0.0, 12.0, 13.0, -1.0, 0.0, 16.0, 17.0, - -1.0, 0.0, 20.0, 21.0, -1.0, 0.0, 24.0, 25.0, -1.0, 0.0, 28.0, 29.0, -1.0, 0.0, 32.0, - 33.0, + unsafe fn test_mm512_mask_rcp_ph() { + let a = _mm512_set1_ph(2.0); + let src = _mm512_set1_ph(1.0); + let r = _mm512_mask_rcp_ph(src, 0b01010101010101010101010101010101, a); + let e = _mm512_set_ph( + 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, + 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fcmul_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, -1.0); - let r = _mm512_maskz_fcmul_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b0101010101010101, - a, - b, - ); - let e = _mm512_setr_ph( - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, - -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 
0.0, -1.0, 0.0, 0.0, 0.0, + unsafe fn test_mm512_maskz_rcp_ph() { + let a = _mm512_set1_ph(2.0); + let r = _mm512_maskz_rcp_ph(0b01010101010101010101010101010101, a); + let e = _mm512_set_ph( + 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, + 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fcmul_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); - let r = _mm_fcmul_sch(a, b); - let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_rcp_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let r = _mm_rcp_sh(a, b); + let e = _mm_setr_ph(0.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fcmul_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); - let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); - let r = _mm_mask_fcmul_sch(src, 0, a, b); - let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_mask_rcp_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); + let r = _mm_mask_rcp_sh(src, 0, a, b); + let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + let r = _mm_mask_rcp_sh(src, 1, a, b); + let e = _mm_setr_ph(0.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fcmul_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); - let r = _mm_maskz_fcmul_sch(0, a, b); - let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_maskz_rcp_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let r = _mm_maskz_rcp_sh(0, a, b); + let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + let r = _mm_maskz_rcp_sh(1, a, b); + let e = _mm_setr_ph(0.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fcmul_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); - let r = _mm_fcmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm_setr_ph(-1.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_rsqrt_ph() { + let a = _mm_set1_ph(4.0); + let r = _mm_rsqrt_ph(a); + let e = _mm_set1_ph(0.5); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fcmul_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); - let src = _mm_setr_ph(14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0); - let r = _mm_mask_fcmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | 
_MM_FROUND_NO_EXC }>( - src, 0, a, b, - ); - let e = _mm_setr_ph(14.0, 15.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_rsqrt_ph() { + let a = _mm_set1_ph(4.0); + let src = _mm_set1_ph(1.0); + let r = _mm_mask_rsqrt_ph(src, 0b01010101, a); + let e = _mm_set_ph(1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fcmul_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, -1.0, 8.0, -9.0, 10.0, -11.0, 12.0, -13.0); - let r = - _mm_maskz_fcmul_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); - let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_rsqrt_ph() { + let a = _mm_set1_ph(4.0); + let r = _mm_maskz_rsqrt_ph(0b01010101, a); + let e = _mm_set_ph(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_abs_ph() { - let a = _mm_set_ph(-1.0, 0.0, 1.0, -2.0, 3.0, -4.0, 5.0, -6.0); - let r = _mm_abs_ph(a); - let e = _mm_set_ph(1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0); - assert_eq_m128h(r, e); + unsafe fn test_mm256_rsqrt_ph() { + let a = _mm256_set1_ph(4.0); + let r = _mm256_rsqrt_ph(a); + let e = _mm256_set1_ph(0.5); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_abs_ph() { - let a = _mm256_set_ph( - -1.0, 0.0, 1.0, -2.0, 3.0, -4.0, 5.0, -6.0, 7.0, -8.0, 9.0, -10.0, 11.0, -12.0, 13.0, - -14.0, + unsafe fn test_mm256_mask_rsqrt_ph() { + let a = _mm256_set1_ph(4.0); + let src = _mm256_set1_ph(1.0); + let r = _mm256_mask_rsqrt_ph(src, 0b0101010101010101, a); + let e = _mm256_set_ph( + 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, ); - let r = _mm256_abs_ph(a); + assert_eq_m256h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_rsqrt_ph() { + let a = _mm256_set1_ph(4.0); + let r = _mm256_maskz_rsqrt_ph(0b0101010101010101, a); let e = _mm256_set_ph( - 1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, + 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_abs_ph() { - let a = _mm512_set_ph( - -1.0, 0.0, 1.0, -2.0, 3.0, -4.0, 5.0, -6.0, 7.0, -8.0, 9.0, -10.0, 11.0, -12.0, 13.0, - -14.0, 15.0, -16.0, 17.0, -18.0, 19.0, -20.0, 21.0, -22.0, 23.0, -24.0, 25.0, -26.0, - 27.0, -28.0, 29.0, -30.0, + unsafe fn test_mm512_rsqrt_ph() { + let a = _mm512_set1_ph(4.0); + let r = _mm512_rsqrt_ph(a); + let e = _mm512_set1_ph(0.5); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_rsqrt_ph() { + let a = _mm512_set1_ph(4.0); + let src = _mm512_set1_ph(1.0); + let r = _mm512_mask_rsqrt_ph(src, 0b01010101010101010101010101010101, a); + let e = _mm512_set_ph( + 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, + 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, ); - let r = _mm512_abs_ph(a); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_rsqrt_ph() { + let a = _mm512_set1_ph(4.0); + let r = _mm512_maskz_rsqrt_ph(0b01010101010101010101010101010101, a); let e = _mm512_set_ph( - 1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 
11.0, 12.0, 13.0, 14.0, - 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, - 29.0, 30.0, + 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, + 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, ); assert_eq_m512h(r, e); } + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_rsqrt_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); + let r = _mm_rsqrt_sh(a, b); + let e = _mm_setr_ph(0.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_rsqrt_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); + let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); + let r = _mm_mask_rsqrt_sh(src, 0, a, b); + let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + let r = _mm_mask_rsqrt_sh(src, 1, a, b); + let e = _mm_setr_ph(0.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_rsqrt_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); + let r = _mm_maskz_rsqrt_sh(0, a, b); + let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + let r = _mm_maskz_rsqrt_sh(1, a, b); + let e = _mm_setr_ph(0.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + } + #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_conj_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let r = _mm_conj_pch(a); - let e = _mm_set1_pch(0.0, -1.0); + unsafe fn test_mm_sqrt_ph() { + let a = _mm_set1_ph(4.0); + let r = _mm_sqrt_ph(a); + let e = _mm_set1_ph(2.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_conj_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let src = _mm_setr_ph(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0); - let r = _mm_mask_conj_pch(src, 0b0101, a); - let e = _mm_setr_ph(0.0, -1.0, 4.0, 5.0, 0.0, -1.0, 8.0, 9.0); + unsafe fn test_mm_mask_sqrt_ph() { + let a = _mm_set1_ph(4.0); + let src = _mm_set1_ph(1.0); + let r = _mm_mask_sqrt_ph(src, 0b01010101, a); + let e = _mm_set_ph(1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_conj_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let r = _mm_maskz_conj_pch(0b0101, a); - let e = _mm_setr_ph(0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0); + unsafe fn test_mm_maskz_sqrt_ph() { + let a = _mm_set1_ph(4.0); + let r = _mm_maskz_sqrt_ph(0b01010101, a); + let e = _mm_set_ph(0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_conj_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let r = _mm256_conj_pch(a); - let e = _mm256_set1_pch(0.0, -1.0); + unsafe fn test_mm256_sqrt_ph() { + let a = _mm256_set1_ph(4.0); + let r = _mm256_sqrt_ph(a); + let e = _mm256_set1_ph(2.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_conj_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let src = _mm256_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, - 
); - let r = _mm256_mask_conj_pch(src, 0b01010101, a); - let e = _mm256_setr_ph( - 0.0, -1.0, 4.0, 5.0, 0.0, -1.0, 8.0, 9.0, 0.0, -1.0, 12.0, 13.0, 0.0, -1.0, 16.0, 17.0, + unsafe fn test_mm256_mask_sqrt_ph() { + let a = _mm256_set1_ph(4.0); + let src = _mm256_set1_ph(1.0); + let r = _mm256_mask_sqrt_ph(src, 0b0101010101010101, a); + let e = _mm256_set_ph( + 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_conj_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let r = _mm256_maskz_conj_pch(0b01010101, a); - let e = _mm256_setr_ph( - 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, + unsafe fn test_mm256_maskz_sqrt_ph() { + let a = _mm256_set1_ph(4.0); + let r = _mm256_maskz_sqrt_ph(0b0101010101010101, a); + let e = _mm256_set_ph( + 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_conj_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let r = _mm512_conj_pch(a); - let e = _mm512_set1_pch(0.0, -1.0); + unsafe fn test_mm512_sqrt_ph() { + let a = _mm512_set1_ph(4.0); + let r = _mm512_sqrt_ph(a); + let e = _mm512_set1_ph(2.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_conj_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let src = _mm512_setr_ph( - 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, - 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, - 32.0, 33.0, + unsafe fn test_mm512_mask_sqrt_ph() { + let a = _mm512_set1_ph(4.0); + let src = _mm512_set1_ph(1.0); + let r = _mm512_mask_sqrt_ph(src, 0b01010101010101010101010101010101, a); + let e = _mm512_set_ph( + 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, + 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, ); - let r = _mm512_mask_conj_pch(src, 0b0101010101010101, a); - let e = _mm512_setr_ph( - 0.0, -1.0, 4.0, 5.0, 0.0, -1.0, 8.0, 9.0, 0.0, -1.0, 12.0, 13.0, 0.0, -1.0, 16.0, 17.0, - 0.0, -1.0, 20.0, 21.0, 0.0, -1.0, 24.0, 25.0, 0.0, -1.0, 28.0, 29.0, 0.0, -1.0, 32.0, - 33.0, + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_sqrt_ph() { + let a = _mm512_set1_ph(4.0); + let r = _mm512_maskz_sqrt_ph(0b01010101010101010101010101010101, a); + let e = _mm512_set_ph( + 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, + 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_conj_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let r = _mm512_maskz_conj_pch(0b0101010101010101, a); - let e = _mm512_setr_ph( - 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, - 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, + unsafe fn test_mm512_sqrt_round_ph() { + let a = _mm512_set1_ph(4.0); + let r = _mm512_sqrt_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a); + let e = _mm512_set1_ph(2.0); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_sqrt_round_ph() { + let a = _mm512_set1_ph(4.0); + let src = _mm512_set1_ph(1.0); + let r = _mm512_mask_sqrt_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | 
_MM_FROUND_NO_EXC }>( + src, + 0b01010101010101010101010101010101, + a, + ); + let e = _mm512_set_ph( + 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, + 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, ); assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_fmadd_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, 2.0); - let c = _mm_set1_pch(0.0, 3.0); - let r = _mm_fmadd_pch(a, b, c); - let e = _mm_set1_pch(-2.0, 3.0); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_sqrt_round_ph() { + let a = _mm512_set1_ph(4.0); + let r = _mm512_maskz_sqrt_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101010101010101010101010101, + a, + ); + let e = _mm512_set_ph( + 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, + 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, + ); + assert_eq_m512h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_sqrt_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); + let r = _mm_sqrt_sh(a, b); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_sqrt_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); + let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); + let r = _mm_mask_sqrt_sh(src, 0, a, b); + let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + let r = _mm_mask_sqrt_sh(src, 1, a, b); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_sqrt_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); + let r = _mm_maskz_sqrt_sh(0, a, b); + let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + let r = _mm_maskz_sqrt_sh(1, a, b); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_sqrt_round_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); + let r = _mm_sqrt_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_sqrt_round_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); + let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); + let r = _mm_mask_sqrt_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, + ); + let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + let r = _mm_mask_sqrt_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 1, a, b, + ); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = 
"avx512fp16")] + unsafe fn test_mm_maskz_sqrt_round_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); + let r = + _mm_maskz_sqrt_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + assert_eq_m128h(r, e); + let r = + _mm_maskz_sqrt_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fmadd_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, 2.0); - let c = _mm_set1_pch(0.0, 3.0); - let r = _mm_mask_fmadd_pch(a, 0b0101, b, c); - let e = _mm_setr_ph(-2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0); + unsafe fn test_mm_max_ph() { + let a = _mm_set1_ph(2.0); + let b = _mm_set1_ph(1.0); + let r = _mm_max_ph(a, b); + let e = _mm_set1_ph(2.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask3_fmadd_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, 2.0); - let c = _mm_set1_pch(0.0, 3.0); - let r = _mm_mask3_fmadd_pch(a, b, c, 0b0101); - let e = _mm_setr_ph(-2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0); + unsafe fn test_mm_mask_max_ph() { + let a = _mm_set1_ph(2.0); + let b = _mm_set1_ph(1.0); + let src = _mm_set1_ph(3.0); + let r = _mm_mask_max_ph(src, 0b01010101, a, b); + let e = _mm_set_ph(3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_fmadd_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, 2.0); - let c = _mm_set1_pch(0.0, 3.0); - let r = _mm_maskz_fmadd_pch(0b0101, a, b, c); - let e = _mm_setr_ph(-2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0); + unsafe fn test_mm_maskz_max_ph() { + let a = _mm_set1_ph(2.0); + let b = _mm_set1_ph(1.0); + let r = _mm_maskz_max_ph(0b01010101, a, b); + let e = _mm_set_ph(0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_fmadd_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 2.0); - let c = _mm256_set1_pch(0.0, 3.0); - let r = _mm256_fmadd_pch(a, b, c); - let e = _mm256_set1_pch(-2.0, 3.0); - assert_eq_m256h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_fmadd_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 2.0); - let c = _mm256_set1_pch(0.0, 3.0); - let r = _mm256_mask_fmadd_pch(a, 0b01010101, b, c); - let e = _mm256_setr_ph( - -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, - ); + unsafe fn test_mm256_max_ph() { + let a = _mm256_set1_ph(2.0); + let b = _mm256_set1_ph(1.0); + let r = _mm256_max_ph(a, b); + let e = _mm256_set1_ph(2.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask3_fmadd_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 2.0); - let c = _mm256_set1_pch(0.0, 3.0); - let r = _mm256_mask3_fmadd_pch(a, b, c, 0b01010101); - let e = _mm256_setr_ph( - -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, + unsafe fn test_mm256_mask_max_ph() { + let a = _mm256_set1_ph(2.0); + let b = _mm256_set1_ph(1.0); + let src = _mm256_set1_ph(3.0); + let r = _mm256_mask_max_ph(src, 0b0101010101010101, a, b); + 
let e = _mm256_set_ph( + 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_fmadd_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 2.0); - let c = _mm256_set1_pch(0.0, 3.0); - let r = _mm256_maskz_fmadd_pch(0b01010101, a, b, c); - let e = _mm256_setr_ph( - -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, + unsafe fn test_mm256_maskz_max_ph() { + let a = _mm256_set1_ph(2.0); + let b = _mm256_set1_ph(1.0); + let r = _mm256_maskz_max_ph(0b0101010101010101, a, b); + let e = _mm256_set_ph( + 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmadd_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 2.0); - let c = _mm512_set1_pch(0.0, 3.0); - let r = _mm512_fmadd_pch(a, b, c); - let e = _mm512_set1_pch(-2.0, 3.0); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fmadd_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 2.0); - let c = _mm512_set1_pch(0.0, 3.0); - let r = _mm512_mask_fmadd_pch(a, 0b0101010101010101, b, c); - let e = _mm512_setr_ph( - -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, - -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, - ); + unsafe fn test_mm512_max_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let r = _mm512_max_ph(a, b); + let e = _mm512_set1_ph(2.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fmadd_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 2.0); - let c = _mm512_set1_pch(0.0, 3.0); - let r = _mm512_mask3_fmadd_pch(a, b, c, 0b0101010101010101); - let e = _mm512_setr_ph( - -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, - -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, + unsafe fn test_mm512_mask_max_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let src = _mm512_set1_ph(3.0); + let r = _mm512_mask_max_ph(src, 0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, + 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmadd_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 2.0); - let c = _mm512_set1_pch(0.0, 3.0); - let r = _mm512_maskz_fmadd_pch(0b0101010101010101, a, b, c); - let e = _mm512_setr_ph( - -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, - -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, + unsafe fn test_mm512_maskz_max_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let r = _mm512_maskz_max_ph(0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, + 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmadd_round_pch() { - let a = _mm512_set1_pch(0.0, 
1.0); - let b = _mm512_set1_pch(0.0, 2.0); - let c = _mm512_set1_pch(0.0, 3.0); - let r = - _mm512_fmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); - let e = _mm512_set1_pch(-2.0, 3.0); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fmadd_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 2.0); - let c = _mm512_set1_pch(0.0, 3.0); - let r = _mm512_mask_fmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, - 0b0101010101010101, - b, - c, - ); - let e = _mm512_setr_ph( - -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, - -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, -2.0, 3.0, 0.0, 1.0, - ); + unsafe fn test_mm512_max_round_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let r = _mm512_max_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set1_ph(2.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fmadd_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 2.0); - let c = _mm512_set1_pch(0.0, 3.0); - let r = _mm512_mask3_fmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_mask_max_round_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let src = _mm512_set1_ph(3.0); + let r = _mm512_mask_max_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b01010101010101010101010101010101, a, b, - c, - 0b0101010101010101, ); - let e = _mm512_setr_ph( - -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, - -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, -2.0, 3.0, 0.0, 3.0, + let e = _mm512_set_ph( + 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, + 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmadd_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 2.0); - let c = _mm512_set1_pch(0.0, 3.0); - let r = _mm512_maskz_fmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b0101010101010101, + unsafe fn test_mm512_maskz_max_round_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let r = _mm512_maskz_max_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101010101010101010101010101, a, b, - c, ); - let e = _mm512_setr_ph( - -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, - -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, -2.0, 3.0, 0.0, 0.0, + let e = _mm512_set_ph( + 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, + 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fmadd_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_fmadd_sch(a, b, c); - let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fmadd_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 
7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_mask_fmadd_sch(a, 0, b, c); - let e = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); - let r = _mm_mask_fmadd_sch(a, 1, b, c); - let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_max_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let r = _mm_max_sh(a, b); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask3_fmadd_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_mask3_fmadd_sch(a, b, c, 0); - let e = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + unsafe fn test_mm_mask_max_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); + let r = _mm_mask_max_sh(src, 0, a, b); + let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); - let r = _mm_mask3_fmadd_sch(a, b, c, 1); - let e = _mm_setr_ph(-2.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_mask_max_sh(src, 1, a, b); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fmadd_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_maskz_fmadd_sch(0, a, b, c); - let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); - let r = _mm_maskz_fmadd_sch(1, a, b, c); - let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_maskz_max_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let r = _mm_maskz_max_sh(0, a, b); + let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fmadd_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); - let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let r = _mm_maskz_max_sh(1, a, b); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fmadd_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_mask_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, 0, b, c, - ); - let e = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, 
e); - let r = _mm_mask_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, 1, b, c, - ); - let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_max_round_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let r = _mm_max_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask3_fmadd_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_mask3_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, b, c, 0, + unsafe fn test_mm_mask_max_round_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); + let r = _mm_mask_max_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, ); - let e = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); - let r = _mm_mask3_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, b, c, 1, + let r = _mm_mask_max_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 1, a, b, ); - let e = _mm_setr_ph(-2.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fmadd_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_maskz_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0, a, b, c, - ); - let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); - let r = _mm_maskz_fmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 1, a, b, c, - ); - let e = _mm_setr_ph(-2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_maskz_max_round_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let r = + _mm_maskz_max_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_fcmadd_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, 2.0); - let c = _mm_set1_pch(0.0, 3.0); - let r = _mm_fcmadd_pch(a, b, c); - let e = _mm_set1_pch(2.0, 3.0); + let r = + _mm_maskz_max_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fcmadd_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, 2.0); - let c = _mm_set1_pch(0.0, 3.0); - let r = _mm_mask_fcmadd_pch(a, 0b0101, b, c); - let e = _mm_setr_ph(2.0, 
3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0); + unsafe fn test_mm_min_ph() { + let a = _mm_set1_ph(2.0); + let b = _mm_set1_ph(1.0); + let r = _mm_min_ph(a, b); + let e = _mm_set1_ph(1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask3_fcmadd_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, 2.0); - let c = _mm_set1_pch(0.0, 3.0); - let r = _mm_mask3_fcmadd_pch(a, b, c, 0b0101); - let e = _mm_setr_ph(2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0); + unsafe fn test_mm_mask_min_ph() { + let a = _mm_set1_ph(2.0); + let b = _mm_set1_ph(1.0); + let src = _mm_set1_ph(3.0); + let r = _mm_mask_min_ph(src, 0b01010101, a, b); + let e = _mm_set_ph(3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_fcmadd_pch() { - let a = _mm_set1_pch(0.0, 1.0); - let b = _mm_set1_pch(0.0, 2.0); - let c = _mm_set1_pch(0.0, 3.0); - let r = _mm_maskz_fcmadd_pch(0b0101, a, b, c); - let e = _mm_setr_ph(2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0); + unsafe fn test_mm_maskz_min_ph() { + let a = _mm_set1_ph(2.0); + let b = _mm_set1_ph(1.0); + let r = _mm_maskz_min_ph(0b01010101, a, b); + let e = _mm_set_ph(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_fcmadd_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 2.0); - let c = _mm256_set1_pch(0.0, 3.0); - let r = _mm256_fcmadd_pch(a, b, c); - let e = _mm256_set1_pch(2.0, 3.0); - assert_eq_m256h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_fcmadd_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 2.0); - let c = _mm256_set1_pch(0.0, 3.0); - let r = _mm256_mask_fcmadd_pch(a, 0b01010101, b, c); - let e = _mm256_setr_ph( - 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, - ); + unsafe fn test_mm256_min_ph() { + let a = _mm256_set1_ph(2.0); + let b = _mm256_set1_ph(1.0); + let r = _mm256_min_ph(a, b); + let e = _mm256_set1_ph(1.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask3_fcmadd_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 2.0); - let c = _mm256_set1_pch(0.0, 3.0); - let r = _mm256_mask3_fcmadd_pch(a, b, c, 0b01010101); - let e = _mm256_setr_ph( - 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, + unsafe fn test_mm256_mask_min_ph() { + let a = _mm256_set1_ph(2.0); + let b = _mm256_set1_ph(1.0); + let src = _mm256_set1_ph(3.0); + let r = _mm256_mask_min_ph(src, 0b0101010101010101, a, b); + let e = _mm256_set_ph( + 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_fcmadd_pch() { - let a = _mm256_set1_pch(0.0, 1.0); - let b = _mm256_set1_pch(0.0, 2.0); - let c = _mm256_set1_pch(0.0, 3.0); - let r = _mm256_maskz_fcmadd_pch(0b01010101, a, b, c); - let e = _mm256_setr_ph( - 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, + unsafe fn test_mm256_maskz_min_ph() { + let a = _mm256_set1_ph(2.0); + let b = _mm256_set1_ph(1.0); + let r = _mm256_maskz_min_ph(0b0101010101010101, a, b); + let e = _mm256_set_ph( + 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn 
test_mm512_fcmadd_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 2.0); - let c = _mm512_set1_pch(0.0, 3.0); - let r = _mm512_fcmadd_pch(a, b, c); - let e = _mm512_set1_pch(2.0, 3.0); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fcmadd_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 2.0); - let c = _mm512_set1_pch(0.0, 3.0); - let r = _mm512_mask_fcmadd_pch(a, 0b0101010101010101, b, c); - let e = _mm512_setr_ph( - 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, - 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, - ); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fcmadd_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 2.0); - let c = _mm512_set1_pch(0.0, 3.0); - let r = _mm512_mask3_fcmadd_pch(a, b, c, 0b0101010101010101); - let e = _mm512_setr_ph( - 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, - 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, - ); + unsafe fn test_mm512_min_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let r = _mm512_min_ph(a, b); + let e = _mm512_set1_ph(1.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fcmadd_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 2.0); - let c = _mm512_set1_pch(0.0, 3.0); - let r = _mm512_maskz_fcmadd_pch(0b0101010101010101, a, b, c); - let e = _mm512_setr_ph( - 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, - 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, + unsafe fn test_mm512_mask_min_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let src = _mm512_set1_ph(3.0); + let r = _mm512_mask_min_ph(src, 0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, + 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fcmadd_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 2.0); - let c = _mm512_set1_pch(0.0, 3.0); - let r = - _mm512_fcmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); - let e = _mm512_set1_pch(2.0, 3.0); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fcmadd_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 2.0); - let c = _mm512_set1_pch(0.0, 3.0); - let r = _mm512_mask_fcmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, - 0b0101010101010101, - b, - c, - ); - let e = _mm512_setr_ph( - 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, - 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, + unsafe fn test_mm512_maskz_min_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let r = _mm512_maskz_min_ph(0b01010101010101010101010101010101, a, b); + let e = _mm512_set_ph( + 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, + 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn 
test_mm512_mask3_fcmadd_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 2.0); - let c = _mm512_set1_pch(0.0, 3.0); - let r = _mm512_mask3_fcmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, - b, - c, - 0b0101010101010101, - ); - let e = _mm512_setr_ph( - 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, - 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, 2.0, 3.0, 0.0, 3.0, - ); + unsafe fn test_mm512_min_round_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let r = _mm512_min_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set1_ph(1.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fcmadd_round_pch() { - let a = _mm512_set1_pch(0.0, 1.0); - let b = _mm512_set1_pch(0.0, 2.0); - let c = _mm512_set1_pch(0.0, 3.0); - let r = _mm512_maskz_fcmadd_round_pch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b0101010101010101, + unsafe fn test_mm512_mask_min_round_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let src = _mm512_set1_ph(3.0); + let r = _mm512_mask_min_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b01010101010101010101010101010101, a, b, - c, - ); - let e = _mm512_setr_ph( - 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, - 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, 2.0, 3.0, 0.0, 0.0, + ); + let e = _mm512_set_ph( + 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, + 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fcmadd_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_fcmadd_sch(a, b, c); - let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); + unsafe fn test_mm512_maskz_min_round_ph() { + let a = _mm512_set1_ph(2.0); + let b = _mm512_set1_ph(1.0); + let r = _mm512_maskz_min_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101010101010101010101010101, + a, + b, + ); + let e = _mm512_set_ph( + 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, + 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fcmadd_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_mask_fcmadd_sch(a, 0, b, c); - let e = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); - let r = _mm_mask_fcmadd_sch(a, 1, b, c); - let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_min_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let r = _mm_min_sh(a, b); + let e = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask3_fcmadd_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); 
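+    // The scalar `_sh` intrinsics operate on lane 0 only and pass lanes 1..=7
+    // through from the first operand, which is why every `_sh` expectation in
+    // these tests keeps `a`'s upper elements (10.0..=16.0) untouched. In the
+    // masked forms below, a clear mask bit 0 selects `src`'s lane 0 under
+    // merge-masking and 0.0 under zero-masking (`maskz`).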
- let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_mask3_fcmadd_sch(a, b, c, 0); - let e = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + unsafe fn test_mm_mask_min_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); + let r = _mm_mask_min_sh(src, 0, a, b); + let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); - let r = _mm_mask3_fcmadd_sch(a, b, c, 1); - let e = _mm_setr_ph(2.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let r = _mm_mask_min_sh(src, 1, a, b); + let e = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fcmadd_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_maskz_fcmadd_sch(0, a, b, c); - let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); - let r = _mm_maskz_fcmadd_sch(1, a, b, c); - let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_maskz_min_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let r = _mm_maskz_min_sh(0, a, b); + let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fcmadd_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); - let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + let r = _mm_maskz_min_sh(1, a, b); + let e = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fcmadd_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_mask_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, 0, b, c, - ); - let e = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); - let r = _mm_mask_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, 1, b, c, - ); - let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_min_round_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let r = _mm_min_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask3_fcmadd_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 
18.0, 19.0); - let r = _mm_mask3_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, b, c, 0, + unsafe fn test_mm_mask_min_round_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); + let r = _mm_mask_min_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, ); - let e = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); - let r = _mm_mask3_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, b, c, 1, + let r = _mm_mask_min_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 1, a, b, ); - let e = _mm_setr_ph(2.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); + let e = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fcmadd_round_sch() { - let a = _mm_setr_ph(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - let b = _mm_setr_ph(0.0, 2.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0); - let c = _mm_setr_ph(0.0, 3.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0); - let r = _mm_maskz_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0, a, b, c, - ); - let e = _mm_setr_ph(0.0, 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); - assert_eq_m128h(r, e); - let r = _mm_maskz_fcmadd_round_sch::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 1, a, b, c, - ); - let e = _mm_setr_ph(2.0, 3.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0); + unsafe fn test_mm_maskz_min_round_sh() { + let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); + let r = + _mm_maskz_min_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_fmadd_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_fmadd_ph(a, b, c); - let e = _mm_set1_ph(5.0); + let r = + _mm_maskz_min_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fmadd_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_mask_fmadd_ph(a, 0b01010101, b, c); - let e = _mm_set_ph(1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0); + unsafe fn test_mm_getexp_ph() { + let a = _mm_set1_ph(3.0); + let r = _mm_getexp_ph(a); + let e = _mm_set1_ph(1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask3_fmadd_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_mask3_fmadd_ph(a, b, c, 0b01010101); - let e = _mm_set_ph(3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0); + unsafe fn test_mm_mask_getexp_ph() { + let a = _mm_set1_ph(3.0); + let src = _mm_set1_ph(4.0); + let r = _mm_mask_getexp_ph(src, 0b01010101, a); + let e = _mm_set_ph(4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_fmadd_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = 
_mm_set1_ph(3.0); - let r = _mm_maskz_fmadd_ph(0b01010101, a, b, c); - let e = _mm_set_ph(0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0); + unsafe fn test_mm_maskz_getexp_ph() { + let a = _mm_set1_ph(3.0); + let r = _mm_maskz_getexp_ph(0b01010101, a); + let e = _mm_set_ph(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_fmadd_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_fmadd_ph(a, b, c); - let e = _mm256_set1_ph(5.0); - assert_eq_m256h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_fmadd_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_mask_fmadd_ph(a, 0b0101010101010101, b, c); - let e = _mm256_set_ph( - 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, - ); + unsafe fn test_mm256_getexp_ph() { + let a = _mm256_set1_ph(3.0); + let r = _mm256_getexp_ph(a); + let e = _mm256_set1_ph(1.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask3_fmadd_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_mask3_fmadd_ph(a, b, c, 0b0101010101010101); + unsafe fn test_mm256_mask_getexp_ph() { + let a = _mm256_set1_ph(3.0); + let src = _mm256_set1_ph(4.0); + let r = _mm256_mask_getexp_ph(src, 0b0101010101010101, a); let e = _mm256_set_ph( - 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, + 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_fmadd_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_maskz_fmadd_ph(0b0101010101010101, a, b, c); + unsafe fn test_mm256_maskz_getexp_ph() { + let a = _mm256_set1_ph(3.0); + let r = _mm256_maskz_getexp_ph(0b0101010101010101, a); let e = _mm256_set_ph( - 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, + 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, ); assert_eq_m256h(r, e); } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmadd_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_fmadd_ph(a, b, c); - let e = _mm512_set1_ph(5.0); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fmadd_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask_fmadd_ph(a, 0b01010101010101010101010101010101, b, c); - let e = _mm512_set_ph( - 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, - 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, - ); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fmadd_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask3_fmadd_ph(a, b, c, 0b01010101010101010101010101010101); - let e = _mm512_set_ph( - 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, - 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, - ); + + #[simd_test(enable = "avx512fp16")] + 
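+    // `getexp` extracts the unbiased exponent of each element as an `f16`,
+    // i.e. floor(log2(|x|)); since 3.0 = 1.5 * 2^1, every lane of the result
+    // below is expected to be 1.0.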
unsafe fn test_mm512_getexp_ph() { + let a = _mm512_set1_ph(3.0); + let r = _mm512_getexp_ph(a); + let e = _mm512_set1_ph(1.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmadd_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_maskz_fmadd_ph(0b01010101010101010101010101010101, a, b, c); + unsafe fn test_mm512_mask_getexp_ph() { + let a = _mm512_set1_ph(3.0); + let src = _mm512_set1_ph(4.0); + let r = _mm512_mask_getexp_ph(src, 0b01010101010101010101010101010101, a); let e = _mm512_set_ph( - 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, - 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, + 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, + 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmadd_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_fmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); - let e = _mm512_set1_ph(5.0); + unsafe fn test_mm512_maskz_getexp_ph() { + let a = _mm512_set1_ph(3.0); + let r = _mm512_maskz_getexp_ph(0b01010101010101010101010101010101, a); + let e = _mm512_set_ph( + 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, + 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, + ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fmadd_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask_fmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, - 0b01010101010101010101010101010101, - b, - c, - ); - let e = _mm512_set_ph( - 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, - 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0, - ); + unsafe fn test_mm512_getexp_round_ph() { + let a = _mm512_set1_ph(3.0); + let r = _mm512_getexp_round_ph::<_MM_FROUND_NO_EXC>(a); + let e = _mm512_set1_ph(1.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fmadd_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask3_fmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, - b, - c, + unsafe fn test_mm512_mask_getexp_round_ph() { + let a = _mm512_set1_ph(3.0); + let src = _mm512_set1_ph(4.0); + let r = _mm512_mask_getexp_round_ph::<_MM_FROUND_NO_EXC>( + src, 0b01010101010101010101010101010101, + a, ); let e = _mm512_set_ph( - 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, - 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0, + 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, + 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmadd_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_maskz_fmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_maskz_getexp_round_ph() { + let a = 
_mm512_set1_ph(3.0); + let r = _mm512_maskz_getexp_round_ph::<_MM_FROUND_NO_EXC>( 0b01010101010101010101010101010101, a, - b, - c, ); let e = _mm512_set_ph( - 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, - 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0, + 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, + 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fmadd_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_fmadd_sh(a, b, c); - let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fmadd_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_fmadd_sh(a, 0, b, c); + unsafe fn test_mm_getexp_sh() { + let a = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_getexp_sh(a, b); let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_mask_fmadd_sh(a, 1, b, c); - let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask3_fmadd_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask3_fmadd_sh(a, b, c, 0); - let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + unsafe fn test_mm_mask_getexp_sh() { + let a = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); + let src = _mm_setr_ph(4.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_getexp_sh(src, 0, a, b); + let e = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_mask3_fmadd_sh(a, b, c, 1); - let e = _mm_setr_ph(5.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_getexp_sh(src, 1, a, b); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fmadd_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_maskz_fmadd_sh(0, a, b, c); + unsafe fn test_mm_maskz_getexp_sh() { + let a = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_maskz_getexp_sh(0, a, b); let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_maskz_fmadd_sh(1, a, b, c); - let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fmadd_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 
33., 34., 35., 36.); - let r = _mm_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); - let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.); + let r = _mm_maskz_getexp_sh(1, a, b); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fmadd_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, 0, b, c, - ); + unsafe fn test_mm_getexp_round_sh() { + let a = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_getexp_round_sh::<_MM_FROUND_NO_EXC>(a, b); let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_mask_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, 1, b, c, - ); - let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask3_fmadd_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask3_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, b, c, 0, - ); - let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + unsafe fn test_mm_mask_getexp_round_sh() { + let a = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); + let src = _mm_setr_ph(4.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_getexp_round_sh::<_MM_FROUND_NO_EXC>(src, 0, a, b); + let e = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_mask3_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, b, c, 1, - ); - let e = _mm_setr_ph(5.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_getexp_round_sh::<_MM_FROUND_NO_EXC>(src, 1, a, b); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fmadd_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_maskz_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0, a, b, c, - ); + unsafe fn test_mm_maskz_getexp_round_sh() { + let a = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_maskz_getexp_round_sh::<_MM_FROUND_NO_EXC>(0, a, b); let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_maskz_fmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 1, a, b, c, - ); - let e = _mm_setr_ph(5.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_fmsub_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_fmsub_ph(a, b, c); - let e = _mm_set1_ph(-1.0); + let r = _mm_maskz_getexp_round_sh::<_MM_FROUND_NO_EXC>(1, a, b); + let e = 
_mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fmsub_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_mask_fmsub_ph(a, 0b01010101, b, c); - let e = _mm_set_ph(1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0); + unsafe fn test_mm_getmant_ph() { + let a = _mm_set1_ph(10.0); + let r = _mm_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(a); + let e = _mm_set1_ph(1.25); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask3_fmsub_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_mask3_fmsub_ph(a, b, c, 0b01010101); - let e = _mm_set_ph(3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0); + unsafe fn test_mm_mask_getmant_ph() { + let a = _mm_set1_ph(10.0); + let src = _mm_set1_ph(20.0); + let r = _mm_mask_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(src, 0b01010101, a); + let e = _mm_set_ph(20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_fmsub_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_maskz_fmsub_ph(0b01010101, a, b, c); - let e = _mm_set_ph(0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0); + unsafe fn test_mm_maskz_getmant_ph() { + let a = _mm_set1_ph(10.0); + let r = _mm_maskz_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(0b01010101, a); + let e = _mm_set_ph(0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_fmsub_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_fmsub_ph(a, b, c); - let e = _mm256_set1_ph(-1.0); - assert_eq_m256h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_fmsub_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_mask_fmsub_ph(a, 0b0101010101010101, b, c); - let e = _mm256_set_ph( - 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, - ); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_getmant_ph() { + let a = _mm256_set1_ph(10.0); + let r = _mm256_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(a); + let e = _mm256_set1_ph(1.25); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask3_fmsub_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_mask3_fmsub_ph(a, b, c, 0b0101010101010101); + unsafe fn test_mm256_mask_getmant_ph() { + let a = _mm256_set1_ph(10.0); + let src = _mm256_set1_ph(20.0); + let r = _mm256_mask_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>( + src, + 0b0101010101010101, + a, + ); let e = _mm256_set_ph( - 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, + 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, + 20.0, 1.25, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_fmsub_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_maskz_fmsub_ph(0b0101010101010101, a, b, c); + unsafe fn test_mm256_maskz_getmant_ph() { + let a 
= _mm256_set1_ph(10.0); + let r = _mm256_maskz_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>( + 0b0101010101010101, + a, + ); let e = _mm256_set_ph( - 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, + 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmsub_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_fmsub_ph(a, b, c); - let e = _mm512_set1_ph(-1.0); + unsafe fn test_mm512_getmant_ph() { + let a = _mm512_set1_ph(10.0); + let r = _mm512_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(a); + let e = _mm512_set1_ph(1.25); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fmsub_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask_fmsub_ph(a, 0b01010101010101010101010101010101, b, c); - let e = _mm512_set_ph( - 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, - 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, + unsafe fn test_mm512_mask_getmant_ph() { + let a = _mm512_set1_ph(10.0); + let src = _mm512_set1_ph(20.0); + let r = _mm512_mask_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>( + src, + 0b01010101010101010101010101010101, + a, ); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fmsub_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask3_fmsub_ph(a, b, c, 0b01010101010101010101010101010101); let e = _mm512_set_ph( - 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, - 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, + 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, + 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, + 20.0, 1.25, 20.0, 1.25, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmsub_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_maskz_fmsub_ph(0b01010101010101010101010101010101, a, b, c); + unsafe fn test_mm512_maskz_getmant_ph() { + let a = _mm512_set1_ph(10.0); + let r = _mm512_maskz_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>( + 0b01010101010101010101010101010101, + a, + ); let e = _mm512_set_ph( - 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, - 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, + 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, + 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmsub_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_fmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); - let e = _mm512_set1_ph(-1.0); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fmsub_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = 
_mm512_set1_ph(3.0); - let r = _mm512_mask_fmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, - 0b01010101010101010101010101010101, - b, - c, - ); - let e = _mm512_set_ph( - 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, - 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, - ); + unsafe fn test_mm512_getmant_round_ph() { + let a = _mm512_set1_ph(10.0); + let r = + _mm512_getmant_round_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN, _MM_FROUND_NO_EXC>( + a, + ); + let e = _mm512_set1_ph(1.25); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fmsub_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask3_fmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, - b, - c, - 0b01010101010101010101010101010101, - ); + unsafe fn test_mm512_mask_getmant_round_ph() { + let a = _mm512_set1_ph(10.0); + let src = _mm512_set1_ph(20.0); + let r = _mm512_mask_getmant_round_ph::< + _MM_MANT_NORM_P75_1P5, + _MM_MANT_SIGN_NAN, + _MM_FROUND_NO_EXC, + >(src, 0b01010101010101010101010101010101, a); let e = _mm512_set_ph( - 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, - 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, 3.0, -1.0, + 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, + 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, + 20.0, 1.25, 20.0, 1.25, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmsub_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_maskz_fmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b01010101010101010101010101010101, - a, - b, - c, - ); + unsafe fn test_mm512_maskz_getmant_round_ph() { + let a = _mm512_set1_ph(10.0); + let r = _mm512_maskz_getmant_round_ph::< + _MM_MANT_NORM_P75_1P5, + _MM_MANT_SIGN_NAN, + _MM_FROUND_NO_EXC, + >(0b01010101010101010101010101010101, a); let e = _mm512_set_ph( - 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, - 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, 0.0, -1.0, + 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, + 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fmsub_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_fmsub_sh(a, b, c); - let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fmsub_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_fmsub_sh(a, 0, b, c); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_mask_fmsub_sh(a, 1, b, c); - let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.); + unsafe fn test_mm_getmant_sh() { + let a 
= _mm_setr_ph(15.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(10.0, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_getmant_sh::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(a, b); + let e = _mm_setr_ph(1.25, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask3_fmsub_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask3_fmsub_sh(a, b, c, 0); - let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + unsafe fn test_mm_mask_getmant_sh() { + let a = _mm_setr_ph(15.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(10.0, 20., 21., 22., 23., 24., 25., 26.); + let src = _mm_setr_ph(20.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_getmant_sh::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(src, 0, a, b); + let e = _mm_setr_ph(20.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_mask3_fmsub_sh(a, b, c, 1); - let e = _mm_setr_ph(-1.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_getmant_sh::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(src, 1, a, b); + let e = _mm_setr_ph(1.25, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fmsub_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_maskz_fmsub_sh(0, a, b, c); + unsafe fn test_mm_maskz_getmant_sh() { + let a = _mm_setr_ph(15.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(10.0, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_maskz_getmant_sh::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(0, a, b); let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_maskz_fmsub_sh(1, a, b, c); - let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fmsub_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); - let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fmsub_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, 0, b, c, - ); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_mask_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, 1, b, c, - ); - let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.); + let r = _mm_maskz_getmant_sh::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(1, a, b); + let e = _mm_setr_ph(1.25, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask3_fmsub_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = 
_mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask3_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, b, c, 0, - ); - let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - assert_eq_m128h(r, e); - let r = _mm_mask3_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, b, c, 1, + unsafe fn test_mm_getmant_round_sh() { + let a = _mm_setr_ph(15.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(10.0, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_getmant_round_sh::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN, _MM_FROUND_NO_EXC>( + a, b, ); - let e = _mm_setr_ph(-1.0, 30., 31., 32., 33., 34., 35., 36.); + let e = _mm_setr_ph(1.25, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fmsub_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_maskz_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0, a, b, c, - ); - let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); + unsafe fn test_mm_mask_getmant_round_sh() { + let a = _mm_setr_ph(15.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(10.0, 20., 21., 22., 23., 24., 25., 26.); + let src = _mm_setr_ph(20.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_getmant_round_sh::< + _MM_MANT_NORM_P75_1P5, + _MM_MANT_SIGN_NAN, + _MM_FROUND_NO_EXC, + >(src, 0, a, b); + let e = _mm_setr_ph(20.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_maskz_fmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 1, a, b, c, - ); - let e = _mm_setr_ph(-1.0, 10., 11., 12., 13., 14., 15., 16.); + let r = _mm_mask_getmant_round_sh::< + _MM_MANT_NORM_P75_1P5, + _MM_MANT_SIGN_NAN, + _MM_FROUND_NO_EXC, + >(src, 1, a, b); + let e = _mm_setr_ph(1.25, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_fnmadd_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_fnmadd_ph(a, b, c); - let e = _mm_set1_ph(1.0); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_getmant_round_sh() { + let a = _mm_setr_ph(15.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(10.0, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_maskz_getmant_round_sh::< + _MM_MANT_NORM_P75_1P5, + _MM_MANT_SIGN_NAN, + _MM_FROUND_NO_EXC, + >(0, a, b); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + let r = _mm_maskz_getmant_round_sh::< + _MM_MANT_NORM_P75_1P5, + _MM_MANT_SIGN_NAN, + _MM_FROUND_NO_EXC, + >(1, a, b); + let e = _mm_setr_ph(1.25, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fnmadd_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_mask_fnmadd_ph(a, 0b01010101, b, c); - let e = _mm_set_ph(1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0); + unsafe fn test_mm_roundscale_ph() { + let a = _mm_set1_ph(1.1); + let r = _mm_roundscale_ph::<0>(a); + let e = _mm_set1_ph(1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask3_fnmadd_ph() { - let a = _mm_set1_ph(1.0); - let b = 
_mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_mask3_fnmadd_ph(a, b, c, 0b01010101); - let e = _mm_set_ph(3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0); + unsafe fn test_mm_mask_roundscale_ph() { + let a = _mm_set1_ph(1.1); + let src = _mm_set1_ph(2.0); + let r = _mm_mask_roundscale_ph::<0>(src, 0b01010101, a); + let e = _mm_set_ph(2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_fnmadd_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_maskz_fnmadd_ph(0b01010101, a, b, c); + unsafe fn test_mm_maskz_roundscale_ph() { + let a = _mm_set1_ph(1.1); + let r = _mm_maskz_roundscale_ph::<0>(0b01010101, a); let e = _mm_set_ph(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_fnmadd_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_fnmadd_ph(a, b, c); + unsafe fn test_mm256_roundscale_ph() { + let a = _mm256_set1_ph(1.1); + let r = _mm256_roundscale_ph::<0>(a); let e = _mm256_set1_ph(1.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_fnmadd_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_mask_fnmadd_ph(a, 0b0101010101010101, b, c); - let e = _mm256_set_ph( - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - ); - assert_eq_m256h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask3_fnmadd_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_mask3_fnmadd_ph(a, b, c, 0b0101010101010101); + unsafe fn test_mm256_mask_roundscale_ph() { + let a = _mm256_set1_ph(1.1); + let src = _mm256_set1_ph(2.0); + let r = _mm256_mask_roundscale_ph::<0>(src, 0b0101010101010101, a); let e = _mm256_set_ph( - 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, + 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_fnmadd_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_maskz_fnmadd_ph(0b0101010101010101, a, b, c); + unsafe fn test_mm256_maskz_roundscale_ph() { + let a = _mm256_set1_ph(1.1); + let r = _mm256_maskz_roundscale_ph::<0>(0b0101010101010101, a); let e = _mm256_set_ph( 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, ); @@ -17501,47 +22681,29 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fnmadd_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_fnmadd_ph(a, b, c); + unsafe fn test_mm512_roundscale_ph() { + let a = _mm512_set1_ph(1.1); + let r = _mm512_roundscale_ph::<0>(a); let e = _mm512_set1_ph(1.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fnmadd_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask_fnmadd_ph(a, 0b01010101010101010101010101010101, b, c); - let e = _mm512_set_ph( - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - ); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fnmadd_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask3_fnmadd_ph(a, b, c, 0b01010101010101010101010101010101); + unsafe fn test_mm512_mask_roundscale_ph() { + let a = _mm512_set1_ph(1.1); + let src = _mm512_set1_ph(2.0); + let r = _mm512_mask_roundscale_ph::<0>(src, 0b01010101010101010101010101010101, a); let e = _mm512_set_ph( - 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, - 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, + 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, + 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fnmadd_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_maskz_fnmadd_ph(0b01010101010101010101010101010101, a, b, c); + unsafe fn test_mm512_maskz_roundscale_ph() { + let a = _mm512_set1_ph(1.1); + let r = _mm512_maskz_roundscale_ph::<0>(0b01010101010101010101010101010101, a); let e = _mm512_set_ph( 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, @@ -17550,62 +22712,35 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fnmadd_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = - _mm512_fnmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + unsafe fn test_mm512_roundscale_round_ph() { + let a = _mm512_set1_ph(1.1); + let r = _mm512_roundscale_round_ph::<0, _MM_FROUND_NO_EXC>(a); let e = _mm512_set1_ph(1.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fnmadd_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask_fnmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, + unsafe fn test_mm512_mask_roundscale_round_ph() { + let a = _mm512_set1_ph(1.1); + let src = _mm512_set1_ph(2.0); + let r = _mm512_mask_roundscale_round_ph::<0, _MM_FROUND_NO_EXC>( + src, 0b01010101010101010101010101010101, - b, - c, - ); - let e = _mm512_set_ph( - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - ); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fnmadd_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask3_fnmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( a, - b, - c, - 0b01010101010101010101010101010101, ); let e = _mm512_set_ph( - 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, - 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, + 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, + 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fnmadd_round_ph() { - let a = 
_mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_maskz_fnmadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_maskz_roundscale_round_ph() { + let a = _mm512_set1_ph(1.1); + let r = _mm512_maskz_roundscale_round_ph::<0, _MM_FROUND_NO_EXC>( 0b01010101010101010101010101010101, a, - b, - c, ); let e = _mm512_set_ph( 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, @@ -17615,4329 +22750,4054 @@ mod tests { } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fnmadd_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_fnmadd_sh(a, b, c); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fnmadd_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_fnmadd_sh(a, 0, b, c); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_mask_fnmadd_sh(a, 1, b, c); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask3_fnmadd_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask3_fnmadd_sh(a, b, c, 0); - let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - assert_eq_m128h(r, e); - let r = _mm_mask3_fnmadd_sh(a, b, c, 1); - let e = _mm_setr_ph(1.0, 30., 31., 32., 33., 34., 35., 36.); + unsafe fn test_mm_roundscale_sh() { + let a = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(1.1, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_roundscale_sh::<0>(a, b); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fnmadd_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_maskz_fnmadd_sh(0, a, b, c); - let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); + unsafe fn test_mm_mask_roundscale_sh() { + let a = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(1.1, 20., 21., 22., 23., 24., 25., 26.); + let src = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_roundscale_sh::<0>(src, 0, a, b); + let e = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_maskz_fnmadd_sh(1, a, b, c); + let r = _mm_mask_roundscale_sh::<0>(src, 1, a, b); let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fnmadd_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); + unsafe fn 
test_mm_maskz_roundscale_sh() { + let a = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(1.1, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_maskz_roundscale_sh::<0>(0, a, b); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + let r = _mm_maskz_roundscale_sh::<0>(1, a, b); let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fnmadd_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, 0, b, c, - ); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_mask_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, 1, b, c, - ); + unsafe fn test_mm_roundscale_round_sh() { + let a = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(1.1, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_roundscale_round_sh::<0, _MM_FROUND_NO_EXC>(a, b); let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask3_fnmadd_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask3_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, b, c, 0, - ); - let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + unsafe fn test_mm_mask_roundscale_round_sh() { + let a = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(1.1, 20., 21., 22., 23., 24., 25., 26.); + let src = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_roundscale_round_sh::<0, _MM_FROUND_NO_EXC>(src, 0, a, b); + let e = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_mask3_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, b, c, 1, - ); - let e = _mm_setr_ph(1.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_roundscale_round_sh::<0, _MM_FROUND_NO_EXC>(src, 1, a, b); + let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fnmadd_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_maskz_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0, a, b, c, - ); + unsafe fn test_mm_maskz_roundscale_round_sh() { + let a = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(1.1, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_maskz_roundscale_round_sh::<0, _MM_FROUND_NO_EXC>(0, a, b); let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_maskz_fnmadd_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 1, a, b, c, - ); + let r = _mm_maskz_roundscale_round_sh::<0, _MM_FROUND_NO_EXC>(1, a, b); let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn 
test_mm_fnmsub_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_fnmsub_ph(a, b, c); - let e = _mm_set1_ph(-5.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fnmsub_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_mask_fnmsub_ph(a, 0b01010101, b, c); - let e = _mm_set_ph(1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0); + unsafe fn test_mm_scalef_ph() { + let a = _mm_set1_ph(1.); + let b = _mm_set1_ph(3.); + let r = _mm_scalef_ph(a, b); + let e = _mm_set1_ph(8.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask3_fnmsub_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_mask3_fnmsub_ph(a, b, c, 0b01010101); - let e = _mm_set_ph(3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0); + unsafe fn test_mm_mask_scalef_ph() { + let a = _mm_set1_ph(1.); + let b = _mm_set1_ph(3.); + let src = _mm_set1_ph(2.); + let r = _mm_mask_scalef_ph(src, 0b01010101, a, b); + let e = _mm_set_ph(2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_fnmsub_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_maskz_fnmsub_ph(0b01010101, a, b, c); - let e = _mm_set_ph(0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0); + unsafe fn test_mm_maskz_scalef_ph() { + let a = _mm_set1_ph(1.); + let b = _mm_set1_ph(3.); + let r = _mm_maskz_scalef_ph(0b01010101, a, b); + let e = _mm_set_ph(0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_fnmsub_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_fnmsub_ph(a, b, c); - let e = _mm256_set1_ph(-5.0); - assert_eq_m256h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_fnmsub_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_mask_fnmsub_ph(a, 0b0101010101010101, b, c); - let e = _mm256_set_ph( - 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, - ); + unsafe fn test_mm256_scalef_ph() { + let a = _mm256_set1_ph(1.); + let b = _mm256_set1_ph(3.); + let r = _mm256_scalef_ph(a, b); + let e = _mm256_set1_ph(8.0); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask3_fnmsub_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_mask3_fnmsub_ph(a, b, c, 0b0101010101010101); + unsafe fn test_mm256_mask_scalef_ph() { + let a = _mm256_set1_ph(1.); + let b = _mm256_set1_ph(3.); + let src = _mm256_set1_ph(2.); + let r = _mm256_mask_scalef_ph(src, 0b0101010101010101, a, b); let e = _mm256_set_ph( - 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, + 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_fnmsub_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_maskz_fnmsub_ph(0b0101010101010101, a, b, c); + unsafe fn test_mm256_maskz_scalef_ph() { + let a = _mm256_set1_ph(1.); + let b = 
_mm256_set1_ph(3.); + let r = _mm256_maskz_scalef_ph(0b0101010101010101, a, b); let e = _mm256_set_ph( - 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, + 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fnmsub_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_fnmsub_ph(a, b, c); - let e = _mm512_set1_ph(-5.0); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fnmsub_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask_fnmsub_ph(a, 0b01010101010101010101010101010101, b, c); - let e = _mm512_set_ph( - 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, - 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, - ); + unsafe fn test_mm512_scalef_ph() { + let a = _mm512_set1_ph(1.); + let b = _mm512_set1_ph(3.); + let r = _mm512_scalef_ph(a, b); + let e = _mm512_set1_ph(8.0); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fnmsub_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask3_fnmsub_ph(a, b, c, 0b01010101010101010101010101010101); + unsafe fn test_mm512_mask_scalef_ph() { + let a = _mm512_set1_ph(1.); + let b = _mm512_set1_ph(3.); + let src = _mm512_set1_ph(2.); + let r = _mm512_mask_scalef_ph(src, 0b01010101010101010101010101010101, a, b); let e = _mm512_set_ph( - 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, - 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, + 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, + 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fnmsub_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_maskz_fnmsub_ph(0b01010101010101010101010101010101, a, b, c); + unsafe fn test_mm512_maskz_scalef_ph() { + let a = _mm512_set1_ph(1.); + let b = _mm512_set1_ph(3.); + let r = _mm512_maskz_scalef_ph(0b01010101010101010101010101010101, a, b); let e = _mm512_set_ph( - 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, - 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, + 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, + 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fnmsub_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = - _mm512_fnmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); - let e = _mm512_set1_ph(-5.0); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fnmsub_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask_fnmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, - 0b01010101010101010101010101010101, - b, 
- c, - ); - let e = _mm512_set_ph( - 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, - 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, 1.0, -5.0, - ); + unsafe fn test_mm512_scalef_round_ph() { + let a = _mm512_set1_ph(1.); + let b = _mm512_set1_ph(3.); + let r = _mm512_scalef_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm512_set1_ph(8.0); assert_eq_m512h(r, e); } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fnmsub_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask3_fnmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_scalef_round_ph() { + let a = _mm512_set1_ph(1.); + let b = _mm512_set1_ph(3.); + let src = _mm512_set1_ph(2.); + let r = _mm512_mask_scalef_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b01010101010101010101010101010101, a, b, - c, - 0b01010101010101010101010101010101, ); let e = _mm512_set_ph( - 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, - 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, 3.0, -5.0, + 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, + 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fnmsub_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_maskz_fnmsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_maskz_scalef_round_ph() { + let a = _mm512_set1_ph(1.); + let b = _mm512_set1_ph(3.); + let r = _mm512_maskz_scalef_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( 0b01010101010101010101010101010101, a, b, - c, ); let e = _mm512_set_ph( - 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, - 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, 0.0, -5.0, + 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, + 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fnmsub_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_fnmsub_sh(a, b, c); - let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fnmsub_sh() { + unsafe fn test_mm_scalef_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_fnmsub_sh(a, 0, b, c); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_mask_fnmsub_sh(a, 1, b, c); - let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_scalef_sh(a, b); + let e = _mm_setr_ph(8.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = 
"avx512fp16")] - unsafe fn test_mm_mask3_fnmsub_sh() { + unsafe fn test_mm_mask_scalef_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask3_fnmsub_sh(a, b, c, 0); - let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); + let src = _mm_setr_ph(2.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_scalef_sh(src, 0, a, b); + let e = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_mask3_fnmsub_sh(a, b, c, 1); - let e = _mm_setr_ph(-5.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_scalef_sh(src, 1, a, b); + let e = _mm_setr_ph(8.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fnmsub_sh() { + unsafe fn test_mm_maskz_scalef_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_maskz_fnmsub_sh(0, a, b, c); + let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_maskz_scalef_sh(0, a, b); let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_maskz_fnmsub_sh(1, a, b, c); - let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fnmsub_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); - let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); + let r = _mm_maskz_scalef_sh(1, a, b); + let e = _mm_setr_ph(8.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fnmsub_round_sh() { + unsafe fn test_mm_scalef_round_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, 0, b, c, - ); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_mask_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, 1, b, c, - ); - let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_scalef_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_setr_ph(8.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask3_fnmsub_round_sh() { + unsafe fn test_mm_mask_scalef_round_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask3_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, b, c, 0, + let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); + let src = _mm_setr_ph(2.0, 30., 31., 32., 33., 
34., 35., 36.); + let r = _mm_mask_scalef_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, ); - let e = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); + let e = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_mask3_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, b, c, 1, + let r = _mm_mask_scalef_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 1, a, b, ); - let e = _mm_setr_ph(-5.0, 30., 31., 32., 33., 34., 35., 36.); + let e = _mm_setr_ph(8.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_fnmsub_round_sh() { + unsafe fn test_mm_maskz_scalef_round_sh() { let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(2.0, 20., 21., 22., 23., 24., 25., 26.); - let c = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_maskz_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0, a, b, c, - ); + let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); + let r = + _mm_maskz_scalef_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); - let r = _mm_maskz_fnmsub_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 1, a, b, c, - ); - let e = _mm_setr_ph(-5.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_fmaddsub_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_fmaddsub_ph(a, b, c); - let e = _mm_set_ph(5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0); + let r = + _mm_maskz_scalef_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_setr_ph(8.0, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fmaddsub_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_mask_fmaddsub_ph(a, 0b00110011, b, c); - let e = _mm_set_ph(1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0); + unsafe fn test_mm_reduce_ph() { + let a = _mm_set1_ph(1.25); + let r = _mm_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>(a); + let e = _mm_set1_ph(0.25); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask3_fmaddsub_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_mask3_fmaddsub_ph(a, b, c, 0b00110011); - let e = _mm_set_ph(3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0); + unsafe fn test_mm_mask_reduce_ph() { + let a = _mm_set1_ph(1.25); + let src = _mm_set1_ph(2.0); + let r = _mm_mask_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>(src, 0b01010101, a); + let e = _mm_set_ph(2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_fmaddsub_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_maskz_fmaddsub_ph(0b00110011, a, b, c); - let e = _mm_set_ph(0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0); + unsafe fn test_mm_maskz_reduce_ph() { + let a = _mm_set1_ph(1.25); + let r = _mm_maskz_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>(0b01010101, a); + let e = _mm_set_ph(0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25); assert_eq_m128h(r, e); } #[simd_test(enable = 
"avx512fp16,avx512vl")] - unsafe fn test_mm256_fmaddsub_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_fmaddsub_ph(a, b, c); - let e = _mm256_set_ph( - 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, - ); - assert_eq_m256h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_fmaddsub_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_mask_fmaddsub_ph(a, 0b0011001100110011, b, c); - let e = _mm256_set_ph( - 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, - ); + unsafe fn test_mm256_reduce_ph() { + let a = _mm256_set1_ph(1.25); + let r = _mm256_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>(a); + let e = _mm256_set1_ph(0.25); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask3_fmaddsub_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_mask3_fmaddsub_ph(a, b, c, 0b0011001100110011); + unsafe fn test_mm256_mask_reduce_ph() { + let a = _mm256_set1_ph(1.25); + let src = _mm256_set1_ph(2.0); + let r = _mm256_mask_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>(src, 0b0101010101010101, a); let e = _mm256_set_ph( - 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, + 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_fmaddsub_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_maskz_fmaddsub_ph(0b0011001100110011, a, b, c); + unsafe fn test_mm256_maskz_reduce_ph() { + let a = _mm256_set1_ph(1.25); + let r = _mm256_maskz_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>(0b0101010101010101, a); let e = _mm256_set_ph( - 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, + 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmaddsub_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_fmaddsub_ph(a, b, c); - let e = _mm512_set_ph( - 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, - 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, - ); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fmaddsub_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask_fmaddsub_ph(a, 0b00110011001100110011001100110011, b, c); - let e = _mm512_set_ph( - 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, - 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, - ); + unsafe fn test_mm512_reduce_ph() { + let a = _mm512_set1_ph(1.25); + let r = _mm512_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>(a); + let e = _mm512_set1_ph(0.25); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fmaddsub_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask3_fmaddsub_ph(a, b, c, 
0b00110011001100110011001100110011); + unsafe fn test_mm512_mask_reduce_ph() { + let a = _mm512_set1_ph(1.25); + let src = _mm512_set1_ph(2.0); + let r = _mm512_mask_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>( + src, + 0b01010101010101010101010101010101, + a, + ); let e = _mm512_set_ph( - 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, - 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, + 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, + 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmaddsub_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_maskz_fmaddsub_ph(0b00110011001100110011001100110011, a, b, c); + unsafe fn test_mm512_maskz_reduce_ph() { + let a = _mm512_set1_ph(1.25); + let r = _mm512_maskz_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>( + 0b01010101010101010101010101010101, + a, + ); let e = _mm512_set_ph( - 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, - 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, + 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, + 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmaddsub_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = - _mm512_fmaddsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); - let e = _mm512_set_ph( - 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, - 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, - ); + unsafe fn test_mm512_reduce_round_ph() { + let a = _mm512_set1_ph(1.25); + let r = _mm512_reduce_round_ph::<{ 16 | _MM_FROUND_TO_ZERO }, _MM_FROUND_NO_EXC>(a); + let e = _mm512_set1_ph(0.25); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fmaddsub_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask_fmaddsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_mask_reduce_round_ph() { + let a = _mm512_set1_ph(1.25); + let src = _mm512_set1_ph(2.0); + let r = _mm512_mask_reduce_round_ph::<{ 16 | _MM_FROUND_TO_ZERO }, _MM_FROUND_NO_EXC>( + src, + 0b01010101010101010101010101010101, a, - 0b00110011001100110011001100110011, - b, - c, ); let e = _mm512_set_ph( - 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, - 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, 1.0, 1.0, 5.0, -1.0, + 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, + 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fmaddsub_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask3_fmaddsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_maskz_reduce_round_ph() { + let a = _mm512_set1_ph(1.25); + let 
r = _mm512_maskz_reduce_round_ph::<{ 16 | _MM_FROUND_TO_ZERO }, _MM_FROUND_NO_EXC>( + 0b01010101010101010101010101010101, a, - b, - c, - 0b00110011001100110011001100110011, ); let e = _mm512_set_ph( - 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, - 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, 3.0, 3.0, 5.0, -1.0, + 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, + 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmaddsub_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_maskz_fmaddsub_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b00110011001100110011001100110011, - a, - b, - c, + unsafe fn test_mm_reduce_sh() { + let a = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(1.25, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_reduce_sh::<{ 16 | _MM_FROUND_TO_ZERO }>(a, b); + let e = _mm_setr_ph(0.25, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_reduce_sh() { + let a = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(1.25, 20., 21., 22., 23., 24., 25., 26.); + let src = _mm_setr_ph(2.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_reduce_sh::<{ 16 | _MM_FROUND_TO_ZERO }>(src, 0, a, b); + let e = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + let r = _mm_mask_reduce_sh::<{ 16 | _MM_FROUND_TO_ZERO }>(src, 1, a, b); + let e = _mm_setr_ph(0.25, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_reduce_sh() { + let a = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(1.25, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_maskz_reduce_sh::<{ 16 | _MM_FROUND_TO_ZERO }>(0, a, b); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + let r = _mm_maskz_reduce_sh::<{ 16 | _MM_FROUND_TO_ZERO }>(1, a, b); + let e = _mm_setr_ph(0.25, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_reduce_round_sh() { + let a = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(1.25, 20., 21., 22., 23., 24., 25., 26.); + let r = _mm_reduce_round_sh::<{ 16 | _MM_FROUND_TO_ZERO }, _MM_FROUND_NO_EXC>(a, b); + let e = _mm_setr_ph(0.25, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_reduce_round_sh() { + let a = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(1.25, 20., 21., 22., 23., 24., 25., 26.); + let src = _mm_setr_ph(2.0, 30., 31., 32., 33., 34., 35., 36.); + let r = _mm_mask_reduce_round_sh::<{ 16 | _MM_FROUND_TO_ZERO }, _MM_FROUND_NO_EXC>( + src, 0, a, b, ); - let e = _mm512_set_ph( - 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, - 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, 0.0, 0.0, 5.0, -1.0, + let e = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + let r = _mm_mask_reduce_round_sh::<{ 16 | _MM_FROUND_TO_ZERO }, _MM_FROUND_NO_EXC>( + src, 1, a, b, ); - assert_eq_m512h(r, e); + let e = 
_mm_setr_ph(0.25, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_fmsubadd_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_fmsubadd_ph(a, b, c); - let e = _mm_set_ph(-1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_maskz_reduce_round_sh() { + let a = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); + let b = _mm_setr_ph(1.25, 20., 21., 22., 23., 24., 25., 26.); + let r = + _mm_maskz_reduce_round_sh::<{ 16 | _MM_FROUND_TO_ZERO }, _MM_FROUND_NO_EXC>(0, a, b); + let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); + assert_eq_m128h(r, e); + let r = + _mm_maskz_reduce_round_sh::<{ 16 | _MM_FROUND_TO_ZERO }, _MM_FROUND_NO_EXC>(1, a, b); + let e = _mm_setr_ph(0.25, 10., 11., 12., 13., 14., 15., 16.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fmsubadd_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_mask_fmsubadd_ph(a, 0b00110011, b, c); - let e = _mm_set_ph(1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0); - assert_eq_m128h(r, e); + unsafe fn test_mm_reduce_add_ph() { + let a = _mm_set1_ph(2.0); + let r = _mm_reduce_add_ph(a); + assert_eq!(r, 16.0); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask3_fmsubadd_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_mask3_fmsubadd_ph(a, b, c, 0b00110011); - let e = _mm_set_ph(3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0); - assert_eq_m128h(r, e); + unsafe fn test_mm256_reduce_add_ph() { + let a = _mm256_set1_ph(2.0); + let r = _mm256_reduce_add_ph(a); + assert_eq!(r, 32.0); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_reduce_add_ph() { + let a = _mm512_set1_ph(2.0); + let r = _mm512_reduce_add_ph(a); + assert_eq!(r, 64.0); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_fmsubadd_ph() { - let a = _mm_set1_ph(1.0); - let b = _mm_set1_ph(2.0); - let c = _mm_set1_ph(3.0); - let r = _mm_maskz_fmsubadd_ph(0b00110011, a, b, c); - let e = _mm_set_ph(0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0); - assert_eq_m128h(r, e); + unsafe fn test_mm_reduce_mul_ph() { + let a = _mm_set1_ph(2.0); + let r = _mm_reduce_mul_ph(a); + assert_eq!(r, 256.0); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_fmsubadd_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_fmsubadd_ph(a, b, c); - let e = _mm256_set_ph( - -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, - ); - assert_eq_m256h(r, e); + unsafe fn test_mm256_reduce_mul_ph() { + let a = _mm256_set1_ph(2.0); + let r = _mm256_reduce_mul_ph(a); + assert_eq!(r, 65536.0); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_reduce_mul_ph() { + let a = _mm512_set1_ph(2.0); + let r = _mm512_reduce_mul_ph(a); + assert_eq!(r, 16777216.0); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_fmsubadd_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_mask_fmsubadd_ph(a, 0b0011001100110011, b, c); - let e = _mm256_set_ph( - 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, - ); - assert_eq_m256h(r, e); + unsafe fn test_mm_reduce_max_ph() { + let a = _mm_set_ph(1.0, 
2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm_reduce_max_ph(a); + assert_eq!(r, 8.0); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask3_fmsubadd_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_mask3_fmsubadd_ph(a, b, c, 0b0011001100110011); - let e = _mm256_set_ph( - 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, + unsafe fn test_mm256_reduce_max_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - assert_eq_m256h(r, e); + let r = _mm256_reduce_max_ph(a); + assert_eq!(r, 16.0); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_reduce_max_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let r = _mm512_reduce_max_ph(a); + assert_eq!(r, 32.0); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_fmsubadd_ph() { - let a = _mm256_set1_ph(1.0); - let b = _mm256_set1_ph(2.0); - let c = _mm256_set1_ph(3.0); - let r = _mm256_maskz_fmsubadd_ph(0b0011001100110011, a, b, c); - let e = _mm256_set_ph( - 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, - ); - assert_eq_m256h(r, e); + unsafe fn test_mm_reduce_min_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm_reduce_min_ph(a); + assert_eq!(r, 1.0); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmsubadd_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_fmsubadd_ph(a, b, c); - let e = _mm512_set_ph( - -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, - -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_reduce_min_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - assert_eq_m512h(r, e); + let r = _mm256_reduce_min_ph(a); + assert_eq!(r, 1.0); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fmsubadd_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask_fmsubadd_ph(a, 0b00110011001100110011001100110011, b, c); - let e = _mm512_set_ph( - 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, - 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, + unsafe fn test_mm512_reduce_min_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, ); - assert_eq_m512h(r, e); + let r = _mm512_reduce_min_ph(a); + assert_eq!(r, 1.0); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fmsubadd_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask3_fmsubadd_ph(a, b, c, 0b00110011001100110011001100110011); - let e = _mm512_set_ph( - 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, - 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, + 
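/* The fpclass imm8 is a category bitmask: bit 3 (0x08) = positive infinity, bit 4 (0x10) = negative infinity, so 0x18 flags exactly the infinity lanes. */ +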
#[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_fpclass_ph_mask() { + let a = _mm_set_ph( + 1., + f16::INFINITY, + f16::NEG_INFINITY, + 0.0, + -0.0, + -2.0, + f16::NAN, + 5.9e-8, // Denormal ); - assert_eq_m512h(r, e); + let r = _mm_fpclass_ph_mask::<0x18>(a); // infinities + assert_eq!(r, 0b01100000); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmsubadd_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_maskz_fmsubadd_ph(0b00110011001100110011001100110011, a, b, c); - let e = _mm512_set_ph( - 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, - 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_fpclass_ph_mask() { + let a = _mm_set_ph( + 1., + f16::INFINITY, + f16::NEG_INFINITY, + 0.0, + -0.0, + -2.0, + f16::NAN, + 5.9e-8, // Denormal ); - assert_eq_m512h(r, e); + let r = _mm_mask_fpclass_ph_mask::<0x18>(0b01010101, a); + assert_eq!(r, 0b01000000); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fmsubadd_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = - _mm512_fmsubadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b, c); - let e = _mm512_set_ph( - -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, - -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, -1.0, 5.0, + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_fpclass_ph_mask() { + let a = _mm256_set_ph( + 1., + f16::INFINITY, + f16::NEG_INFINITY, + 0.0, + -0.0, + -2.0, + f16::NAN, + 5.9e-8, // Denormal + 1., + f16::INFINITY, + f16::NEG_INFINITY, + 0.0, + -0.0, + -2.0, + f16::NAN, + 5.9e-8, // Denormal ); - assert_eq_m512h(r, e); + let r = _mm256_fpclass_ph_mask::<0x18>(a); // infinities + assert_eq!(r, 0b0110000001100000); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fmsubadd_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask_fmsubadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, - 0b00110011001100110011001100110011, - b, - c, - ); - let e = _mm512_set_ph( - 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, - 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, 1.0, 1.0, -1.0, 5.0, + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_fpclass_ph_mask() { + let a = _mm256_set_ph( + 1., + f16::INFINITY, + f16::NEG_INFINITY, + 0.0, + -0.0, + -2.0, + f16::NAN, + 5.9e-8, // Denormal + 1., + f16::INFINITY, + f16::NEG_INFINITY, + 0.0, + -0.0, + -2.0, + f16::NAN, + 5.9e-8, // Denormal ); - assert_eq_m512h(r, e); + let r = _mm256_mask_fpclass_ph_mask::<0x18>(0b0101010101010101, a); + assert_eq!(r, 0b0100000001000000); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask3_fmsubadd_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_mask3_fmsubadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - a, - b, - c, - 0b00110011001100110011001100110011, - ); - let e = _mm512_set_ph( - 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, - 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, 3.0, 3.0, -1.0, 5.0, + 
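/* The write-masked variants AND the class result with k1, so only lanes whose k1 bit is set can report a match. */ +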
unsafe fn test_mm512_fpclass_ph_mask() { + let a = _mm512_set_ph( + 1., + f16::INFINITY, + f16::NEG_INFINITY, + 0.0, + -0.0, + -2.0, + f16::NAN, + 5.9e-8, // Denormal + 1., + f16::INFINITY, + f16::NEG_INFINITY, + 0.0, + -0.0, + -2.0, + f16::NAN, + 5.9e-8, // Denormal + 1., + f16::INFINITY, + f16::NEG_INFINITY, + 0.0, + -0.0, + -2.0, + f16::NAN, + 5.9e-8, // Denormal + 1., + f16::INFINITY, + f16::NEG_INFINITY, + 0.0, + -0.0, + -2.0, + f16::NAN, + 5.9e-8, // Denormal ); - assert_eq_m512h(r, e); + let r = _mm512_fpclass_ph_mask::<0x18>(a); // infinities + assert_eq!(r, 0b01100000011000000110000001100000); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_fmsubadd_round_ph() { - let a = _mm512_set1_ph(1.0); - let b = _mm512_set1_ph(2.0); - let c = _mm512_set1_ph(3.0); - let r = _mm512_maskz_fmsubadd_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b00110011001100110011001100110011, - a, - b, - c, - ); - let e = _mm512_set_ph( - 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, - 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, 0.0, 0.0, -1.0, 5.0, + unsafe fn test_mm512_mask_fpclass_ph_mask() { + let a = _mm512_set_ph( + 1., + f16::INFINITY, + f16::NEG_INFINITY, + 0.0, + -0.0, + -2.0, + f16::NAN, + 5.9e-8, // Denormal + 1., + f16::INFINITY, + f16::NEG_INFINITY, + 0.0, + -0.0, + -2.0, + f16::NAN, + 5.9e-8, // Denormal + 1., + f16::INFINITY, + f16::NEG_INFINITY, + 0.0, + -0.0, + -2.0, + f16::NAN, + 5.9e-8, // Denormal + 1., + f16::INFINITY, + f16::NEG_INFINITY, + 0.0, + -0.0, + -2.0, + f16::NAN, + 5.9e-8, // Denormal ); - assert_eq_m512h(r, e); + let r = _mm512_mask_fpclass_ph_mask::<0x18>(0b01010101010101010101010101010101, a); + assert_eq!(r, 0b01000000010000000100000001000000); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_rcp_ph() { - let a = _mm_set1_ph(2.0); - let r = _mm_rcp_ph(a); - let e = _mm_set1_ph(0.5); - assert_eq_m128h(r, e); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_fpclass_sh_mask() { + let a = _mm_set_sh(f16::INFINITY); + let r = _mm_fpclass_sh_mask::<0x18>(a); + assert_eq!(r, 1); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_rcp_ph() { - let a = _mm_set1_ph(2.0); - let src = _mm_set1_ph(1.0); - let r = _mm_mask_rcp_ph(src, 0b01010101, a); - let e = _mm_set_ph(1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5); - assert_eq_m128h(r, e); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_mask_fpclass_sh_mask() { + let a = _mm_set_sh(f16::INFINITY); + let r = _mm_mask_fpclass_sh_mask::<0x18>(0, a); + assert_eq!(r, 0); + let r = _mm_mask_fpclass_sh_mask::<0x18>(1, a); + assert_eq!(r, 1); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_rcp_ph() { - let a = _mm_set1_ph(2.0); - let r = _mm_maskz_rcp_ph(0b01010101, a); - let e = _mm_set_ph(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5); + unsafe fn test_mm_mask_blend_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_set_ph(-1.0, -2.0, -3.0, -4.0, -5.0, -6.0, -7.0, -8.0); + let r = _mm_mask_blend_ph(0b01010101, a, b); + let e = _mm_set_ph(1.0, -2.0, 3.0, -4.0, 5.0, -6.0, 7.0, -8.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_rcp_ph() { - let a = _mm256_set1_ph(2.0); - let r = _mm256_rcp_ph(a); - let e = _mm256_set1_ph(0.5); - assert_eq_m256h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_rcp_ph() { - let a = _mm256_set1_ph(2.0); - let src = 
_mm256_set1_ph(1.0); - let r = _mm256_mask_rcp_ph(src, 0b0101010101010101, a); - let e = _mm256_set_ph( - 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, + unsafe fn test_mm256_mask_blend_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - assert_eq_m256h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_rcp_ph() { - let a = _mm256_set1_ph(2.0); - let r = _mm256_maskz_rcp_ph(0b0101010101010101, a); + let b = _mm256_set_ph( + -1.0, -2.0, -3.0, -4.0, -5.0, -6.0, -7.0, -8.0, -9.0, -10.0, -11.0, -12.0, -13.0, + -14.0, -15.0, -16.0, + ); + let r = _mm256_mask_blend_ph(0b0101010101010101, a, b); let e = _mm256_set_ph( - 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, + 1.0, -2.0, 3.0, -4.0, 5.0, -6.0, 7.0, -8.0, 9.0, -10.0, 11.0, -12.0, 13.0, -14.0, 15.0, + -16.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_rcp_ph() { - let a = _mm512_set1_ph(2.0); - let r = _mm512_rcp_ph(a); - let e = _mm512_set1_ph(0.5); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_rcp_ph() { - let a = _mm512_set1_ph(2.0); - let src = _mm512_set1_ph(1.0); - let r = _mm512_mask_rcp_ph(src, 0b01010101010101010101010101010101, a); - let e = _mm512_set_ph( - 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, - 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, + unsafe fn test_mm512_mask_blend_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, ); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_rcp_ph() { - let a = _mm512_set1_ph(2.0); - let r = _mm512_maskz_rcp_ph(0b01010101010101010101010101010101, a); + let b = _mm512_set_ph( + -1.0, -2.0, -3.0, -4.0, -5.0, -6.0, -7.0, -8.0, -9.0, -10.0, -11.0, -12.0, -13.0, + -14.0, -15.0, -16.0, -17.0, -18.0, -19.0, -20.0, -21.0, -22.0, -23.0, -24.0, -25.0, + -26.0, -27.0, -28.0, -29.0, -30.0, -31.0, -32.0, + ); + let r = _mm512_mask_blend_ph(0b01010101010101010101010101010101, a, b); let e = _mm512_set_ph( - 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, - 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, + 1.0, -2.0, 3.0, -4.0, 5.0, -6.0, 7.0, -8.0, 9.0, -10.0, 11.0, -12.0, 13.0, -14.0, 15.0, + -16.0, 17.0, -18.0, 19.0, -20.0, 21.0, -22.0, 23.0, -24.0, 25.0, -26.0, 27.0, -28.0, + 29.0, -30.0, 31.0, -32.0, ); assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_rcp_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); - let r = _mm_rcp_sh(a, b); - let e = _mm_setr_ph(0.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_rcp_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); - let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); - let r = _mm_mask_rcp_sh(src, 0, a, b); - let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - assert_eq_m128h(r, e); - let r = 
_mm_mask_rcp_sh(src, 1, a, b); - let e = _mm_setr_ph(0.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_rcp_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); - let r = _mm_maskz_rcp_sh(0, a, b); - let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - assert_eq_m128h(r, e); - let r = _mm_maskz_rcp_sh(1, a, b); - let e = _mm_setr_ph(0.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_rsqrt_ph() { - let a = _mm_set1_ph(4.0); - let r = _mm_rsqrt_ph(a); - let e = _mm_set1_ph(0.5); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_rsqrt_ph() { - let a = _mm_set1_ph(4.0); - let src = _mm_set1_ph(1.0); - let r = _mm_mask_rsqrt_ph(src, 0b01010101, a); - let e = _mm_set_ph(1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5); - assert_eq_m128h(r, e); - } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_rsqrt_ph() { - let a = _mm_set1_ph(4.0); - let r = _mm_maskz_rsqrt_ph(0b01010101, a); - let e = _mm_set_ph(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5); + unsafe fn test_mm_permutex2var_ph() { + let a = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let b = _mm_setr_ph(9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let idx = _mm_setr_epi16(0, 2, 4, 6, 8, 10, 12, 14); + let r = _mm_permutex2var_ph(a, idx, b); + let e = _mm_setr_ph(1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_rsqrt_ph() { - let a = _mm256_set1_ph(4.0); - let r = _mm256_rsqrt_ph(a); - let e = _mm256_set1_ph(0.5); - assert_eq_m256h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_rsqrt_ph() { - let a = _mm256_set1_ph(4.0); - let src = _mm256_set1_ph(1.0); - let r = _mm256_mask_rsqrt_ph(src, 0b0101010101010101, a); - let e = _mm256_set_ph( - 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, + unsafe fn test_mm256_permutex2var_ph() { + let a = _mm256_setr_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - assert_eq_m256h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_rsqrt_ph() { - let a = _mm256_set1_ph(4.0); - let r = _mm256_maskz_rsqrt_ph(0b0101010101010101, a); - let e = _mm256_set_ph( - 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, + let b = _mm256_setr_ph( + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let idx = _mm256_setr_epi16(0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); + let r = _mm256_permutex2var_ph(a, idx, b); + let e = _mm256_setr_ph( + 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, 23.0, 25.0, 27.0, 29.0, + 31.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_rsqrt_ph() { - let a = _mm512_set1_ph(4.0); - let r = _mm512_rsqrt_ph(a); - let e = _mm512_set1_ph(0.5); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_rsqrt_ph() { - let a = _mm512_set1_ph(4.0); - let src = _mm512_set1_ph(1.0); - let r = _mm512_mask_rsqrt_ph(src, 0b01010101010101010101010101010101, a); - let e = _mm512_set_ph( - 1.0, 0.5, 1.0, 0.5, 1.0, 
0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, - 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, 1.0, 0.5, + unsafe fn test_mm512_permutex2var_ph() { + let a = _mm512_setr_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, ); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_rsqrt_ph() { - let a = _mm512_set1_ph(4.0); - let r = _mm512_maskz_rsqrt_ph(0b01010101010101010101010101010101, a); - let e = _mm512_set_ph( - 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, - 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, + let b = _mm512_setr_ph( + 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, + 47.0, 48.0, 49.0, 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, 60.0, + 61.0, 62.0, 63.0, 64.0, + ); + let idx = _mm512_set_epi16( + 62, 60, 58, 56, 54, 52, 50, 48, 46, 44, 42, 40, 38, 36, 34, 32, 30, 28, 26, 24, 22, 20, + 18, 16, 14, 12, 10, 8, 6, 4, 2, 0, + ); + let r = _mm512_permutex2var_ph(a, idx, b); + let e = _mm512_setr_ph( + 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, 23.0, 25.0, 27.0, 29.0, + 31.0, 33.0, 35.0, 37.0, 39.0, 41.0, 43.0, 45.0, 47.0, 49.0, 51.0, 53.0, 55.0, 57.0, + 59.0, 61.0, 63.0, ); assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_rsqrt_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); - let r = _mm_rsqrt_sh(a, b); - let e = _mm_setr_ph(0.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_permutexvar_ph() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let idx = _mm_set_epi16(0, 2, 4, 6, 1, 3, 5, 7); + let r = _mm_permutexvar_ph(idx, a); + let e = _mm_setr_ph(1.0, 3.0, 5.0, 7.0, 2.0, 4.0, 6.0, 8.0); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_rsqrt_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); - let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); - let r = _mm_mask_rsqrt_sh(src, 0, a, b); - let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - assert_eq_m128h(r, e); - let r = _mm_mask_rsqrt_sh(src, 1, a, b); - let e = _mm_setr_ph(0.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - assert_eq_m128h(r, e); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_permutexvar_ph() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let idx = _mm256_set_epi16(0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15); + let r = _mm256_permutexvar_ph(idx, a); + let e = _mm256_setr_ph( + 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, + ); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_rsqrt_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); - let r = _mm_maskz_rsqrt_sh(0, a, b); - let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - assert_eq_m128h(r, e); - let r = _mm_maskz_rsqrt_sh(1, a, b); - 
let e = _mm_setr_ph(0.5, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - assert_eq_m128h(r, e); + unsafe fn test_mm512_permutexvar_ph() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let idx = _mm512_set_epi16( + 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 1, 3, 5, 7, 9, 11, 13, 15, + 17, 19, 21, 23, 25, 27, 29, 31, + ); + let r = _mm512_permutexvar_ph(idx, a); + let e = _mm512_setr_ph( + 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, 23.0, 25.0, 27.0, 29.0, + 31.0, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0, 22.0, 24.0, 26.0, 28.0, + 30.0, 32.0, + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_sqrt_ph() { - let a = _mm_set1_ph(4.0); - let r = _mm_sqrt_ph(a); - let e = _mm_set1_ph(2.0); + unsafe fn test_mm_cvtepi16_ph() { + let a = _mm_set_epi16(1, 2, 3, 4, 5, 6, 7, 8); + let r = _mm_cvtepi16_ph(a); + let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_sqrt_ph() { - let a = _mm_set1_ph(4.0); - let src = _mm_set1_ph(1.0); - let r = _mm_mask_sqrt_ph(src, 0b01010101, a); - let e = _mm_set_ph(1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0); + unsafe fn test_mm_mask_cvtepi16_ph() { + let a = _mm_set_epi16(1, 2, 3, 4, 5, 6, 7, 8); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm_mask_cvtepi16_ph(src, 0b01010101, a); + let e = _mm_set_ph(10., 2., 12., 4., 14., 6., 16., 8.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_sqrt_ph() { - let a = _mm_set1_ph(4.0); - let r = _mm_maskz_sqrt_ph(0b01010101, a); - let e = _mm_set_ph(0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0); + unsafe fn test_mm_maskz_cvtepi16_ph() { + let a = _mm_set_epi16(1, 2, 3, 4, 5, 6, 7, 8); + let r = _mm_maskz_cvtepi16_ph(0b01010101, a); + let e = _mm_set_ph(0., 2., 0., 4., 0., 6., 0., 8.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_sqrt_ph() { - let a = _mm256_set1_ph(4.0); - let r = _mm256_sqrt_ph(a); - let e = _mm256_set1_ph(2.0); + unsafe fn test_mm256_cvtepi16_ph() { + let a = _mm256_set_epi16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let r = _mm256_cvtepi16_ph(a); + let e = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_sqrt_ph() { - let a = _mm256_set1_ph(4.0); - let src = _mm256_set1_ph(1.0); - let r = _mm256_mask_sqrt_ph(src, 0b0101010101010101, a); + unsafe fn test_mm256_mask_cvtepi16_ph() { + let a = _mm256_set_epi16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let src = _mm256_set_ph( + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., + ); + let r = _mm256_mask_cvtepi16_ph(src, 0b0101010101010101, a); let e = _mm256_set_ph( - 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, + 10., 2., 12., 4., 14., 6., 16., 8., 18., 10., 20., 12., 22., 14., 24., 16., ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_sqrt_ph() { - let a = _mm256_set1_ph(4.0); - let r = _mm256_maskz_sqrt_ph(0b0101010101010101, a); + unsafe fn test_mm256_maskz_cvtepi16_ph() { + let a = 
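/* vcvtw2ph: each signed 16-bit lane is converted to f16; 1..=16 are all exactly representable */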
_mm256_set_epi16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let r = _mm256_maskz_cvtepi16_ph(0b0101010101010101, a); let e = _mm256_set_ph( - 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, + 0., 2., 0., 4., 0., 6., 0., 8., 0., 10., 0., 12., 0., 14., 0., 16., ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_sqrt_ph() { - let a = _mm512_set1_ph(4.0); - let r = _mm512_sqrt_ph(a); - let e = _mm512_set1_ph(2.0); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_sqrt_ph() { - let a = _mm512_set1_ph(4.0); - let src = _mm512_set1_ph(1.0); - let r = _mm512_mask_sqrt_ph(src, 0b01010101010101010101010101010101, a); + unsafe fn test_mm512_cvtepi16_ph() { + let a = _mm512_set_epi16( + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, + ); + let r = _mm512_cvtepi16_ph(a); let e = _mm512_set_ph( - 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, - 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_sqrt_ph() { - let a = _mm512_set1_ph(4.0); - let r = _mm512_maskz_sqrt_ph(0b01010101010101010101010101010101, a); + unsafe fn test_mm512_mask_cvtepi16_ph() { + let a = _mm512_set_epi16( + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, + ); + let src = _mm512_set_ph( + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., 26., + 27., 28., 29., 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., 40., 41., + ); + let r = _mm512_mask_cvtepi16_ph(src, 0b01010101010101010101010101010101, a); let e = _mm512_set_ph( - 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, - 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, + 10., 2., 12., 4., 14., 6., 16., 8., 18., 10., 20., 12., 22., 14., 24., 16., 26., 18., + 28., 20., 30., 22., 32., 24., 34., 26., 36., 28., 38., 30., 40., 32., ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_sqrt_round_ph() { - let a = _mm512_set1_ph(4.0); - let r = _mm512_sqrt_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a); - let e = _mm512_set1_ph(2.0); + unsafe fn test_mm512_maskz_cvtepi16_ph() { + let a = _mm512_set_epi16( + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, + ); + let r = _mm512_maskz_cvtepi16_ph(0b01010101010101010101010101010101, a); + let e = _mm512_set_ph( + 0., 2., 0., 4., 0., 6., 0., 8., 0., 10., 0., 12., 0., 14., 0., 16., 0., 18., 0., 20., + 0., 22., 0., 24., 0., 26., 0., 28., 0., 30., 0., 32., + ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_sqrt_round_ph() { - let a = _mm512_set1_ph(4.0); - let src = _mm512_set1_ph(1.0); - let r = _mm512_mask_sqrt_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, - 0b01010101010101010101010101010101, - a, + unsafe fn test_mm512_cvt_roundepi16_ph() { + let a = _mm512_set_epi16( + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 
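/* the rounding mode is a const generic; these small integers convert exactly, so _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC cannot change the result */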
24, + 25, 26, 27, 28, 29, 30, 31, 32, ); + let r = _mm512_cvt_roundepi16_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a); let e = _mm512_set_ph( - 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, - 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_sqrt_round_ph() { - let a = _mm512_set1_ph(4.0); - let r = _mm512_maskz_sqrt_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_mask_cvt_roundepi16_ph() { + let a = _mm512_set_epi16( + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, + ); + let src = _mm512_set_ph( + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., 26., + 27., 28., 29., 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., 40., 41., + ); + let r = _mm512_mask_cvt_roundepi16_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0b01010101010101010101010101010101, a, ); let e = _mm512_set_ph( - 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, - 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, + 10., 2., 12., 4., 14., 6., 16., 8., 18., 10., 20., 12., 22., 14., 24., 16., 26., 18., + 28., 20., 30., 22., 32., 24., 34., 26., 36., 28., 38., 30., 40., 32., ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_sqrt_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); - let r = _mm_sqrt_sh(a, b); - let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_sqrt_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); - let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); - let r = _mm_mask_sqrt_sh(src, 0, a, b); - let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - assert_eq_m128h(r, e); - let r = _mm_mask_sqrt_sh(src, 1, a, b); - let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_sqrt_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); - let r = _mm_maskz_sqrt_sh(0, a, b); - let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - assert_eq_m128h(r, e); - let r = _mm_maskz_sqrt_sh(1, a, b); - let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_sqrt_round_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); - let r = _mm_sqrt_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_sqrt_round_sh() { - let a = 
_mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); - let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); - let r = _mm_mask_sqrt_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 0, a, b, + unsafe fn test_mm512_maskz_cvt_roundepi16_ph() { + let a = _mm512_set_epi16( + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, ); - let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - assert_eq_m128h(r, e); - let r = _mm_mask_sqrt_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 1, a, b, + let r = _mm512_maskz_cvt_roundepi16_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101010101010101010101010101, + a, ); - let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_sqrt_round_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(4.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0); - let r = - _mm_maskz_sqrt_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); - let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - assert_eq_m128h(r, e); - let r = - _mm_maskz_sqrt_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); - let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - assert_eq_m128h(r, e); + let e = _mm512_set_ph( + 0., 2., 0., 4., 0., 6., 0., 8., 0., 10., 0., 12., 0., 14., 0., 16., 0., 18., 0., 20., + 0., 22., 0., 24., 0., 26., 0., 28., 0., 30., 0., 32., + ); + assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_max_ph() { - let a = _mm_set1_ph(2.0); - let b = _mm_set1_ph(1.0); - let r = _mm_max_ph(a, b); - let e = _mm_set1_ph(2.0); + unsafe fn test_mm_cvtepu16_ph() { + let a = _mm_set_epi16(1, 2, 3, 4, 5, 6, 7, 8); + let r = _mm_cvtepu16_ph(a); + let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_max_ph() { - let a = _mm_set1_ph(2.0); - let b = _mm_set1_ph(1.0); - let src = _mm_set1_ph(3.0); - let r = _mm_mask_max_ph(src, 0b01010101, a, b); - let e = _mm_set_ph(3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0); + unsafe fn test_mm_mask_cvtepu16_ph() { + let a = _mm_set_epi16(1, 2, 3, 4, 5, 6, 7, 8); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm_mask_cvtepu16_ph(src, 0b01010101, a); + let e = _mm_set_ph(10., 2., 12., 4., 14., 6., 16., 8.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_max_ph() { - let a = _mm_set1_ph(2.0); - let b = _mm_set1_ph(1.0); - let r = _mm_maskz_max_ph(0b01010101, a, b); - let e = _mm_set_ph(0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0); + unsafe fn test_mm_maskz_cvtepu16_ph() { + let a = _mm_set_epi16(1, 2, 3, 4, 5, 6, 7, 8); + let r = _mm_maskz_cvtepu16_ph(0b01010101, a); + let e = _mm_set_ph(0., 2., 0., 4., 0., 6., 0., 8.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_max_ph() { - let a = _mm256_set1_ph(2.0); - let b = _mm256_set1_ph(1.0); - let r = _mm256_max_ph(a, b); - let e = _mm256_set1_ph(2.0); + unsafe fn test_mm256_cvtepu16_ph() { + let a = _mm256_set_epi16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let r = _mm256_cvtepu16_ph(a); + let e = 
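/* unsigned variant (vcvtuw2ph): lanes are treated as u16, which would differ from the signed form only for inputs >= 0x8000 */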
_mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_max_ph() { - let a = _mm256_set1_ph(2.0); - let b = _mm256_set1_ph(1.0); - let src = _mm256_set1_ph(3.0); - let r = _mm256_mask_max_ph(src, 0b0101010101010101, a, b); + unsafe fn test_mm256_mask_cvtepu16_ph() { + let a = _mm256_set_epi16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let src = _mm256_set_ph( + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., + ); + let r = _mm256_mask_cvtepu16_ph(src, 0b0101010101010101, a); let e = _mm256_set_ph( - 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, + 10., 2., 12., 4., 14., 6., 16., 8., 18., 10., 20., 12., 22., 14., 24., 16., ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_max_ph() { - let a = _mm256_set1_ph(2.0); - let b = _mm256_set1_ph(1.0); - let r = _mm256_maskz_max_ph(0b0101010101010101, a, b); + unsafe fn test_mm256_maskz_cvtepu16_ph() { + let a = _mm256_set_epi16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let r = _mm256_maskz_cvtepu16_ph(0b0101010101010101, a); let e = _mm256_set_ph( - 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, + 0., 2., 0., 4., 0., 6., 0., 8., 0., 10., 0., 12., 0., 14., 0., 16., ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_max_ph() { - let a = _mm512_set1_ph(2.0); - let b = _mm512_set1_ph(1.0); - let r = _mm512_max_ph(a, b); - let e = _mm512_set1_ph(2.0); + unsafe fn test_mm512_cvtepu16_ph() { + let a = _mm512_set_epi16( + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, + ); + let r = _mm512_cvtepu16_ph(a); + let e = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_max_ph() { - let a = _mm512_set1_ph(2.0); - let b = _mm512_set1_ph(1.0); - let src = _mm512_set1_ph(3.0); - let r = _mm512_mask_max_ph(src, 0b01010101010101010101010101010101, a, b); + unsafe fn test_mm512_mask_cvtepu16_ph() { + let a = _mm512_set_epi16( + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, + ); + let src = _mm512_set_ph( + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., 26., + 27., 28., 29., 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., 40., 41., + ); + let r = _mm512_mask_cvtepu16_ph(src, 0b01010101010101010101010101010101, a); let e = _mm512_set_ph( - 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, - 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, + 10., 2., 12., 4., 14., 6., 16., 8., 18., 10., 20., 12., 22., 14., 24., 16., 26., 18., + 28., 20., 30., 22., 32., 24., 34., 26., 36., 28., 38., 30., 40., 32., ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_max_ph() { - let a = _mm512_set1_ph(2.0); - let b = _mm512_set1_ph(1.0); - let r = _mm512_maskz_max_ph(0b01010101010101010101010101010101, a, b); + unsafe fn test_mm512_maskz_cvtepu16_ph() { + let a = _mm512_set_epi16( + 1, 2, 3, 4, 5, 6, 7, 
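/* maskz_* zeroes each lane whose mask bit is clear instead of passing a src value through */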
8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, + ); + let r = _mm512_maskz_cvtepu16_ph(0b01010101010101010101010101010101, a); let e = _mm512_set_ph( - 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, - 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, + 0., 2., 0., 4., 0., 6., 0., 8., 0., 10., 0., 12., 0., 14., 0., 16., 0., 18., 0., 20., + 0., 22., 0., 24., 0., 26., 0., 28., 0., 30., 0., 32., ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_max_round_ph() { - let a = _mm512_set1_ph(2.0); - let b = _mm512_set1_ph(1.0); - let r = _mm512_max_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm512_set1_ph(2.0); + unsafe fn test_mm512_cvt_roundepu16_ph() { + let a = _mm512_set_epi16( + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, + ); + let r = _mm512_cvt_roundepu16_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a); + let e = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_max_round_ph() { - let a = _mm512_set1_ph(2.0); - let b = _mm512_set1_ph(1.0); - let src = _mm512_set1_ph(3.0); - let r = _mm512_mask_max_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_mask_cvt_roundepu16_ph() { + let a = _mm512_set_epi16( + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, + ); + let src = _mm512_set_ph( + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., 26., + 27., 28., 29., 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., 40., 41., + ); + let r = _mm512_mask_cvt_roundepu16_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( src, 0b01010101010101010101010101010101, a, - b, ); let e = _mm512_set_ph( - 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, - 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, + 10., 2., 12., 4., 14., 6., 16., 8., 18., 10., 20., 12., 22., 14., 24., 16., 26., 18., + 28., 20., 30., 22., 32., 24., 34., 26., 36., 28., 38., 30., 40., 32., ); assert_eq_m512h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_max_round_ph() { - let a = _mm512_set1_ph(2.0); - let b = _mm512_set1_ph(1.0); - let r = _mm512_maskz_max_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_maskz_cvt_roundepu16_ph() { + let a = _mm512_set_epi16( + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, + ); + let r = _mm512_maskz_cvt_roundepu16_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( 0b01010101010101010101010101010101, a, - b, ); let e = _mm512_set_ph( - 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, - 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, + 0., 2., 0., 4., 0., 6., 0., 8., 0., 10., 0., 12., 0., 14., 0., 16., 0., 18., 0., 20., + 0., 22., 0., 24., 0., 26., 0., 28., 0., 30., 0., 32., ); assert_eq_m512h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_max_sh() { - let a = 
_mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); - let r = _mm_max_sh(a, b); - let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_max_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); - let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); - let r = _mm_mask_max_sh(src, 0, a, b); - let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - assert_eq_m128h(r, e); - let r = _mm_mask_max_sh(src, 1, a, b); - let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_max_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); - let r = _mm_maskz_max_sh(0, a, b); - let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - assert_eq_m128h(r, e); - let r = _mm_maskz_max_sh(1, a, b); - let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_max_round_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); - let r = _mm_max_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_max_round_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); - let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); - let r = _mm_mask_max_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 0, a, b, - ); - let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - assert_eq_m128h(r, e); - let r = _mm_mask_max_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 1, a, b, - ); - let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_max_round_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); - let r = - _mm_maskz_max_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); - let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - assert_eq_m128h(r, e); - let r = - _mm_maskz_max_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); - let e = _mm_setr_ph(2.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_cvtepi32_ph() { + let a = _mm_set_epi32(1, 2, 3, 4); + let r = _mm_cvtepi32_ph(a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_min_ph() { - let a = _mm_set1_ph(2.0); - let b = _mm_set1_ph(1.0); - let r = _mm_min_ph(a, b); - let e = _mm_set1_ph(1.0); + unsafe fn test_mm_mask_cvtepi32_ph() { + let a = _mm_set_epi32(1, 2, 3, 4); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 
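/* with a 128-bit epi32 source only the low 4 f16 lanes carry results; the upper 4 are zeroed regardless of mask or src */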
16., 17.); + let r = _mm_mask_cvtepi32_ph(src, 0b0101, a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 14., 2., 16., 4.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_min_ph() { - let a = _mm_set1_ph(2.0); - let b = _mm_set1_ph(1.0); - let src = _mm_set1_ph(3.0); - let r = _mm_mask_min_ph(src, 0b01010101, a, b); - let e = _mm_set_ph(3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0); + unsafe fn test_mm_maskz_cvtepi32_ph() { + let a = _mm_set_epi32(1, 2, 3, 4); + let r = _mm_maskz_cvtepi32_ph(0b0101, a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 2., 0.0, 4.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_min_ph() { - let a = _mm_set1_ph(2.0); - let b = _mm_set1_ph(1.0); - let r = _mm_maskz_min_ph(0b01010101, a, b); - let e = _mm_set_ph(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0); + unsafe fn test_mm256_cvtepi32_ph() { + let a = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); + let r = _mm256_cvtepi32_ph(a); + let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_min_ph() { - let a = _mm256_set1_ph(2.0); - let b = _mm256_set1_ph(1.0); - let r = _mm256_min_ph(a, b); - let e = _mm256_set1_ph(1.0); - assert_eq_m256h(r, e); + unsafe fn test_mm256_mask_cvtepi32_ph() { + let a = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm256_mask_cvtepi32_ph(src, 0b01010101, a); + let e = _mm_set_ph(10., 2., 12., 4., 14., 6., 16., 8.); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_min_ph() { - let a = _mm256_set1_ph(2.0); - let b = _mm256_set1_ph(1.0); - let src = _mm256_set1_ph(3.0); - let r = _mm256_mask_min_ph(src, 0b0101010101010101, a, b); - let e = _mm256_set_ph( - 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, - ); - assert_eq_m256h(r, e); + unsafe fn test_mm256_maskz_cvtepi32_ph() { + let a = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); + let r = _mm256_maskz_cvtepi32_ph(0b01010101, a); + let e = _mm_set_ph(0.0, 2.0, 0.0, 4.0, 0.0, 6.0, 0.0, 8.0); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_min_ph() { - let a = _mm256_set1_ph(2.0); - let b = _mm256_set1_ph(1.0); - let r = _mm256_maskz_min_ph(0b0101010101010101, a, b); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_cvtepi32_ph() { + let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let r = _mm512_cvtepi32_ph(a); let e = _mm256_set_ph( - 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_min_ph() { - let a = _mm512_set1_ph(2.0); - let b = _mm512_set1_ph(1.0); - let r = _mm512_min_ph(a, b); - let e = _mm512_set1_ph(1.0); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_min_ph() { - let a = _mm512_set1_ph(2.0); - let b = _mm512_set1_ph(1.0); - let src = _mm512_set1_ph(3.0); - let r = _mm512_mask_min_ph(src, 0b01010101010101010101010101010101, a, b); - let e = _mm512_set_ph( - 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, - 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, + unsafe fn 
test_mm512_mask_cvtepi32_ph() { + let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let src = _mm256_set_ph( + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., ); - assert_eq_m512h(r, e); + let r = _mm512_mask_cvtepi32_ph(src, 0b0101010101010101, a); + let e = _mm256_set_ph( + 10., 2., 12., 4., 14., 6., 16., 8., 18., 10., 20., 12., 22., 14., 24., 16., + ); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_min_ph() { - let a = _mm512_set1_ph(2.0); - let b = _mm512_set1_ph(1.0); - let r = _mm512_maskz_min_ph(0b01010101010101010101010101010101, a, b); - let e = _mm512_set_ph( - 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, - 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, + unsafe fn test_mm512_maskz_cvtepi32_ph() { + let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let r = _mm512_maskz_cvtepi32_ph(0b0101010101010101, a); + let e = _mm256_set_ph( + 0.0, 2.0, 0.0, 4.0, 0.0, 6.0, 0.0, 8.0, 0.0, 10.0, 0.0, 12.0, 0.0, 14.0, 0.0, 16.0, ); - assert_eq_m512h(r, e); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_min_round_ph() { - let a = _mm512_set1_ph(2.0); - let b = _mm512_set1_ph(1.0); - let r = _mm512_min_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm512_set1_ph(1.0); - assert_eq_m512h(r, e); + unsafe fn test_mm512_cvt_roundepi32_ph() { + let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let r = _mm512_cvt_roundepi32_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a); + let e = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_min_round_ph() { - let a = _mm512_set1_ph(2.0); - let b = _mm512_set1_ph(1.0); - let src = _mm512_set1_ph(3.0); - let r = _mm512_mask_min_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm512_mask_cvt_roundepi32_ph() { + let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let src = _mm256_set_ph( + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., + ); + let r = _mm512_mask_cvt_roundepi32_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( src, - 0b01010101010101010101010101010101, + 0b0101010101010101, a, - b, ); - let e = _mm512_set_ph( - 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, - 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, + let e = _mm256_set_ph( + 10., 2., 12., 4., 14., 6., 16., 8., 18., 10., 20., 12., 22., 14., 24., 16., ); - assert_eq_m512h(r, e); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_min_round_ph() { - let a = _mm512_set1_ph(2.0); - let b = _mm512_set1_ph(1.0); - let r = _mm512_maskz_min_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b01010101010101010101010101010101, + unsafe fn test_mm512_maskz_cvt_roundepi32_ph() { + let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let r = _mm512_maskz_cvt_roundepi32_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b0101010101010101, a, - b, ); - let e = _mm512_set_ph( - 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, - 1.0, 0.0, 1.0, 0.0, 1.0, 
0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, + let e = _mm256_set_ph( + 0.0, 2.0, 0.0, 4.0, 0.0, 6.0, 0.0, 8.0, 0.0, 10.0, 0.0, 12.0, 0.0, 14.0, 0.0, 16.0, ); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_min_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); - let r = _mm_min_sh(a, b); - let e = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - assert_eq_m128h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_min_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); - let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); - let r = _mm_mask_min_sh(src, 0, a, b); - let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - assert_eq_m128h(r, e); - let r = _mm_mask_min_sh(src, 1, a, b); - let e = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - assert_eq_m128h(r, e); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_min_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); - let r = _mm_maskz_min_sh(0, a, b); - let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - assert_eq_m128h(r, e); - let r = _mm_maskz_min_sh(1, a, b); - let e = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + unsafe fn test_mm_cvti32_sh() { + let a = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm_cvti32_sh(a, 10); + let e = _mm_setr_ph(10.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_min_round_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); - let r = _mm_min_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + unsafe fn test_mm_cvt_roundi32_sh() { + let a = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm_cvt_roundi32_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, 10); + let e = _mm_setr_ph(10.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_min_round_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0); - let src = _mm_setr_ph(3.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0); - let r = _mm_mask_min_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 0, a, b, - ); - let e = _mm_setr_ph(3.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - assert_eq_m128h(r, e); - let r = _mm_mask_min_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 1, a, b, - ); - let e = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_cvtepu32_ph() { + let a = _mm_set_epi32(1, 2, 3, 4); + let r = _mm_cvtepu32_ph(a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_min_round_sh() { - let a = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let b = _mm_setr_ph(2.0, 20.0, 21.0, 22.0, 23.0, 
24.0, 25.0, 26.0); - let r = - _mm_maskz_min_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); - let e = _mm_setr_ph(0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - assert_eq_m128h(r, e); - let r = - _mm_maskz_min_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); - let e = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_cvtepu32_ph() { + let a = _mm_set_epi32(1, 2, 3, 4); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm_mask_cvtepu32_ph(src, 0b0101, a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 14., 2., 16., 4.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_getexp_ph() { - let a = _mm_set1_ph(3.0); - let r = _mm_getexp_ph(a); - let e = _mm_set1_ph(1.0); + unsafe fn test_mm_maskz_cvtepu32_ph() { + let a = _mm_set_epi32(1, 2, 3, 4); + let r = _mm_maskz_cvtepu32_ph(0b0101, a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 2., 0.0, 4.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_getexp_ph() { - let a = _mm_set1_ph(3.0); - let src = _mm_set1_ph(4.0); - let r = _mm_mask_getexp_ph(src, 0b01010101, a); - let e = _mm_set_ph(4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0); + unsafe fn test_mm256_cvtepu32_ph() { + let a = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); + let r = _mm256_cvtepu32_ph(a); + let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_getexp_ph() { - let a = _mm_set1_ph(3.0); - let r = _mm_maskz_getexp_ph(0b01010101, a); - let e = _mm_set_ph(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0); + unsafe fn test_mm256_mask_cvtepu32_ph() { + let a = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm256_mask_cvtepu32_ph(src, 0b01010101, a); + let e = _mm_set_ph(10., 2., 12., 4., 14., 6., 16., 8.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_getexp_ph() { - let a = _mm256_set1_ph(3.0); - let r = _mm256_getexp_ph(a); - let e = _mm256_set1_ph(1.0); - assert_eq_m256h(r, e); + unsafe fn test_mm256_maskz_cvtepu32_ph() { + let a = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); + let r = _mm256_maskz_cvtepu32_ph(0b01010101, a); + let e = _mm_set_ph(0.0, 2.0, 0.0, 4.0, 0.0, 6.0, 0.0, 8.0); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_getexp_ph() { - let a = _mm256_set1_ph(3.0); - let src = _mm256_set1_ph(4.0); - let r = _mm256_mask_getexp_ph(src, 0b0101010101010101, a); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_cvtepu32_ph() { + let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let r = _mm512_cvtepu32_ph(a); let e = _mm256_set_ph( - 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); assert_eq_m256h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_getexp_ph() { - let a = _mm256_set1_ph(3.0); - let r = _mm256_maskz_getexp_ph(0b0101010101010101, a); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_cvtepu32_ph() { + let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let src = _mm256_set_ph( + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 
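/* src supplies the pass-through value for every lane whose mask bit is 0 */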
22., 23., 24., 25., + ); + let r = _mm512_mask_cvtepu32_ph(src, 0b0101010101010101, a); let e = _mm256_set_ph( - 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, + 10., 2.0, 12., 4.0, 14., 6.0, 16., 8.0, 18., 10.0, 20., 12.0, 22., 14.0, 24., 16.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_getexp_ph() { - let a = _mm512_set1_ph(3.0); - let r = _mm512_getexp_ph(a); - let e = _mm512_set1_ph(1.0); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_getexp_ph() { - let a = _mm512_set1_ph(3.0); - let src = _mm512_set1_ph(4.0); - let r = _mm512_mask_getexp_ph(src, 0b01010101010101010101010101010101, a); - let e = _mm512_set_ph( - 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, - 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, + unsafe fn test_mm512_maskz_cvtepu32_ph() { + let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let r = _mm512_maskz_cvtepu32_ph(0b0101010101010101, a); + let e = _mm256_set_ph( + 0.0, 2.0, 0.0, 4.0, 0.0, 6.0, 0.0, 8.0, 0.0, 10.0, 0.0, 12.0, 0.0, 14.0, 0.0, 16.0, ); - assert_eq_m512h(r, e); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_getexp_ph() { - let a = _mm512_set1_ph(3.0); - let r = _mm512_maskz_getexp_ph(0b01010101010101010101010101010101, a); - let e = _mm512_set_ph( - 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, - 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, + unsafe fn test_mm512_cvt_roundepu32_ph() { + let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let r = _mm512_cvt_roundepu32_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a); + let e = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_getexp_round_ph() { - let a = _mm512_set1_ph(3.0); - let r = _mm512_getexp_round_ph::<_MM_FROUND_NO_EXC>(a); - let e = _mm512_set1_ph(1.0); - assert_eq_m512h(r, e); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_getexp_round_ph() { - let a = _mm512_set1_ph(3.0); - let src = _mm512_set1_ph(4.0); - let r = _mm512_mask_getexp_round_ph::<_MM_FROUND_NO_EXC>( + unsafe fn test_mm512_mask_cvt_roundepu32_ph() { + let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let src = _mm256_set_ph( + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., + ); + let r = _mm512_mask_cvt_roundepu32_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( src, - 0b01010101010101010101010101010101, + 0b0101010101010101, a, ); - let e = _mm512_set_ph( - 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, - 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, + let e = _mm256_set_ph( + 10.0, 2.0, 12.0, 4.0, 14.0, 6.0, 16.0, 8.0, 18.0, 10.0, 20.0, 12.0, 22.0, 14.0, 24.0, + 16.0, ); - assert_eq_m512h(r, e); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_getexp_round_ph() { - let a = _mm512_set1_ph(3.0); - let r = _mm512_maskz_getexp_round_ph::<_MM_FROUND_NO_EXC>( - 0b01010101010101010101010101010101, + unsafe fn test_mm512_maskz_cvt_roundepu32_ph() { + let a = _mm512_set_epi32(1, 2, 3, 4, 5, 
6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let r = _mm512_maskz_cvt_roundepu32_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b0101010101010101, a, ); - let e = _mm512_set_ph( - 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, - 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, + let e = _mm256_set_ph( + 0.0, 2.0, 0.0, 4.0, 0.0, 6.0, 0.0, 8.0, 0.0, 10.0, 0.0, 12.0, 0.0, 14.0, 0.0, 16.0, ); - assert_eq_m512h(r, e); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_getexp_sh() { - let a = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); - let r = _mm_getexp_sh(a, b); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + unsafe fn test_mm_cvtu32_sh() { + let a = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm_cvtu32_sh(a, 10); + let e = _mm_setr_ph(10.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_getexp_sh() { - let a = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); - let src = _mm_setr_ph(4.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_getexp_sh(src, 0, a, b); - let e = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); + unsafe fn test_mm_cvt_roundu32_sh() { + let a = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm_cvt_roundu32_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, 10); + let e = _mm_setr_ph(10.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); assert_eq_m128h(r, e); - let r = _mm_mask_getexp_sh(src, 1, a, b); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_cvtepi64_ph() { + let a = _mm_set_epi64x(1, 2); + let r = _mm_cvtepi64_ph(a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 2.0); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_getexp_sh() { - let a = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); - let r = _mm_maskz_getexp_sh(0, a, b); - let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_cvtepi64_ph() { + let a = _mm_set_epi64x(1, 2); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm_mask_cvtepi64_ph(src, 0b01, a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 16., 2.); assert_eq_m128h(r, e); - let r = _mm_maskz_getexp_sh(1, a, b); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_cvtepi64_ph() { + let a = _mm_set_epi64x(1, 2); + let r = _mm_maskz_cvtepi64_ph(0b01, a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_cvtepi64_ph() { + let a = _mm256_set_epi64x(1, 2, 3, 4); + let r = _mm256_cvtepi64_ph(a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_cvtepi64_ph() { + let a = _mm256_set_epi64x(1, 2, 3, 4); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm256_mask_cvtepi64_ph(src, 0b0101, a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 
14., 2.0, 16.0, 4.0); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_cvtepi64_ph() { + let a = _mm256_set_epi64x(1, 2, 3, 4); + let r = _mm256_maskz_cvtepi64_ph(0b0101, a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 0.0, 4.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_getexp_round_sh() { - let a = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); - let r = _mm_getexp_round_sh::<_MM_FROUND_NO_EXC>(a, b); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + unsafe fn test_mm512_cvtepi64_ph() { + let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); + let r = _mm512_cvtepi64_ph(a); + let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_getexp_round_sh() { - let a = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); - let src = _mm_setr_ph(4.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_getexp_round_sh::<_MM_FROUND_NO_EXC>(src, 0, a, b); - let e = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); + unsafe fn test_mm512_mask_cvtepi64_ph() { + let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm512_mask_cvtepi64_ph(src, 0b01010101, a); + let e = _mm_set_ph(10., 2., 12., 4., 14., 6., 16., 8.); assert_eq_m128h(r, e); - let r = _mm_mask_getexp_round_sh::<_MM_FROUND_NO_EXC>(src, 1, a, b); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_cvtepi64_ph() { + let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); + let r = _mm512_maskz_cvtepi64_ph(0b01010101, a); + let e = _mm_set_ph(0.0, 2., 0.0, 4., 0.0, 6., 0.0, 8.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_getexp_round_sh() { - let a = _mm_setr_ph(4.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); - let r = _mm_maskz_getexp_round_sh::<_MM_FROUND_NO_EXC>(0, a, b); - let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); + unsafe fn test_mm512_cvt_roundepi64_ph() { + let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); + let r = _mm512_cvt_roundepi64_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a); + let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); assert_eq_m128h(r, e); - let r = _mm_maskz_getexp_round_sh::<_MM_FROUND_NO_EXC>(1, a, b); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_cvt_roundepi64_ph() { + let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm512_mask_cvt_roundepi64_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0b01010101, a, + ); + let e = _mm_set_ph(10., 2., 12., 4., 14., 6., 16., 8.); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_getmant_ph() { - let a = _mm_set1_ph(10.0); - let r = _mm_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(a); - let e = _mm_set1_ph(1.25); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_cvt_roundepi64_ph() { + let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); + let r = _mm512_maskz_cvt_roundepi64_ph::<{ _MM_FROUND_TO_NEAREST_INT | 
_MM_FROUND_NO_EXC }>( + 0b01010101, a, + ); + let e = _mm_set_ph(0.0, 2., 0.0, 4., 0.0, 6., 0.0, 8.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_getmant_ph() { - let a = _mm_set1_ph(10.0); - let src = _mm_set1_ph(20.0); - let r = _mm_mask_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(src, 0b01010101, a); - let e = _mm_set_ph(20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25); + unsafe fn test_mm_cvtepu64_ph() { + let a = _mm_set_epi64x(1, 2); + let r = _mm_cvtepu64_ph(a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 2.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_getmant_ph() { - let a = _mm_set1_ph(10.0); - let r = _mm_maskz_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(0b01010101, a); - let e = _mm_set_ph(0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25); + unsafe fn test_mm_mask_cvtepu64_ph() { + let a = _mm_set_epi64x(1, 2); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm_mask_cvtepu64_ph(src, 0b01, a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 16., 2.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_getmant_ph() { - let a = _mm256_set1_ph(10.0); - let r = _mm256_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(a); - let e = _mm256_set1_ph(1.25); - assert_eq_m256h(r, e); + unsafe fn test_mm_maskz_cvtepu64_ph() { + let a = _mm_set_epi64x(1, 2); + let r = _mm_maskz_cvtepu64_ph(0b01, a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_getmant_ph() { - let a = _mm256_set1_ph(10.0); - let src = _mm256_set1_ph(20.0); - let r = _mm256_mask_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>( - src, - 0b0101010101010101, - a, - ); - let e = _mm256_set_ph( - 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, - 20.0, 1.25, - ); - assert_eq_m256h(r, e); + unsafe fn test_mm256_cvtepu64_ph() { + let a = _mm256_set_epi64x(1, 2, 3, 4); + let r = _mm256_cvtepu64_ph(a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_getmant_ph() { - let a = _mm256_set1_ph(10.0); - let r = _mm256_maskz_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>( - 0b0101010101010101, - a, - ); - let e = _mm256_set_ph( - 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, - ); - assert_eq_m256h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_getmant_ph() { - let a = _mm512_set1_ph(10.0); - let r = _mm512_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(a); - let e = _mm512_set1_ph(1.25); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_getmant_ph() { - let a = _mm512_set1_ph(10.0); - let src = _mm512_set1_ph(20.0); - let r = _mm512_mask_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>( - src, - 0b01010101010101010101010101010101, - a, - ); - let e = _mm512_set_ph( - 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, - 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, - 20.0, 1.25, 20.0, 1.25, - ); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_getmant_ph() { - let a = _mm512_set1_ph(10.0); - let r = 
_mm512_maskz_getmant_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>( - 0b01010101010101010101010101010101, - a, - ); - let e = _mm512_set_ph( - 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, - 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, - ); - assert_eq_m512h(r, e); + unsafe fn test_mm256_mask_cvtepu64_ph() { + let a = _mm256_set_epi64x(1, 2, 3, 4); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm256_mask_cvtepu64_ph(src, 0b0101, a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 14., 2.0, 16.0, 4.0); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_getmant_round_ph() { - let a = _mm512_set1_ph(10.0); - let r = - _mm512_getmant_round_ph::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN, _MM_FROUND_NO_EXC>( - a, - ); - let e = _mm512_set1_ph(1.25); - assert_eq_m512h(r, e); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_cvtepu64_ph() { + let a = _mm256_set_epi64x(1, 2, 3, 4); + let r = _mm256_maskz_cvtepu64_ph(0b0101, a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 0.0, 4.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_getmant_round_ph() { - let a = _mm512_set1_ph(10.0); - let src = _mm512_set1_ph(20.0); - let r = _mm512_mask_getmant_round_ph::< - _MM_MANT_NORM_P75_1P5, - _MM_MANT_SIGN_NAN, - _MM_FROUND_NO_EXC, - >(src, 0b01010101010101010101010101010101, a); - let e = _mm512_set_ph( - 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, - 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, 20.0, 1.25, - 20.0, 1.25, 20.0, 1.25, - ); - assert_eq_m512h(r, e); + unsafe fn test_mm512_cvtepu64_ph() { + let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); + let r = _mm512_cvtepu64_ph(a); + let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_getmant_round_ph() { - let a = _mm512_set1_ph(10.0); - let r = _mm512_maskz_getmant_round_ph::< - _MM_MANT_NORM_P75_1P5, - _MM_MANT_SIGN_NAN, - _MM_FROUND_NO_EXC, - >(0b01010101010101010101010101010101, a); - let e = _mm512_set_ph( - 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, - 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, 0.0, 1.25, - ); - assert_eq_m512h(r, e); + unsafe fn test_mm512_mask_cvtepu64_ph() { + let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm512_mask_cvtepu64_ph(src, 0b01010101, a); + let e = _mm_set_ph(10., 2., 12., 4., 14., 6., 16., 8.); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_getmant_sh() { - let a = _mm_setr_ph(15.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(10.0, 20., 21., 22., 23., 24., 25., 26.); - let r = _mm_getmant_sh::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(a, b); - let e = _mm_setr_ph(1.25, 10., 11., 12., 13., 14., 15., 16.); + unsafe fn test_mm512_maskz_cvtepu64_ph() { + let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); + let r = _mm512_maskz_cvtepu64_ph(0b01010101, a); + let e = _mm_set_ph(0.0, 2., 0.0, 4., 0.0, 6., 0.0, 8.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_getmant_sh() { - let a = _mm_setr_ph(15.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(10.0, 20., 21., 22., 23., 24., 25., 26.); - let 
src = _mm_setr_ph(20.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_getmant_sh::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(src, 0, a, b); - let e = _mm_setr_ph(20.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_mask_getmant_sh::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(src, 1, a, b); - let e = _mm_setr_ph(1.25, 10., 11., 12., 13., 14., 15., 16.); + unsafe fn test_mm512_cvt_roundepu64_ph() { + let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); + let r = _mm512_cvt_roundepu64_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a); + let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_getmant_sh() { - let a = _mm_setr_ph(15.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(10.0, 20., 21., 22., 23., 24., 25., 26.); - let r = _mm_maskz_getmant_sh::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(0, a, b); - let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_maskz_getmant_sh::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN>(1, a, b); - let e = _mm_setr_ph(1.25, 10., 11., 12., 13., 14., 15., 16.); + unsafe fn test_mm512_mask_cvt_roundepu64_ph() { + let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm512_mask_cvt_roundepu64_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0b01010101, a, + ); + let e = _mm_set_ph(10., 2., 12., 4., 14., 6., 16., 8.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_getmant_round_sh() { - let a = _mm_setr_ph(15.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(10.0, 20., 21., 22., 23., 24., 25., 26.); - let r = _mm_getmant_round_sh::<_MM_MANT_NORM_P75_1P5, _MM_MANT_SIGN_NAN, _MM_FROUND_NO_EXC>( - a, b, + unsafe fn test_mm512_maskz_cvt_roundepu64_ph() { + let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); + let r = _mm512_maskz_cvt_roundepu64_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101, a, ); - let e = _mm_setr_ph(1.25, 10., 11., 12., 13., 14., 15., 16.); + let e = _mm_set_ph(0.0, 2., 0.0, 4., 0.0, 6., 0.0, 8.); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_getmant_round_sh() { - let a = _mm_setr_ph(15.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(10.0, 20., 21., 22., 23., 24., 25., 26.); - let src = _mm_setr_ph(20.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_getmant_round_sh::< - _MM_MANT_NORM_P75_1P5, - _MM_MANT_SIGN_NAN, - _MM_FROUND_NO_EXC, - >(src, 0, a, b); - let e = _mm_setr_ph(20.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_mask_getmant_round_sh::< - _MM_MANT_NORM_P75_1P5, - _MM_MANT_SIGN_NAN, - _MM_FROUND_NO_EXC, - >(src, 1, a, b); - let e = _mm_setr_ph(1.25, 10., 11., 12., 13., 14., 15., 16.); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_cvtxps_ph() { + let a = _mm_set_ps(1.0, 2.0, 3.0, 4.0); + let r = _mm_cvtxps_ph(a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_getmant_round_sh() { - let a = _mm_setr_ph(15.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(10.0, 20., 21., 22., 23., 24., 25., 26.); - let r = _mm_maskz_getmant_round_sh::< - _MM_MANT_NORM_P75_1P5, - _MM_MANT_SIGN_NAN, - _MM_FROUND_NO_EXC, - >(0, a, b); - let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); - 
assert_eq_m128h(r, e); - let r = _mm_maskz_getmant_round_sh::< - _MM_MANT_NORM_P75_1P5, - _MM_MANT_SIGN_NAN, - _MM_FROUND_NO_EXC, - >(1, a, b); - let e = _mm_setr_ph(1.25, 10., 11., 12., 13., 14., 15., 16.); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_cvtxps_ph() { + let a = _mm_set_ps(1.0, 2.0, 3.0, 4.0); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm_mask_cvtxps_ph(src, 0b0101, a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 14., 2.0, 16., 4.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_roundscale_ph() { - let a = _mm_set1_ph(1.1); - let r = _mm_roundscale_ph::<0>(a); - let e = _mm_set1_ph(1.0); + unsafe fn test_mm_maskz_cvtxps_ph() { + let a = _mm_set_ps(1.0, 2.0, 3.0, 4.0); + let r = _mm_maskz_cvtxps_ph(0b0101, a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 0.0, 4.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_roundscale_ph() { - let a = _mm_set1_ph(1.1); - let src = _mm_set1_ph(2.0); - let r = _mm_mask_roundscale_ph::<0>(src, 0b01010101, a); - let e = _mm_set_ph(2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0); + unsafe fn test_mm256_cvtxps_ph() { + let a = _mm256_set_ps(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm256_cvtxps_ph(a); + let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_roundscale_ph() { - let a = _mm_set1_ph(1.1); - let r = _mm_maskz_roundscale_ph::<0>(0b01010101, a); - let e = _mm_set_ph(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0); + unsafe fn test_mm256_mask_cvtxps_ph() { + let a = _mm256_set_ps(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm256_mask_cvtxps_ph(src, 0b01010101, a); + let e = _mm_set_ph(10., 2., 12., 4., 14., 6., 16., 8.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_roundscale_ph() { - let a = _mm256_set1_ph(1.1); - let r = _mm256_roundscale_ph::<0>(a); - let e = _mm256_set1_ph(1.0); - assert_eq_m256h(r, e); + unsafe fn test_mm256_maskz_cvtxps_ph() { + let a = _mm256_set_ps(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm256_maskz_cvtxps_ph(0b01010101, a); + let e = _mm_set_ph(0.0, 2.0, 0.0, 4.0, 0.0, 6.0, 0.0, 8.0); + assert_eq_m128h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_roundscale_ph() { - let a = _mm256_set1_ph(1.1); - let src = _mm256_set1_ph(2.0); - let r = _mm256_mask_roundscale_ph::<0>(src, 0b0101010101010101, a); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_cvtxps_ph() { + let a = _mm512_set_ps( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let r = _mm512_cvtxps_ph(a); let e = _mm256_set_ph( - 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); assert_eq_m256h(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_roundscale_ph() { - let a = _mm256_set1_ph(1.1); - let r = _mm256_maskz_roundscale_ph::<0>(0b0101010101010101, a); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_cvtxps_ph() { + let a = _mm512_set_ps( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let src = _mm256_set_ph( + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 
21., 22., 23., 24., 25., + ); + let r = _mm512_mask_cvtxps_ph(src, 0b0101010101010101, a); let e = _mm256_set_ph( - 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, + 10., 2.0, 12., 4.0, 14., 6.0, 16., 8.0, 18., 10.0, 20., 12.0, 22., 14.0, 24., 16.0, ); assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_roundscale_ph() { - let a = _mm512_set1_ph(1.1); - let r = _mm512_roundscale_ph::<0>(a); - let e = _mm512_set1_ph(1.0); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_roundscale_ph() { - let a = _mm512_set1_ph(1.1); - let src = _mm512_set1_ph(2.0); - let r = _mm512_mask_roundscale_ph::<0>(src, 0b01010101010101010101010101010101, a); - let e = _mm512_set_ph( - 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, - 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, + unsafe fn test_mm512_maskz_cvtxps_ph() { + let a = _mm512_set_ps( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - assert_eq_m512h(r, e); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_roundscale_ph() { - let a = _mm512_set1_ph(1.1); - let r = _mm512_maskz_roundscale_ph::<0>(0b01010101010101010101010101010101, a); - let e = _mm512_set_ph( - 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, - 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, + let r = _mm512_maskz_cvtxps_ph(0b0101010101010101, a); + let e = _mm256_set_ph( + 0.0, 2.0, 0.0, 4.0, 0.0, 6.0, 0.0, 8.0, 0.0, 10.0, 0.0, 12.0, 0.0, 14.0, 0.0, 16.0, ); - assert_eq_m512h(r, e); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_roundscale_round_ph() { - let a = _mm512_set1_ph(1.1); - let r = _mm512_roundscale_round_ph::<0, _MM_FROUND_NO_EXC>(a); - let e = _mm512_set1_ph(1.0); - assert_eq_m512h(r, e); + unsafe fn test_mm512_cvtx_roundps_ph() { + let a = _mm512_set_ps( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let r = _mm512_cvtx_roundps_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a); + let e = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_roundscale_round_ph() { - let a = _mm512_set1_ph(1.1); - let src = _mm512_set1_ph(2.0); - let r = _mm512_mask_roundscale_round_ph::<0, _MM_FROUND_NO_EXC>( + unsafe fn test_mm512_mask_cvtx_roundps_ph() { + let a = _mm512_set_ps( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let src = _mm256_set_ph( + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., + ); + let r = _mm512_mask_cvtx_roundps_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( src, - 0b01010101010101010101010101010101, + 0b0101010101010101, a, ); - let e = _mm512_set_ph( - 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, - 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, + let e = _mm256_set_ph( + 10.0, 2.0, 12.0, 4.0, 14.0, 6.0, 16.0, 8.0, 18.0, 10.0, 20.0, 12.0, 22.0, 14.0, 24.0, + 16.0, ); - assert_eq_m512h(r, e); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_roundscale_round_ph() { - let a = _mm512_set1_ph(1.1); - let r = 
_mm512_maskz_roundscale_round_ph::<0, _MM_FROUND_NO_EXC>( - 0b01010101010101010101010101010101, + unsafe fn test_mm512_maskz_cvtx_roundps_ph() { + let a = _mm512_set_ps( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let r = _mm512_maskz_cvtx_roundps_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b0101010101010101, a, ); - let e = _mm512_set_ph( - 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, - 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, + let e = _mm256_set_ph( + 0.0, 2.0, 0.0, 4.0, 0.0, 6.0, 0.0, 8.0, 0.0, 10.0, 0.0, 12.0, 0.0, 14.0, 0.0, 16.0, ); - assert_eq_m512h(r, e); + assert_eq_m256h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_roundscale_sh() { - let a = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(1.1, 20., 21., 22., 23., 24., 25., 26.); - let r = _mm_roundscale_sh::<0>(a, b); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + unsafe fn test_mm_cvtss_sh() { + let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let b = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); + let r = _mm_cvtss_sh(a, b); + let e = _mm_setr_ph(1.0, 11., 12., 13., 14., 15., 16., 17.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_roundscale_sh() { - let a = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(1.1, 20., 21., 22., 23., 24., 25., 26.); - let src = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_roundscale_sh::<0>(src, 0, a, b); - let e = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); + unsafe fn test_mm_mask_cvtss_sh() { + let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let b = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); + let src = _mm_setr_ph(20., 21., 22., 23., 24., 25., 26., 27.); + let r = _mm_mask_cvtss_sh(src, 0, a, b); + let e = _mm_setr_ph(20., 11., 12., 13., 14., 15., 16., 17.); assert_eq_m128h(r, e); - let r = _mm_mask_roundscale_sh::<0>(src, 1, a, b); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let r = _mm_mask_cvtss_sh(src, 1, a, b); + let e = _mm_setr_ph(1.0, 11., 12., 13., 14., 15., 16., 17.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_roundscale_sh() { - let a = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(1.1, 20., 21., 22., 23., 24., 25., 26.); - let r = _mm_maskz_roundscale_sh::<0>(0, a, b); - let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); + unsafe fn test_mm_maskz_cvtss_sh() { + let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let b = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); + let r = _mm_maskz_cvtss_sh(0, a, b); + let e = _mm_setr_ph(0.0, 11., 12., 13., 14., 15., 16., 17.); assert_eq_m128h(r, e); - let r = _mm_maskz_roundscale_sh::<0>(1, a, b); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let r = _mm_maskz_cvtss_sh(1, a, b); + let e = _mm_setr_ph(1.0, 11., 12., 13., 14., 15., 16., 17.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_roundscale_round_sh() { - let a = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(1.1, 20., 21., 22., 23., 24., 25., 26.); - let r = _mm_roundscale_round_sh::<0, _MM_FROUND_NO_EXC>(a, b); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + unsafe fn test_mm_cvt_roundss_sh() { + let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let b = 
_mm_setr_ps(1.0, 2.0, 3.0, 4.0); + let r = _mm_cvt_roundss_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_setr_ph(1.0, 11., 12., 13., 14., 15., 16., 17.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_roundscale_round_sh() { - let a = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(1.1, 20., 21., 22., 23., 24., 25., 26.); - let src = _mm_setr_ph(3.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_roundscale_round_sh::<0, _MM_FROUND_NO_EXC>(src, 0, a, b); - let e = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); + unsafe fn test_mm_mask_cvt_roundss_sh() { + let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let b = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); + let src = _mm_setr_ph(20., 21., 22., 23., 24., 25., 26., 27.); + let r = _mm_mask_cvt_roundss_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0, a, b, + ); + let e = _mm_setr_ph(20., 11., 12., 13., 14., 15., 16., 17.); assert_eq_m128h(r, e); - let r = _mm_mask_roundscale_round_sh::<0, _MM_FROUND_NO_EXC>(src, 1, a, b); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let r = _mm_mask_cvt_roundss_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 1, a, b, + ); + let e = _mm_setr_ph(1.0, 11., 12., 13., 14., 15., 16., 17.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_roundscale_round_sh() { - let a = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(1.1, 20., 21., 22., 23., 24., 25., 26.); - let r = _mm_maskz_roundscale_round_sh::<0, _MM_FROUND_NO_EXC>(0, a, b); - let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); + unsafe fn test_mm_maskz_cvt_roundss_sh() { + let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let b = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); + let r = + _mm_maskz_cvt_roundss_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_setr_ph(0.0, 11., 12., 13., 14., 15., 16., 17.); assert_eq_m128h(r, e); - let r = _mm_maskz_roundscale_round_sh::<0, _MM_FROUND_NO_EXC>(1, a, b); - let e = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); + let r = + _mm_maskz_cvt_roundss_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_setr_ph(1.0, 11., 12., 13., 14., 15., 16., 17.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_scalef_ph() { - let a = _mm_set1_ph(1.); - let b = _mm_set1_ph(3.); - let r = _mm_scalef_ph(a, b); - let e = _mm_set1_ph(8.0); + unsafe fn test_mm_cvtpd_ph() { + let a = _mm_set_pd(1.0, 2.0); + let r = _mm_cvtpd_ph(a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 2.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_scalef_ph() { - let a = _mm_set1_ph(1.); - let b = _mm_set1_ph(3.); - let src = _mm_set1_ph(2.); - let r = _mm_mask_scalef_ph(src, 0b01010101, a, b); - let e = _mm_set_ph(2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0); + unsafe fn test_mm_mask_cvtpd_ph() { + let a = _mm_set_pd(1.0, 2.0); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm_mask_cvtpd_ph(src, 0b01, a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 16., 2.); + assert_eq_m128h(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_cvtpd_ph() { + let a = _mm_set_pd(1.0, 2.0); + let r = _mm_maskz_cvtpd_ph(0b01, a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0); assert_eq_m128h(r, e); } 
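+    // Note on the expected vectors: the narrowing conversions covered here (f32,
+    // f64, i64 and u64 sources into __m128h) produce fewer than eight f16 results,
+    // so the unused high lanes of the destination are zeroed; `_mm_set_ph` lists
+    // lanes from highest to lowest, which is why the zero padding leads each `e`.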
#[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_scalef_ph() { - let a = _mm_set1_ph(1.); - let b = _mm_set1_ph(3.); - let r = _mm_maskz_scalef_ph(0b01010101, a, b); - let e = _mm_set_ph(0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0); + unsafe fn test_mm256_cvtpd_ph() { + let a = _mm256_set_pd(1.0, 2.0, 3.0, 4.0); + let r = _mm256_cvtpd_ph(a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_scalef_ph() { - let a = _mm256_set1_ph(1.); - let b = _mm256_set1_ph(3.); - let r = _mm256_scalef_ph(a, b); - let e = _mm256_set1_ph(8.0); - assert_eq_m256h(r, e); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_scalef_ph() { - let a = _mm256_set1_ph(1.); - let b = _mm256_set1_ph(3.); - let src = _mm256_set1_ph(2.); - let r = _mm256_mask_scalef_ph(src, 0b0101010101010101, a, b); - let e = _mm256_set_ph( - 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, - ); - assert_eq_m256h(r, e); + unsafe fn test_mm256_mask_cvtpd_ph() { + let a = _mm256_set_pd(1.0, 2.0, 3.0, 4.0); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm256_mask_cvtpd_ph(src, 0b0101, a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 14., 2.0, 16.0, 4.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_scalef_ph() { - let a = _mm256_set1_ph(1.); - let b = _mm256_set1_ph(3.); - let r = _mm256_maskz_scalef_ph(0b0101010101010101, a, b); - let e = _mm256_set_ph( - 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, - ); - assert_eq_m256h(r, e); + unsafe fn test_mm256_maskz_cvtpd_ph() { + let a = _mm256_set_pd(1.0, 2.0, 3.0, 4.0); + let r = _mm256_maskz_cvtpd_ph(0b0101, a); + let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 0.0, 4.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_scalef_ph() { - let a = _mm512_set1_ph(1.); - let b = _mm512_set1_ph(3.); - let r = _mm512_scalef_ph(a, b); - let e = _mm512_set1_ph(8.0); - assert_eq_m512h(r, e); + unsafe fn test_mm512_cvtpd_ph() { + let a = _mm512_set_pd(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm512_cvtpd_ph(a); + let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_scalef_ph() { - let a = _mm512_set1_ph(1.); - let b = _mm512_set1_ph(3.); - let src = _mm512_set1_ph(2.); - let r = _mm512_mask_scalef_ph(src, 0b01010101010101010101010101010101, a, b); - let e = _mm512_set_ph( - 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, - 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, - ); - assert_eq_m512h(r, e); + unsafe fn test_mm512_mask_cvtpd_ph() { + let a = _mm512_set_pd(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm512_mask_cvtpd_ph(src, 0b01010101, a); + let e = _mm_set_ph(10., 2., 12., 4., 14., 6., 16., 8.); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_scalef_ph() { - let a = _mm512_set1_ph(1.); - let b = _mm512_set1_ph(3.); - let r = _mm512_maskz_scalef_ph(0b01010101010101010101010101010101, a, b); - let e = _mm512_set_ph( - 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, - 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, - ); - 
assert_eq_m512h(r, e); + unsafe fn test_mm512_maskz_cvtpd_ph() { + let a = _mm512_set_pd(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm512_maskz_cvtpd_ph(0b01010101, a); + let e = _mm_set_ph(0.0, 2., 0.0, 4., 0.0, 6., 0.0, 8.); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_scalef_round_ph() { - let a = _mm512_set1_ph(1.); - let b = _mm512_set1_ph(3.); - let r = _mm512_scalef_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm512_set1_ph(8.0); - assert_eq_m512h(r, e); + unsafe fn test_mm512_cvt_roundpd_ph() { + let a = _mm512_set_pd(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm512_cvt_roundpd_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a); + let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_scalef_round_ph() { - let a = _mm512_set1_ph(1.); - let b = _mm512_set1_ph(3.); - let src = _mm512_set1_ph(2.); - let r = _mm512_mask_scalef_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, - 0b01010101010101010101010101010101, - a, - b, - ); - let e = _mm512_set_ph( - 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, - 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, 2.0, 8.0, + unsafe fn test_mm512_mask_cvt_roundpd_ph() { + let a = _mm512_set_pd(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let r = _mm512_mask_cvt_roundpd_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0b01010101, a, ); - assert_eq_m512h(r, e); + let e = _mm_set_ph(10., 2., 12., 4., 14., 6., 16., 8.); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_scalef_round_ph() { - let a = _mm512_set1_ph(1.); - let b = _mm512_set1_ph(3.); - let r = _mm512_maskz_scalef_round_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b01010101010101010101010101010101, - a, - b, - ); - let e = _mm512_set_ph( - 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, - 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, 0.0, 8.0, + unsafe fn test_mm512_maskz_cvt_roundpd_ph() { + let a = _mm512_set_pd(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm512_maskz_cvt_roundpd_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101, a, ); - assert_eq_m512h(r, e); + let e = _mm_set_ph(0.0, 2., 0.0, 4., 0.0, 6., 0.0, 8.); + assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_scalef_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); - let r = _mm_scalef_sh(a, b); - let e = _mm_setr_ph(8.0, 10., 11., 12., 13., 14., 15., 16.); + unsafe fn test_mm_cvtsd_sh() { + let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let b = _mm_setr_pd(1.0, 2.0); + let r = _mm_cvtsd_sh(a, b); + let e = _mm_setr_ph(1.0, 11., 12., 13., 14., 15., 16., 17.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_scalef_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); - let src = _mm_setr_ph(2.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_scalef_sh(src, 0, a, b); - let e = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); + unsafe fn test_mm_mask_cvtsd_sh() { + let a = _mm_setr_ph(10., 11., 12., 
13., 14., 15., 16., 17.); + let b = _mm_setr_pd(1.0, 2.0); + let src = _mm_setr_ph(20., 21., 22., 23., 24., 25., 26., 27.); + let r = _mm_mask_cvtsd_sh(src, 0, a, b); + let e = _mm_setr_ph(20., 11., 12., 13., 14., 15., 16., 17.); assert_eq_m128h(r, e); - let r = _mm_mask_scalef_sh(src, 1, a, b); - let e = _mm_setr_ph(8.0, 10., 11., 12., 13., 14., 15., 16.); + let r = _mm_mask_cvtsd_sh(src, 1, a, b); + let e = _mm_setr_ph(1.0, 11., 12., 13., 14., 15., 16., 17.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_scalef_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); - let r = _mm_maskz_scalef_sh(0, a, b); - let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); + unsafe fn test_mm_maskz_cvtsd_sh() { + let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let b = _mm_setr_pd(1.0, 2.0); + let r = _mm_maskz_cvtsd_sh(0, a, b); + let e = _mm_setr_ph(0.0, 11., 12., 13., 14., 15., 16., 17.); assert_eq_m128h(r, e); - let r = _mm_maskz_scalef_sh(1, a, b); - let e = _mm_setr_ph(8.0, 10., 11., 12., 13., 14., 15., 16.); + let r = _mm_maskz_cvtsd_sh(1, a, b); + let e = _mm_setr_ph(1.0, 11., 12., 13., 14., 15., 16., 17.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_scalef_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); - let r = _mm_scalef_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm_setr_ph(8.0, 10., 11., 12., 13., 14., 15., 16.); + unsafe fn test_mm_cvt_roundsd_sh() { + let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let b = _mm_setr_pd(1.0, 2.0); + let r = _mm_cvt_roundsd_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); + let e = _mm_setr_ph(1.0, 11., 12., 13., 14., 15., 16., 17.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_scalef_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); - let src = _mm_setr_ph(2.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_scalef_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + unsafe fn test_mm_mask_cvt_roundsd_sh() { + let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let b = _mm_setr_pd(1.0, 2.0); + let src = _mm_setr_ph(20., 21., 22., 23., 24., 25., 26., 27.); + let r = _mm_mask_cvt_roundsd_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( src, 0, a, b, ); - let e = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); + let e = _mm_setr_ph(20., 11., 12., 13., 14., 15., 16., 17.); assert_eq_m128h(r, e); - let r = _mm_mask_scalef_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm_mask_cvt_roundsd_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( src, 1, a, b, ); - let e = _mm_setr_ph(8.0, 10., 11., 12., 13., 14., 15., 16.); + let e = _mm_setr_ph(1.0, 11., 12., 13., 14., 15., 16., 17.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_scalef_round_sh() { - let a = _mm_setr_ph(1.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(3.0, 20., 21., 22., 23., 24., 25., 26.); + unsafe fn test_mm_maskz_cvt_roundsd_sh() { + let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); + let b = _mm_setr_pd(1.0, 2.0); let r = - _mm_maskz_scalef_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC 
}>(0, a, b); - let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); + _mm_maskz_cvt_roundsd_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); + let e = _mm_setr_ph(0.0, 11., 12., 13., 14., 15., 16., 17.); assert_eq_m128h(r, e); let r = - _mm_maskz_scalef_round_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); - let e = _mm_setr_ph(8.0, 10., 11., 12., 13., 14., 15., 16.); + _mm_maskz_cvt_roundsd_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); + let e = _mm_setr_ph(1.0, 11., 12., 13., 14., 15., 16., 17.); assert_eq_m128h(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_reduce_ph() { - let a = _mm_set1_ph(1.25); - let r = _mm_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>(a); - let e = _mm_set1_ph(0.25); - assert_eq_m128h(r, e); + unsafe fn test_mm_cvtph_epi16() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm_cvttph_epi16(a); + let e = _mm_set_epi16(1, 2, 3, 4, 5, 6, 7, 8); + assert_eq_m128i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_reduce_ph() { - let a = _mm_set1_ph(1.25); - let src = _mm_set1_ph(2.0); - let r = _mm_mask_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>(src, 0b01010101, a); - let e = _mm_set_ph(2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25); - assert_eq_m128h(r, e); + unsafe fn test_mm_mask_cvtph_epi16() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let src = _mm_set_epi16(10, 11, 12, 13, 14, 15, 16, 17); + let r = _mm_mask_cvttph_epi16(src, 0b01010101, a); + let e = _mm_set_epi16(10, 2, 12, 4, 14, 6, 16, 8); + assert_eq_m128i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_reduce_ph() { - let a = _mm_set1_ph(1.25); - let r = _mm_maskz_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>(0b01010101, a); - let e = _mm_set_ph(0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25); - assert_eq_m128h(r, e); + unsafe fn test_mm_maskz_cvtph_epi16() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm_maskz_cvttph_epi16(0b01010101, a); + let e = _mm_set_epi16(0, 2, 0, 4, 0, 6, 0, 8); + assert_eq_m128i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_reduce_ph() { - let a = _mm256_set1_ph(1.25); - let r = _mm256_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>(a); - let e = _mm256_set1_ph(0.25); - assert_eq_m256h(r, e); + unsafe fn test_mm256_cvtph_epi16() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let r = _mm256_cvttph_epi16(a); + let e = _mm256_set_epi16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + assert_eq_m256i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_reduce_ph() { - let a = _mm256_set1_ph(1.25); - let src = _mm256_set1_ph(2.0); - let r = _mm256_mask_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>(src, 0b0101010101010101, a); - let e = _mm256_set_ph( - 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, + unsafe fn test_mm256_mask_cvtph_epi16() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - assert_eq_m256h(r, e); + let src = _mm256_set_epi16( + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, + ); + let r = _mm256_mask_cvttph_epi16(src, 0b0101010101010101, a); + let e = _mm256_set_epi16(10, 2, 12, 4, 14, 6, 16, 8, 18, 10, 20, 12, 22, 14, 24, 16); + assert_eq_m256i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe 
fn test_mm256_maskz_reduce_ph() { - let a = _mm256_set1_ph(1.25); - let r = _mm256_maskz_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>(0b0101010101010101, a); - let e = _mm256_set_ph( - 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, + unsafe fn test_mm256_maskz_cvtph_epi16() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - assert_eq_m256h(r, e); + let r = _mm256_maskz_cvttph_epi16(0b0101010101010101, a); + let e = _mm256_set_epi16(0, 2, 0, 4, 0, 6, 0, 8, 0, 10, 0, 12, 0, 14, 0, 16); + assert_eq_m256i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_reduce_ph() { - let a = _mm512_set1_ph(1.25); - let r = _mm512_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>(a); - let e = _mm512_set1_ph(0.25); - assert_eq_m512h(r, e); + unsafe fn test_mm512_cvtph_epi16() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let r = _mm512_cvttph_epi16(a); + let e = _mm512_set_epi16( + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, + ); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_reduce_ph() { - let a = _mm512_set1_ph(1.25); - let src = _mm512_set1_ph(2.0); - let r = _mm512_mask_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>( - src, - 0b01010101010101010101010101010101, - a, + unsafe fn test_mm512_mask_cvtph_epi16() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, ); - let e = _mm512_set_ph( - 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, - 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, + let src = _mm512_set_epi16( + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, ); - assert_eq_m512h(r, e); + let r = _mm512_mask_cvttph_epi16(src, 0b01010101010101010101010101010101, a); + let e = _mm512_set_epi16( + 10, 2, 12, 4, 14, 6, 16, 8, 18, 10, 20, 12, 22, 14, 24, 16, 26, 18, 28, 20, 30, 22, 32, + 24, 34, 26, 36, 28, 38, 30, 40, 32, + ); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_reduce_ph() { - let a = _mm512_set1_ph(1.25); - let r = _mm512_maskz_reduce_ph::<{ 16 | _MM_FROUND_TO_ZERO }>( - 0b01010101010101010101010101010101, - a, + unsafe fn test_mm512_maskz_cvtph_epi16() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, ); - let e = _mm512_set_ph( - 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, - 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, + let r = _mm512_maskz_cvttph_epi16(0b01010101010101010101010101010101, a); + let e = _mm512_set_epi16( + 0, 2, 0, 4, 0, 6, 0, 8, 0, 10, 0, 12, 0, 14, 0, 16, 0, 18, 0, 20, 0, 22, 0, 24, 0, 26, + 0, 28, 0, 30, 0, 32, ); - assert_eq_m512h(r, e); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_reduce_round_ph() { - let a = 
_mm512_set1_ph(1.25); - let r = _mm512_reduce_round_ph::<{ 16 | _MM_FROUND_TO_ZERO }, _MM_FROUND_NO_EXC>(a); - let e = _mm512_set1_ph(0.25); - assert_eq_m512h(r, e); + unsafe fn test_mm512_cvt_roundph_epi16() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let r = _mm512_cvtt_roundph_epi16::<_MM_FROUND_NO_EXC>(a); + let e = _mm512_set_epi16( + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, + ); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_reduce_round_ph() { - let a = _mm512_set1_ph(1.25); - let src = _mm512_set1_ph(2.0); - let r = _mm512_mask_reduce_round_ph::<{ 16 | _MM_FROUND_TO_ZERO }, _MM_FROUND_NO_EXC>( + unsafe fn test_mm512_mask_cvt_roundph_epi16() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let src = _mm512_set_epi16( + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + ); + let r = _mm512_mask_cvtt_roundph_epi16::<_MM_FROUND_NO_EXC>( src, 0b01010101010101010101010101010101, a, ); - let e = _mm512_set_ph( - 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, - 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, 2.0, 0.25, + let e = _mm512_set_epi16( + 10, 2, 12, 4, 14, 6, 16, 8, 18, 10, 20, 12, 22, 14, 24, 16, 26, 18, 28, 20, 30, 22, 32, + 24, 34, 26, 36, 28, 38, 30, 40, 32, ); - assert_eq_m512h(r, e); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_reduce_round_ph() { - let a = _mm512_set1_ph(1.25); - let r = _mm512_maskz_reduce_round_ph::<{ 16 | _MM_FROUND_TO_ZERO }, _MM_FROUND_NO_EXC>( + unsafe fn test_mm512_maskz_cvt_roundph_epi16() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let r = _mm512_maskz_cvtt_roundph_epi16::<_MM_FROUND_NO_EXC>( 0b01010101010101010101010101010101, a, ); - let e = _mm512_set_ph( - 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, - 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, 0.0, 0.25, + let e = _mm512_set_epi16( + 0, 2, 0, 4, 0, 6, 0, 8, 0, 10, 0, 12, 0, 14, 0, 16, 0, 18, 0, 20, 0, 22, 0, 24, 0, 26, + 0, 28, 0, 30, 0, 32, ); - assert_eq_m512h(r, e); + assert_eq_m512i(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_reduce_sh() { - let a = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(1.25, 20., 21., 22., 23., 24., 25., 26.); - let r = _mm_reduce_sh::<{ 16 | _MM_FROUND_TO_ZERO }>(a, b); - let e = _mm_setr_ph(0.25, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_cvtph_epu16() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm_cvttph_epu16(a); + let e = _mm_set_epi16(1, 2, 3, 4, 5, 6, 7, 8); + assert_eq_m128i(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_reduce_sh() { - let a = 
_mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(1.25, 20., 21., 22., 23., 24., 25., 26.); - let src = _mm_setr_ph(2.0, 30., 31., 32., 33., 34., 35., 36.); - let r = _mm_mask_reduce_sh::<{ 16 | _MM_FROUND_TO_ZERO }>(src, 0, a, b); - let e = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_mask_reduce_sh::<{ 16 | _MM_FROUND_TO_ZERO }>(src, 1, a, b); - let e = _mm_setr_ph(0.25, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_cvtph_epu16() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let src = _mm_set_epi16(10, 11, 12, 13, 14, 15, 16, 17); + let r = _mm_mask_cvttph_epu16(src, 0b01010101, a); + let e = _mm_set_epi16(10, 2, 12, 4, 14, 6, 16, 8); + assert_eq_m128i(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_reduce_sh() { - let a = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(1.25, 20., 21., 22., 23., 24., 25., 26.); - let r = _mm_maskz_reduce_sh::<{ 16 | _MM_FROUND_TO_ZERO }>(0, a, b); - let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); - let r = _mm_maskz_reduce_sh::<{ 16 | _MM_FROUND_TO_ZERO }>(1, a, b); - let e = _mm_setr_ph(0.25, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_cvtph_epu16() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm_maskz_cvttph_epu16(0b01010101, a); + let e = _mm_set_epi16(0, 2, 0, 4, 0, 6, 0, 8); + assert_eq_m128i(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_cvtph_epu16() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let r = _mm256_cvttph_epu16(a); + let e = _mm256_set_epi16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + assert_eq_m256i(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_cvtph_epu16() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let src = _mm256_set_epi16( + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, + ); + let r = _mm256_mask_cvttph_epu16(src, 0b0101010101010101, a); + let e = _mm256_set_epi16(10, 2, 12, 4, 14, 6, 16, 8, 18, 10, 20, 12, 22, 14, 24, 16); + assert_eq_m256i(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_cvtph_epu16() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let r = _mm256_maskz_cvttph_epu16(0b0101010101010101, a); + let e = _mm256_set_epi16(0, 2, 0, 4, 0, 6, 0, 8, 0, 10, 0, 12, 0, 14, 0, 16); + assert_eq_m256i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_reduce_round_sh() { - let a = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.); - let b = _mm_setr_ph(1.25, 20., 21., 22., 23., 24., 25., 26.); - let r = _mm_reduce_round_sh::<{ 16 | _MM_FROUND_TO_ZERO }, _MM_FROUND_NO_EXC>(a, b); - let e = _mm_setr_ph(0.25, 10., 11., 12., 13., 14., 15., 16.); - assert_eq_m128h(r, e); + unsafe fn test_mm512_cvtph_epu16() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let r = _mm512_cvttph_epu16(a); + 
+ let e = _mm512_set_epi16(
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30, 31, 32,
+ );
+ assert_eq_m512i(r, e);
}

#[simd_test(enable = "avx512fp16")]
- unsafe fn test_mm_mask_reduce_round_sh() {
- let a = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.);
- let b = _mm_setr_ph(1.25, 20., 21., 22., 23., 24., 25., 26.);
- let src = _mm_setr_ph(2.0, 30., 31., 32., 33., 34., 35., 36.);
- let r = _mm_mask_reduce_round_sh::<{ 16 | _MM_FROUND_TO_ZERO }, _MM_FROUND_NO_EXC>(
- src, 0, a, b,
+ unsafe fn test_mm512_mask_cvtph_epu16() {
+ let a = _mm512_set_ph(
+ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+ 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
+ 31.0, 32.0,
);
- let e = _mm_setr_ph(2.0, 10., 11., 12., 13., 14., 15., 16.);
- assert_eq_m128h(r, e);
- let r = _mm_mask_reduce_round_sh::<{ 16 | _MM_FROUND_TO_ZERO }, _MM_FROUND_NO_EXC>(
- src, 1, a, b,
+ let src = _mm512_set_epi16(
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
);
- let e = _mm_setr_ph(0.25, 10., 11., 12., 13., 14., 15., 16.);
- assert_eq_m128h(r, e);
+ let r = _mm512_mask_cvtph_epu16(src, 0b01010101010101010101010101010101, a);
+ let e = _mm512_set_epi16(
+ 10, 2, 12, 4, 14, 6, 16, 8, 18, 10, 20, 12, 22, 14, 24, 16, 26, 18, 28, 20, 30, 22, 32,
+ 24, 34, 26, 36, 28, 38, 30, 40, 32,
+ );
+ assert_eq_m512i(r, e);
}

#[simd_test(enable = "avx512fp16")]
- unsafe fn test_mm_maskz_reduce_round_sh() {
- let a = _mm_setr_ph(3.0, 10., 11., 12., 13., 14., 15., 16.);
- let b = _mm_setr_ph(1.25, 20., 21., 22., 23., 24., 25., 26.);
- let r =
- _mm_maskz_reduce_round_sh::<{ 16 | _MM_FROUND_TO_ZERO }, _MM_FROUND_NO_EXC>(0, a, b);
- let e = _mm_setr_ph(0.0, 10., 11., 12., 13., 14., 15., 16.);
- assert_eq_m128h(r, e);
- let r =
- _mm_maskz_reduce_round_sh::<{ 16 | _MM_FROUND_TO_ZERO }, _MM_FROUND_NO_EXC>(1, a, b);
- let e = _mm_setr_ph(0.25, 10., 11., 12., 13., 14., 15., 16.);
- assert_eq_m128h(r, e);
+ unsafe fn test_mm512_maskz_cvtph_epu16() {
+ let a = _mm512_set_ph(
+ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+ 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
+ 31.0, 32.0,
+ );
+ let r = _mm512_maskz_cvtph_epu16(0b01010101010101010101010101010101, a);
+ let e = _mm512_set_epi16(
+ 0, 2, 0, 4, 0, 6, 0, 8, 0, 10, 0, 12, 0, 14, 0, 16, 0, 18, 0, 20, 0, 22, 0, 24, 0, 26,
+ 0, 28, 0, 30, 0, 32,
+ );
+ assert_eq_m512i(r, e);
}

- #[simd_test(enable = "avx512fp16,avx512vl")]
- unsafe fn test_mm_reduce_add_ph() {
- let a = _mm_set1_ph(2.0);
- let r = _mm_reduce_add_ph(a);
- assert_eq!(r, 16.0);
+ #[simd_test(enable = "avx512fp16")]
+ unsafe fn test_mm512_cvt_roundph_epu16() {
+ let a = _mm512_set_ph(
+ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+ 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
+ 31.0, 32.0,
+ );
+ let r = _mm512_cvt_roundph_epu16::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a);
+ let e = _mm512_set_epi16(
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30, 31, 32,
+ );
+ assert_eq_m512i(r, e);
}

- #[simd_test(enable = "avx512fp16,avx512vl")]
- unsafe fn test_mm256_reduce_add_ph() {
- let a = _mm256_set1_ph(2.0);
- let r = _mm256_reduce_add_ph(a);
- assert_eq!(r, 32.0);
+ 
#[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_cvt_roundph_epu16() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let src = _mm512_set_epi16( + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + ); + let r = _mm512_mask_cvt_roundph_epu16::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b01010101010101010101010101010101, + a, + ); + let e = _mm512_set_epi16( + 10, 2, 12, 4, 14, 6, 16, 8, 18, 10, 20, 12, 22, 14, 24, 16, 26, 18, 28, 20, 30, 22, 32, + 24, 34, 26, 36, 28, 38, 30, 40, 32, + ); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_reduce_add_ph() { - let a = _mm512_set1_ph(2.0); - let r = _mm512_reduce_add_ph(a); - assert_eq!(r, 64.0); + unsafe fn test_mm512_maskz_cvt_roundph_epu16() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let r = _mm512_maskz_cvt_roundph_epu16::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101010101010101010101010101, + a, + ); + let e = _mm512_set_epi16( + 0, 2, 0, 4, 0, 6, 0, 8, 0, 10, 0, 12, 0, 14, 0, 16, 0, 18, 0, 20, 0, 22, 0, 24, 0, 26, + 0, 28, 0, 30, 0, 32, + ); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_reduce_mul_ph() { - let a = _mm_set1_ph(2.0); - let r = _mm_reduce_mul_ph(a); - assert_eq!(r, 256.0); + unsafe fn test_mm_cvttph_epi16() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm_cvttph_epi16(a); + let e = _mm_set_epi16(1, 2, 3, 4, 5, 6, 7, 8); + assert_eq_m128i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_reduce_mul_ph() { - let a = _mm256_set1_ph(2.0); - let r = _mm256_reduce_mul_ph(a); - assert_eq!(r, 65536.0); - } - - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_reduce_mul_ph() { - let a = _mm512_set1_ph(2.0); - let r = _mm512_reduce_mul_ph(a); - assert_eq!(r, 16777216.0); + unsafe fn test_mm_mask_cvttph_epi16() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let src = _mm_set_epi16(10, 11, 12, 13, 14, 15, 16, 17); + let r = _mm_mask_cvttph_epi16(src, 0b01010101, a); + let e = _mm_set_epi16(10, 2, 12, 4, 14, 6, 16, 8); + assert_eq_m128i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_reduce_max_ph() { + unsafe fn test_mm_maskz_cvttph_epi16() { let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let r = _mm_reduce_max_ph(a); - assert_eq!(r, 8.0); + let r = _mm_maskz_cvttph_epi16(0b01010101, a); + let e = _mm_set_epi16(0, 2, 0, 4, 0, 6, 0, 8); + assert_eq_m128i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_reduce_max_ph() { + unsafe fn test_mm256_cvttph_epi16() { let a = _mm256_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - let r = _mm256_reduce_max_ph(a); - assert_eq!(r, 16.0); + let r = _mm256_cvttph_epi16(a); + let e = _mm256_set_epi16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + assert_eq_m256i(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_reduce_max_ph() { - let a = _mm512_set_ph( + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe 
fn test_mm256_mask_cvttph_epi16() { + let a = _mm256_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, ); - let r = _mm512_reduce_max_ph(a); - assert_eq!(r, 32.0); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_reduce_min_ph() { - let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let r = _mm_reduce_min_ph(a); - assert_eq!(r, 1.0); + let src = _mm256_set_epi16( + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, + ); + let r = _mm256_mask_cvttph_epi16(src, 0b0101010101010101, a); + let e = _mm256_set_epi16(10, 2, 12, 4, 14, 6, 16, 8, 18, 10, 20, 12, 22, 14, 24, 16); + assert_eq_m256i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_reduce_min_ph() { + unsafe fn test_mm256_maskz_cvttph_epi16() { let a = _mm256_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - let r = _mm256_reduce_min_ph(a); - assert_eq!(r, 1.0); + let r = _mm256_maskz_cvttph_epi16(0b0101010101010101, a); + let e = _mm256_set_epi16(0, 2, 0, 4, 0, 6, 0, 8, 0, 10, 0, 12, 0, 14, 0, 16); + assert_eq_m256i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_reduce_min_ph() { + unsafe fn test_mm512_cvttph_epi16() { let a = _mm512_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, ); - let r = _mm512_reduce_min_ph(a); - assert_eq!(r, 1.0); - } - - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_fpclass_ph_mask() { - let a = _mm_set_ph( - 1., - f16::INFINITY, - f16::NEG_INFINITY, - 0.0, - -0.0, - -2.0, - f16::NAN, - 5.9e-8, // Denormal + let r = _mm512_cvttph_epi16(a); + let e = _mm512_set_epi16( + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, ); - let r = _mm_fpclass_ph_mask::<0x18>(a); // infinities - assert_eq!(r, 0b01100000); + assert_eq_m512i(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_fpclass_ph_mask() { - let a = _mm_set_ph( - 1., - f16::INFINITY, - f16::NEG_INFINITY, - 0.0, - -0.0, - -2.0, - f16::NAN, - 5.9e-8, // Denormal + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_cvttph_epi16() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, ); - let r = _mm_mask_fpclass_ph_mask::<0x18>(0b01010101, a); - assert_eq!(r, 0b01000000); + let src = _mm512_set_epi16( + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + ); + let r = _mm512_mask_cvttph_epi16(src, 0b01010101010101010101010101010101, a); + let e = _mm512_set_epi16( + 10, 2, 12, 4, 14, 6, 16, 8, 18, 10, 20, 12, 22, 14, 24, 16, 26, 18, 28, 20, 30, 22, 32, + 24, 34, 26, 36, 28, 38, 30, 40, 32, + ); + assert_eq_m512i(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_fpclass_ph_mask() { - let a = _mm256_set_ph( - 1., - f16::INFINITY, - f16::NEG_INFINITY, - 0.0, - -0.0, - -2.0, - f16::NAN, - 5.9e-8, // Denormal - 1., - f16::INFINITY, - f16::NEG_INFINITY, - 0.0, - -0.0, - -2.0, - f16::NAN, - 5.9e-8, // Denormal + #[simd_test(enable = 
"avx512fp16")] + unsafe fn test_mm512_maskz_cvttph_epi16() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, ); - let r = _mm256_fpclass_ph_mask::<0x18>(a); // infinities - assert_eq!(r, 0b0110000001100000); + let r = _mm512_maskz_cvttph_epi16(0b01010101010101010101010101010101, a); + let e = _mm512_set_epi16( + 0, 2, 0, 4, 0, 6, 0, 8, 0, 10, 0, 12, 0, 14, 0, 16, 0, 18, 0, 20, 0, 22, 0, 24, 0, 26, + 0, 28, 0, 30, 0, 32, + ); + assert_eq_m512i(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_fpclass_ph_mask() { - let a = _mm256_set_ph( - 1., - f16::INFINITY, - f16::NEG_INFINITY, - 0.0, - -0.0, - -2.0, - f16::NAN, - 5.9e-8, // Denormal - 1., - f16::INFINITY, - f16::NEG_INFINITY, - 0.0, - -0.0, - -2.0, - f16::NAN, - 5.9e-8, // Denormal + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_cvtt_roundph_epi16() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, ); - let r = _mm256_mask_fpclass_ph_mask::<0x18>(0b0101010101010101, a); - assert_eq!(r, 0b0100000001000000); + let r = _mm512_cvtt_roundph_epi16::<_MM_FROUND_NO_EXC>(a); + let e = _mm512_set_epi16( + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, + ); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_fpclass_ph_mask() { + unsafe fn test_mm512_mask_cvtt_roundph_epi16() { let a = _mm512_set_ph( - 1., - f16::INFINITY, - f16::NEG_INFINITY, - 0.0, - -0.0, - -2.0, - f16::NAN, - 5.9e-8, // Denormal - 1., - f16::INFINITY, - f16::NEG_INFINITY, - 0.0, - -0.0, - -2.0, - f16::NAN, - 5.9e-8, // Denormal - 1., - f16::INFINITY, - f16::NEG_INFINITY, - 0.0, - -0.0, - -2.0, - f16::NAN, - 5.9e-8, // Denormal - 1., - f16::INFINITY, - f16::NEG_INFINITY, - 0.0, - -0.0, - -2.0, - f16::NAN, - 5.9e-8, // Denormal + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, ); - let r = _mm512_fpclass_ph_mask::<0x18>(a); // infinities - assert_eq!(r, 0b01100000011000000110000001100000); + let src = _mm512_set_epi16( + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + ); + let r = _mm512_mask_cvtt_roundph_epi16::<_MM_FROUND_NO_EXC>( + src, + 0b01010101010101010101010101010101, + a, + ); + let e = _mm512_set_epi16( + 10, 2, 12, 4, 14, 6, 16, 8, 18, 10, 20, 12, 22, 14, 24, 16, 26, 18, 28, 20, 30, 22, 32, + 24, 34, 26, 36, 28, 38, 30, 40, 32, + ); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_fpclass_ph_mask() { + unsafe fn test_mm512_maskz_cvtt_roundph_epi16() { let a = _mm512_set_ph( - 1., - f16::INFINITY, - f16::NEG_INFINITY, - 0.0, - -0.0, - -2.0, - f16::NAN, - 5.9e-8, // Denormal - 1., - f16::INFINITY, - f16::NEG_INFINITY, - 0.0, - -0.0, - -2.0, - f16::NAN, - 5.9e-8, // Denormal - 1., - f16::INFINITY, - f16::NEG_INFINITY, - 0.0, - -0.0, - -2.0, - f16::NAN, - 5.9e-8, // Denormal - 1., - f16::INFINITY, - f16::NEG_INFINITY, - 0.0, - -0.0, - -2.0, - f16::NAN, - 5.9e-8, // Denormal + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 
12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, ); - let r = _mm512_mask_fpclass_ph_mask::<0x18>(0b01010101010101010101010101010101, a); - assert_eq!(r, 0b01000000010000000100000001000000); + let r = _mm512_maskz_cvtt_roundph_epi16::<_MM_FROUND_NO_EXC>( + 0b01010101010101010101010101010101, + a, + ); + let e = _mm512_set_epi16( + 0, 2, 0, 4, 0, 6, 0, 8, 0, 10, 0, 12, 0, 14, 0, 16, 0, 18, 0, 20, 0, 22, 0, 24, 0, 26, + 0, 28, 0, 30, 0, 32, + ); + assert_eq_m512i(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_fpclass_sh_mask() { - let a = _mm_set_sh(f16::INFINITY); - let r = _mm_fpclass_sh_mask::<0x18>(a); - assert_eq!(r, 1); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_cvttph_epu16() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm_cvttph_epu16(a); + let e = _mm_set_epi16(1, 2, 3, 4, 5, 6, 7, 8); + assert_eq_m128i(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_fpclass_sh_mask() { - let a = _mm_set_sh(f16::INFINITY); - let r = _mm_mask_fpclass_sh_mask::<0x18>(0, a); - assert_eq!(r, 0); - let r = _mm_mask_fpclass_sh_mask::<0x18>(1, a); - assert_eq!(r, 1); + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_cvttph_epu16() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let src = _mm_set_epi16(10, 11, 12, 13, 14, 15, 16, 17); + let r = _mm_mask_cvttph_epu16(src, 0b01010101, a); + let e = _mm_set_epi16(10, 2, 12, 4, 14, 6, 16, 8); + assert_eq_m128i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_blend_ph() { + unsafe fn test_mm_maskz_cvttph_epu16() { let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_set_ph(-1.0, -2.0, -3.0, -4.0, -5.0, -6.0, -7.0, -8.0); - let r = _mm_mask_blend_ph(0b01010101, a, b); - let e = _mm_set_ph(1.0, -2.0, 3.0, -4.0, 5.0, -6.0, 7.0, -8.0); - assert_eq_m128h(r, e); + let r = _mm_maskz_cvttph_epu16(0b01010101, a); + let e = _mm_set_epi16(0, 2, 0, 4, 0, 6, 0, 8); + assert_eq_m128i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_blend_ph() { + unsafe fn test_mm256_cvttph_epu16() { let a = _mm256_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - let b = _mm256_set_ph( - -1.0, -2.0, -3.0, -4.0, -5.0, -6.0, -7.0, -8.0, -9.0, -10.0, -11.0, -12.0, -13.0, - -14.0, -15.0, -16.0, + let r = _mm256_cvttph_epu16(a); + let e = _mm256_set_epi16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + assert_eq_m256i(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_cvttph_epu16() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - let r = _mm256_mask_blend_ph(0b0101010101010101, a, b); - let e = _mm256_set_ph( - 1.0, -2.0, 3.0, -4.0, 5.0, -6.0, 7.0, -8.0, 9.0, -10.0, 11.0, -12.0, 13.0, -14.0, 15.0, - -16.0, + let src = _mm256_set_epi16( + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, ); - assert_eq_m256h(r, e); + let r = _mm256_mask_cvttph_epu16(src, 0b0101010101010101, a); + let e = _mm256_set_epi16(10, 2, 12, 4, 14, 6, 16, 8, 18, 10, 20, 12, 22, 14, 24, 16); + assert_eq_m256i(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_cvttph_epu16() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let r = 
_mm256_maskz_cvttph_epu16(0b0101010101010101, a); + let e = _mm256_set_epi16(0, 2, 0, 4, 0, 6, 0, 8, 0, 10, 0, 12, 0, 14, 0, 16); + assert_eq_m256i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_blend_ph() { + unsafe fn test_mm512_cvttph_epu16() { let a = _mm512_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, ); - let b = _mm512_set_ph( - -1.0, -2.0, -3.0, -4.0, -5.0, -6.0, -7.0, -8.0, -9.0, -10.0, -11.0, -12.0, -13.0, - -14.0, -15.0, -16.0, -17.0, -18.0, -19.0, -20.0, -21.0, -22.0, -23.0, -24.0, -25.0, - -26.0, -27.0, -28.0, -29.0, -30.0, -31.0, -32.0, - ); - let r = _mm512_mask_blend_ph(0b01010101010101010101010101010101, a, b); - let e = _mm512_set_ph( - 1.0, -2.0, 3.0, -4.0, 5.0, -6.0, 7.0, -8.0, 9.0, -10.0, 11.0, -12.0, 13.0, -14.0, 15.0, - -16.0, 17.0, -18.0, 19.0, -20.0, 21.0, -22.0, 23.0, -24.0, 25.0, -26.0, 27.0, -28.0, - 29.0, -30.0, 31.0, -32.0, + let r = _mm512_cvttph_epu16(a); + let e = _mm512_set_epi16( + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, ); - assert_eq_m512h(r, e); + assert_eq_m512i(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_permutex2var_ph() { - let a = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let b = _mm_setr_ph(9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); - let idx = _mm_setr_epi16(0, 2, 4, 6, 8, 10, 12, 14); - let r = _mm_permutex2var_ph(a, idx, b); - let e = _mm_setr_ph(1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0); - assert_eq_m128h(r, e); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_cvttph_epu16() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let src = _mm512_set_epi16( + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + ); + let r = _mm512_mask_cvttph_epu16(src, 0b01010101010101010101010101010101, a); + let e = _mm512_set_epi16( + 10, 2, 12, 4, 14, 6, 16, 8, 18, 10, 20, 12, 22, 14, 24, 16, 26, 18, 28, 20, 30, 22, 32, + 24, 34, 26, 36, 28, 38, 30, 40, 32, + ); + assert_eq_m512i(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_permutex2var_ph() { - let a = _mm256_setr_ph( + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_cvttph_epu16() { + let a = _mm512_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, ); - let b = _mm256_setr_ph( + let r = _mm512_maskz_cvttph_epu16(0b01010101010101010101010101010101, a); + let e = _mm512_set_epi16( + 0, 2, 0, 4, 0, 6, 0, 8, 0, 10, 0, 12, 0, 14, 0, 16, 0, 18, 0, 20, 0, 22, 0, 24, 0, 26, + 0, 28, 0, 30, 0, 32, + ); + assert_eq_m512i(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_cvtt_roundph_epu16() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, ); - let idx = _mm256_setr_epi16(0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); - let r = _mm256_permutex2var_ph(a, idx, b); - let 
e = _mm256_setr_ph( - 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, 23.0, 25.0, 27.0, 29.0, - 31.0, + let r = _mm512_cvtt_roundph_epu16::<_MM_FROUND_NO_EXC>(a); + let e = _mm512_set_epi16( + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, ); - assert_eq_m256h(r, e); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_permutex2var_ph() { - let a = _mm512_setr_ph( + unsafe fn test_mm512_mask_cvtt_roundph_epu16() { + let a = _mm512_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, ); - let b = _mm512_setr_ph( - 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, - 47.0, 48.0, 49.0, 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, 60.0, - 61.0, 62.0, 63.0, 64.0, + let src = _mm512_set_epi16( + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, ); - let idx = _mm512_set_epi16( - 62, 60, 58, 56, 54, 52, 50, 48, 46, 44, 42, 40, 38, 36, 34, 32, 30, 28, 26, 24, 22, 20, - 18, 16, 14, 12, 10, 8, 6, 4, 2, 0, + let r = _mm512_mask_cvtt_roundph_epu16::<_MM_FROUND_NO_EXC>( + src, + 0b01010101010101010101010101010101, + a, ); - let r = _mm512_permutex2var_ph(a, idx, b); - let e = _mm512_setr_ph( - 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, 23.0, 25.0, 27.0, 29.0, - 31.0, 33.0, 35.0, 37.0, 39.0, 41.0, 43.0, 45.0, 47.0, 49.0, 51.0, 53.0, 55.0, 57.0, - 59.0, 61.0, 63.0, + let e = _mm512_set_epi16( + 10, 2, 12, 4, 14, 6, 16, 8, 18, 10, 20, 12, 22, 14, 24, 16, 26, 18, 28, 20, 30, 22, 32, + 24, 34, 26, 36, 28, 38, 30, 40, 32, ); - assert_eq_m512h(r, e); + assert_eq_m512i(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_cvtt_roundph_epu16() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let r = _mm512_maskz_cvtt_roundph_epu16::<_MM_FROUND_NO_EXC>( + 0b01010101010101010101010101010101, + a, + ); + let e = _mm512_set_epi16( + 0, 2, 0, 4, 0, 6, 0, 8, 0, 10, 0, 12, 0, 14, 0, 16, 0, 18, 0, 20, 0, 22, 0, 24, 0, 26, + 0, 28, 0, 30, 0, 32, + ); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_permutexvar_ph() { + unsafe fn test_mm_cvtph_epi32() { + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); + let r = _mm_cvtph_epi32(a); + let e = _mm_set_epi32(1, 2, 3, 4); + assert_eq_m128i(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_cvtph_epi32() { + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); + let src = _mm_set_epi32(10, 11, 12, 13); + let r = _mm_mask_cvtph_epi32(src, 0b0101, a); + let e = _mm_set_epi32(10, 2, 12, 4); + assert_eq_m128i(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_cvtph_epi32() { + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); + let r = _mm_maskz_cvtph_epi32(0b0101, a); + let e = _mm_set_epi32(0, 2, 0, 4); + assert_eq_m128i(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_cvtph_epi32() { let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let idx = _mm_set_epi16(0, 2, 4, 6, 1, 3, 5, 7); - let r = 
_mm_permutexvar_ph(idx, a); - let e = _mm_setr_ph(1.0, 3.0, 5.0, 7.0, 2.0, 4.0, 6.0, 8.0); - assert_eq_m128h(r, e); + let r = _mm256_cvtph_epi32(a); + let e = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); + assert_eq_m256i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_permutexvar_ph() { + unsafe fn test_mm256_mask_cvtph_epi32() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let src = _mm256_set_epi32(10, 11, 12, 13, 14, 15, 16, 17); + let r = _mm256_mask_cvtph_epi32(src, 0b01010101, a); + let e = _mm256_set_epi32(10, 2, 12, 4, 14, 6, 16, 8); + assert_eq_m256i(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_cvtph_epi32() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm256_maskz_cvtph_epi32(0b01010101, a); + let e = _mm256_set_epi32(0, 2, 0, 4, 0, 6, 0, 8); + assert_eq_m256i(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_cvtph_epi32() { let a = _mm256_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - let idx = _mm256_set_epi16(0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15); - let r = _mm256_permutexvar_ph(idx, a); - let e = _mm256_setr_ph( - 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, + let r = _mm512_cvtph_epi32(a); + let e = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + assert_eq_m512i(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_cvtph_epi32() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - assert_eq_m256h(r, e); + let src = _mm512_set_epi32( + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, + ); + let r = _mm512_mask_cvtph_epi32(src, 0b0101010101010101, a); + let e = _mm512_set_epi32(10, 2, 12, 4, 14, 6, 16, 8, 18, 10, 20, 12, 22, 14, 24, 16); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_permutexvar_ph() { - let a = _mm512_set_ph( + unsafe fn test_mm512_maskz_cvtph_epi32() { + let a = _mm256_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, ); - let idx = _mm512_set_epi16( - 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 1, 3, 5, 7, 9, 11, 13, 15, - 17, 19, 21, 23, 25, 27, 29, 31, + let r = _mm512_maskz_cvtph_epi32(0b0101010101010101, a); + let e = _mm512_set_epi32(0, 2, 0, 4, 0, 6, 0, 8, 0, 10, 0, 12, 0, 14, 0, 16); + assert_eq_m512i(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_cvt_roundph_epi32() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - let r = _mm512_permutexvar_ph(idx, a); - let e = _mm512_setr_ph( - 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, 23.0, 25.0, 27.0, 29.0, - 31.0, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0, 22.0, 24.0, 26.0, 28.0, - 30.0, 32.0, + let r = _mm512_cvt_roundph_epi32::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a); + let e = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + assert_eq_m512i(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_cvt_roundph_epi32() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - 
assert_eq_m512h(r, e); + let src = _mm512_set_epi32( + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, + ); + let r = _mm512_mask_cvt_roundph_epi32::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, + 0b0101010101010101, + a, + ); + let e = _mm512_set_epi32(10, 2, 12, 4, 14, 6, 16, 8, 18, 10, 20, 12, 22, 14, 24, 16); + assert_eq_m512i(r, e); } - #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_cvtepi16_ph() { - let a = _mm_set_epi16(1, 2, 3, 4, 5, 6, 7, 8); - let r = _mm_cvtepi16_ph(a); - let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - assert_eq_m128h(r, e); + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_cvt_roundph_epi32() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let r = _mm512_maskz_cvt_roundph_epi32::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b0101010101010101, + a, + ); + let e = _mm512_set_epi32(0, 2, 0, 4, 0, 6, 0, 8, 0, 10, 0, 12, 0, 14, 0, 16); + assert_eq_m512i(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_cvtsh_i32() { + let a = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm_cvtsh_i32(a); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_cvt_roundsh_i32() { + let a = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm_cvt_roundsh_i32::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a); + assert_eq!(r, 1); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_cvtepi16_ph() { - let a = _mm_set_epi16(1, 2, 3, 4, 5, 6, 7, 8); - let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let r = _mm_mask_cvtepi16_ph(src, 0b01010101, a); - let e = _mm_set_ph(10., 2., 12., 4., 14., 6., 16., 8.); - assert_eq_m128h(r, e); + unsafe fn test_mm_cvtph_epu32() { + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); + let r = _mm_cvtph_epu32(a); + let e = _mm_set_epi32(1, 2, 3, 4); + assert_eq_m128i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_cvtepi16_ph() { - let a = _mm_set_epi16(1, 2, 3, 4, 5, 6, 7, 8); - let r = _mm_maskz_cvtepi16_ph(0b01010101, a); - let e = _mm_set_ph(0., 2., 0., 4., 0., 6., 0., 8.); - assert_eq_m128h(r, e); + unsafe fn test_mm_mask_cvtph_epu32() { + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); + let src = _mm_set_epi32(10, 11, 12, 13); + let r = _mm_mask_cvtph_epu32(src, 0b0101, a); + let e = _mm_set_epi32(10, 2, 12, 4); + assert_eq_m128i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_cvtepi16_ph() { - let a = _mm256_set_epi16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); - let r = _mm256_cvtepi16_ph(a); - let e = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - assert_eq_m256h(r, e); + unsafe fn test_mm_maskz_cvtph_epu32() { + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); + let r = _mm_maskz_cvtph_epu32(0b0101, a); + let e = _mm_set_epi32(0, 2, 0, 4); + assert_eq_m128i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_cvtepi16_ph() { - let a = _mm256_set_epi16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); - let src = _mm256_set_ph( - 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., - ); - let r = _mm256_mask_cvtepi16_ph(src, 0b0101010101010101, a); - let e = _mm256_set_ph( - 10., 2., 12., 4., 14., 6., 16., 8., 18., 10., 20., 12., 22., 14., 
24., 16., - ); - assert_eq_m256h(r, e); + unsafe fn test_mm256_cvtph_epu32() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm256_cvtph_epu32(a); + let e = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); + assert_eq_m256i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_cvtepi16_ph() { - let a = _mm256_set_epi16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); - let r = _mm256_maskz_cvtepi16_ph(0b0101010101010101, a); - let e = _mm256_set_ph( - 0., 2., 0., 4., 0., 6., 0., 8., 0., 10., 0., 12., 0., 14., 0., 16., - ); - assert_eq_m256h(r, e); + unsafe fn test_mm256_mask_cvtph_epu32() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let src = _mm256_set_epi32(10, 11, 12, 13, 14, 15, 16, 17); + let r = _mm256_mask_cvtph_epu32(src, 0b01010101, a); + let e = _mm256_set_epi32(10, 2, 12, 4, 14, 6, 16, 8); + assert_eq_m256i(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_cvtph_epu32() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm256_maskz_cvtph_epu32(0b01010101, a); + let e = _mm256_set_epi32(0, 2, 0, 4, 0, 6, 0, 8); + assert_eq_m256i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_cvtepi16_ph() { - let a = _mm512_set_epi16( - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, - 25, 26, 27, 28, 29, 30, 31, 32, - ); - let r = _mm512_cvtepi16_ph(a); - let e = _mm512_set_ph( + unsafe fn test_mm512_cvtph_epu32() { + let a = _mm256_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, ); - assert_eq_m512h(r, e); + let r = _mm512_cvtph_epu32(a); + let e = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_cvtepi16_ph() { - let a = _mm512_set_epi16( - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, - 25, 26, 27, 28, 29, 30, 31, 32, - ); - let src = _mm512_set_ph( - 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., 26., - 27., 28., 29., 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., 40., 41., + unsafe fn test_mm512_mask_cvtph_epu32() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - let r = _mm512_mask_cvtepi16_ph(src, 0b01010101010101010101010101010101, a); - let e = _mm512_set_ph( - 10., 2., 12., 4., 14., 6., 16., 8., 18., 10., 20., 12., 22., 14., 24., 16., 26., 18., - 28., 20., 30., 22., 32., 24., 34., 26., 36., 28., 38., 30., 40., 32., + let src = _mm512_set_epi32( + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, ); - assert_eq_m512h(r, e); + let r = _mm512_mask_cvtph_epu32(src, 0b0101010101010101, a); + let e = _mm512_set_epi32(10, 2, 12, 4, 14, 6, 16, 8, 18, 10, 20, 12, 22, 14, 24, 16); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_cvtepi16_ph() { - let a = _mm512_set_epi16( - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, - 25, 26, 27, 28, 29, 30, 31, 32, - ); - let r = _mm512_maskz_cvtepi16_ph(0b01010101010101010101010101010101, a); - let e = _mm512_set_ph( - 0., 2., 0., 4., 0., 6., 0., 8., 0., 10., 0., 12., 0., 14., 0., 16., 0., 18., 0., 20., - 0., 22., 0., 24., 0., 26., 0., 28., 0., 30., 0., 32., + 
unsafe fn test_mm512_maskz_cvtph_epu32() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - assert_eq_m512h(r, e); + let r = _mm512_maskz_cvtph_epu32(0b0101010101010101, a); + let e = _mm512_set_epi32(0, 2, 0, 4, 0, 6, 0, 8, 0, 10, 0, 12, 0, 14, 0, 16); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_cvt_roundepi16_ph() { - let a = _mm512_set_epi16( - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, - 25, 26, 27, 28, 29, 30, 31, 32, - ); - let r = _mm512_cvt_roundepi16_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a); - let e = _mm512_set_ph( + unsafe fn test_mm512_cvt_roundph_epu32() { + let a = _mm256_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, ); - assert_eq_m512h(r, e); + let r = _mm512_cvt_roundph_epu32::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a); + let e = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_cvt_roundepi16_ph() { - let a = _mm512_set_epi16( - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, - 25, 26, 27, 28, 29, 30, 31, 32, + unsafe fn test_mm512_mask_cvt_roundph_epu32() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - let src = _mm512_set_ph( - 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., 26., - 27., 28., 29., 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., 40., 41., + let src = _mm512_set_epi32( + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, ); - let r = _mm512_mask_cvt_roundepi16_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + let r = _mm512_mask_cvt_roundph_epu32::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( src, - 0b01010101010101010101010101010101, + 0b0101010101010101, a, ); - let e = _mm512_set_ph( - 10., 2., 12., 4., 14., 6., 16., 8., 18., 10., 20., 12., 22., 14., 24., 16., 26., 18., - 28., 20., 30., 22., 32., 24., 34., 26., 36., 28., 38., 30., 40., 32., - ); - assert_eq_m512h(r, e); + let e = _mm512_set_epi32(10, 2, 12, 4, 14, 6, 16, 8, 18, 10, 20, 12, 22, 14, 24, 16); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_cvt_roundepi16_ph() { - let a = _mm512_set_epi16( - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, - 25, 26, 27, 28, 29, 30, 31, 32, + unsafe fn test_mm512_maskz_cvt_roundph_epu32() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - let r = _mm512_maskz_cvt_roundepi16_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b01010101010101010101010101010101, + let r = _mm512_maskz_cvt_roundph_epu32::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b0101010101010101, a, ); - let e = _mm512_set_ph( - 0., 2., 0., 4., 0., 6., 0., 8., 0., 10., 0., 12., 0., 14., 0., 16., 0., 18., 0., 20., - 0., 22., 0., 24., 0., 26., 0., 28., 0., 30., 0., 32., - ); - assert_eq_m512h(r, e); + let e = _mm512_set_epi32(0, 2, 0, 4, 0, 6, 0, 8, 0, 10, 0, 12, 0, 14, 0, 16); + assert_eq_m512i(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_cvtsh_u32() { + let a = _mm_setr_ph(1.0, 2.0, 
3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm_cvtsh_u32(a); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_cvt_roundsh_u32() { + let a = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm_cvt_roundsh_u32::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a); + assert_eq!(r, 1); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_cvtepu16_ph() { - let a = _mm_set_epi16(1, 2, 3, 4, 5, 6, 7, 8); - let r = _mm_cvtepu16_ph(a); - let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - assert_eq_m128h(r, e); + unsafe fn test_mm_cvttph_epi32() { + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); + let r = _mm_cvttph_epi32(a); + let e = _mm_set_epi32(1, 2, 3, 4); + assert_eq_m128i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_cvtepu16_ph() { - let a = _mm_set_epi16(1, 2, 3, 4, 5, 6, 7, 8); - let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let r = _mm_mask_cvtepu16_ph(src, 0b01010101, a); - let e = _mm_set_ph(10., 2., 12., 4., 14., 6., 16., 8.); - assert_eq_m128h(r, e); + unsafe fn test_mm_mask_cvttph_epi32() { + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); + let src = _mm_set_epi32(10, 11, 12, 13); + let r = _mm_mask_cvttph_epi32(src, 0b0101, a); + let e = _mm_set_epi32(10, 2, 12, 4); + assert_eq_m128i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_cvtepu16_ph() { - let a = _mm_set_epi16(1, 2, 3, 4, 5, 6, 7, 8); - let r = _mm_maskz_cvtepu16_ph(0b01010101, a); - let e = _mm_set_ph(0., 2., 0., 4., 0., 6., 0., 8.); - assert_eq_m128h(r, e); + unsafe fn test_mm_maskz_cvttph_epi32() { + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); + let r = _mm_maskz_cvttph_epi32(0b0101, a); + let e = _mm_set_epi32(0, 2, 0, 4); + assert_eq_m128i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_cvtepu16_ph() { - let a = _mm256_set_epi16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); - let r = _mm256_cvtepu16_ph(a); - let e = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - assert_eq_m256h(r, e); + unsafe fn test_mm256_cvttph_epi32() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm256_cvttph_epi32(a); + let e = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); + assert_eq_m256i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_cvtepu16_ph() { - let a = _mm256_set_epi16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); - let src = _mm256_set_ph( - 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., - ); - let r = _mm256_mask_cvtepu16_ph(src, 0b0101010101010101, a); - let e = _mm256_set_ph( - 10., 2., 12., 4., 14., 6., 16., 8., 18., 10., 20., 12., 22., 14., 24., 16., - ); - assert_eq_m256h(r, e); + unsafe fn test_mm256_mask_cvttph_epi32() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let src = _mm256_set_epi32(10, 11, 12, 13, 14, 15, 16, 17); + let r = _mm256_mask_cvttph_epi32(src, 0b01010101, a); + let e = _mm256_set_epi32(10, 2, 12, 4, 14, 6, 16, 8); + assert_eq_m256i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_cvtepu16_ph() { - let a = _mm256_set_epi16(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); - let r = _mm256_maskz_cvtepu16_ph(0b0101010101010101, a); - let e = _mm256_set_ph( - 0., 2., 0., 4., 0., 6., 0., 8., 0., 10., 0., 12., 0., 14., 0., 16., - ); - assert_eq_m256h(r, e); 
+ unsafe fn test_mm256_maskz_cvttph_epi32() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm256_maskz_cvttph_epi32(0b01010101, a); + let e = _mm256_set_epi32(0, 2, 0, 4, 0, 6, 0, 8); + assert_eq_m256i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_cvtepu16_ph() { - let a = _mm512_set_epi16( - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, - 25, 26, 27, 28, 29, 30, 31, 32, - ); - let r = _mm512_cvtepu16_ph(a); - let e = _mm512_set_ph( + unsafe fn test_mm512_cvttph_epi32() { + let a = _mm256_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 32.0, ); - assert_eq_m512h(r, e); + let r = _mm512_cvttph_epi32(a); + let e = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_cvtepu16_ph() { - let a = _mm512_set_epi16( - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, - 25, 26, 27, 28, 29, 30, 31, 32, - ); - let src = _mm512_set_ph( - 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., 26., - 27., 28., 29., 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., 40., 41., + unsafe fn test_mm512_mask_cvttph_epi32() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - let r = _mm512_mask_cvtepu16_ph(src, 0b01010101010101010101010101010101, a); - let e = _mm512_set_ph( - 10., 2., 12., 4., 14., 6., 16., 8., 18., 10., 20., 12., 22., 14., 24., 16., 26., 18., - 28., 20., 30., 22., 32., 24., 34., 26., 36., 28., 38., 30., 40., 32., + let src = _mm512_set_epi32( + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, ); - assert_eq_m512h(r, e); + let r = _mm512_mask_cvttph_epi32(src, 0b0101010101010101, a); + let e = _mm512_set_epi32(10, 2, 12, 4, 14, 6, 16, 8, 18, 10, 20, 12, 22, 14, 24, 16); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_cvtepu16_ph() { - let a = _mm512_set_epi16( - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, - 25, 26, 27, 28, 29, 30, 31, 32, - ); - let r = _mm512_maskz_cvtepu16_ph(0b01010101010101010101010101010101, a); - let e = _mm512_set_ph( - 0., 2., 0., 4., 0., 6., 0., 8., 0., 10., 0., 12., 0., 14., 0., 16., 0., 18., 0., 20., - 0., 22., 0., 24., 0., 26., 0., 28., 0., 30., 0., 32., + unsafe fn test_mm512_maskz_cvttph_epi32() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - assert_eq_m512h(r, e); + let r = _mm512_maskz_cvttph_epi32(0b0101010101010101, a); + let e = _mm512_set_epi32(0, 2, 0, 4, 0, 6, 0, 8, 0, 10, 0, 12, 0, 14, 0, 16); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_cvt_roundepu16_ph() { - let a = _mm512_set_epi16( - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, - 25, 26, 27, 28, 29, 30, 31, 32, - ); - let r = _mm512_cvt_roundepu16_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a); - let e = _mm512_set_ph( + unsafe fn test_mm512_cvtt_roundph_epi32() { + let a = _mm256_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, - 31.0, 
32.0, ); - assert_eq_m512h(r, e); + let r = _mm512_cvtt_roundph_epi32::<_MM_FROUND_NO_EXC>(a); + let e = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_cvt_roundepu16_ph() { - let a = _mm512_set_epi16( - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, - 25, 26, 27, 28, 29, 30, 31, 32, - ); - let src = _mm512_set_ph( - 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., 26., - 27., 28., 29., 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., 40., 41., - ); - let r = _mm512_mask_cvt_roundepu16_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, - 0b01010101010101010101010101010101, - a, + unsafe fn test_mm512_mask_cvtt_roundph_epi32() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - let e = _mm512_set_ph( - 10., 2., 12., 4., 14., 6., 16., 8., 18., 10., 20., 12., 22., 14., 24., 16., 26., 18., - 28., 20., 30., 22., 32., 24., 34., 26., 36., 28., 38., 30., 40., 32., + let src = _mm512_set_epi32( + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, ); - assert_eq_m512h(r, e); + let r = _mm512_mask_cvtt_roundph_epi32::<_MM_FROUND_NO_EXC>(src, 0b0101010101010101, a); + let e = _mm512_set_epi32(10, 2, 12, 4, 14, 6, 16, 8, 18, 10, 20, 12, 22, 14, 24, 16); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_cvt_roundepu16_ph() { - let a = _mm512_set_epi16( - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, - 25, 26, 27, 28, 29, 30, 31, 32, - ); - let r = _mm512_maskz_cvt_roundepu16_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b01010101010101010101010101010101, - a, - ); - let e = _mm512_set_ph( - 0., 2., 0., 4., 0., 6., 0., 8., 0., 10., 0., 12., 0., 14., 0., 16., 0., 18., 0., 20., - 0., 22., 0., 24., 0., 26., 0., 28., 0., 30., 0., 32., + unsafe fn test_mm512_maskz_cvtt_roundph_epi32() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - assert_eq_m512h(r, e); + let r = _mm512_maskz_cvtt_roundph_epi32::<_MM_FROUND_NO_EXC>(0b0101010101010101, a); + let e = _mm512_set_epi32(0, 2, 0, 4, 0, 6, 0, 8, 0, 10, 0, 12, 0, 14, 0, 16); + assert_eq_m512i(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_cvttsh_i32() { + let a = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm_cvttsh_i32(a); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_cvtt_roundsh_i32() { + let a = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm_cvtt_roundsh_i32::<_MM_FROUND_NO_EXC>(a); + assert_eq!(r, 1); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_cvtepi32_ph() { - let a = _mm_set_epi32(1, 2, 3, 4); - let r = _mm_cvtepi32_ph(a); - let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); - assert_eq_m128h(r, e); + unsafe fn test_mm_cvttph_epu32() { + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); + let r = _mm_cvttph_epu32(a); + let e = _mm_set_epi32(1, 2, 3, 4); + assert_eq_m128i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_cvtepi32_ph() { - let a = _mm_set_epi32(1, 2, 3, 4); - let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let r = _mm_mask_cvtepi32_ph(src, 0b0101, a); - let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 14., 2., 16., 4.); - 
assert_eq_m128h(r, e); + unsafe fn test_mm_mask_cvttph_epu32() { + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); + let src = _mm_set_epi32(10, 11, 12, 13); + let r = _mm_mask_cvttph_epu32(src, 0b0101, a); + let e = _mm_set_epi32(10, 2, 12, 4); + assert_eq_m128i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_cvtepi32_ph() { - let a = _mm_set_epi32(1, 2, 3, 4); - let r = _mm_maskz_cvtepi32_ph(0b0101, a); - let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 2., 0.0, 4.); - assert_eq_m128h(r, e); + unsafe fn test_mm_maskz_cvttph_epu32() { + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); + let r = _mm_maskz_cvttph_epu32(0b0101, a); + let e = _mm_set_epi32(0, 2, 0, 4); + assert_eq_m128i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_cvtepi32_ph() { - let a = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); - let r = _mm256_cvtepi32_ph(a); - let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - assert_eq_m128h(r, e); + unsafe fn test_mm256_cvttph_epu32() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm256_cvttph_epu32(a); + let e = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); + assert_eq_m256i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_cvtepi32_ph() { - let a = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); - let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let r = _mm256_mask_cvtepi32_ph(src, 0b01010101, a); - let e = _mm_set_ph(10., 2., 12., 4., 14., 6., 16., 8.); - assert_eq_m128h(r, e); + unsafe fn test_mm256_mask_cvttph_epu32() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let src = _mm256_set_epi32(10, 11, 12, 13, 14, 15, 16, 17); + let r = _mm256_mask_cvttph_epu32(src, 0b01010101, a); + let e = _mm256_set_epi32(10, 2, 12, 4, 14, 6, 16, 8); + assert_eq_m256i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_cvtepi32_ph() { - let a = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); - let r = _mm256_maskz_cvtepi32_ph(0b01010101, a); - let e = _mm_set_ph(0.0, 2.0, 0.0, 4.0, 0.0, 6.0, 0.0, 8.0); - assert_eq_m128h(r, e); + unsafe fn test_mm256_maskz_cvttph_epu32() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm256_maskz_cvttph_epu32(0b01010101, a); + let e = _mm256_set_epi32(0, 2, 0, 4, 0, 6, 0, 8); + assert_eq_m256i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_cvtepi32_ph() { - let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); - let r = _mm512_cvtepi32_ph(a); - let e = _mm256_set_ph( + unsafe fn test_mm512_cvttph_epu32() { + let a = _mm256_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - assert_eq_m256h(r, e); + let r = _mm512_cvttph_epu32(a); + let e = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_cvtepi32_ph() { - let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); - let src = _mm256_set_ph( - 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., + unsafe fn test_mm512_mask_cvttph_epu32() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - let r = _mm512_mask_cvtepi32_ph(src, 0b0101010101010101, a); - let e = _mm256_set_ph( - 10., 2., 12., 4., 14., 6., 16., 8., 18., 10., 20., 12., 22., 14., 24., 16., + let src = 
_mm512_set_epi32( + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, ); - assert_eq_m256h(r, e); + let r = _mm512_mask_cvttph_epu32(src, 0b0101010101010101, a); + let e = _mm512_set_epi32(10, 2, 12, 4, 14, 6, 16, 8, 18, 10, 20, 12, 22, 14, 24, 16); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_cvtepi32_ph() { - let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); - let r = _mm512_maskz_cvtepi32_ph(0b0101010101010101, a); - let e = _mm256_set_ph( - 0.0, 2.0, 0.0, 4.0, 0.0, 6.0, 0.0, 8.0, 0.0, 10.0, 0.0, 12.0, 0.0, 14.0, 0.0, 16.0, + unsafe fn test_mm512_maskz_cvttph_epu32() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - assert_eq_m256h(r, e); + let r = _mm512_maskz_cvttph_epu32(0b0101010101010101, a); + let e = _mm512_set_epi32(0, 2, 0, 4, 0, 6, 0, 8, 0, 10, 0, 12, 0, 14, 0, 16); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_cvt_roundepi32_ph() { - let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); - let r = _mm512_cvt_roundepi32_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a); - let e = _mm256_set_ph( + unsafe fn test_mm512_cvtt_roundph_epu32() { + let a = _mm256_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - assert_eq_m256h(r, e); + let r = _mm512_cvtt_roundph_epu32::<_MM_FROUND_NO_EXC>(a); + let e = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_cvt_roundepi32_ph() { - let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); - let src = _mm256_set_ph( - 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., - ); - let r = _mm512_mask_cvt_roundepi32_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, - 0b0101010101010101, - a, + unsafe fn test_mm512_mask_cvtt_roundph_epu32() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - let e = _mm256_set_ph( - 10., 2., 12., 4., 14., 6., 16., 8., 18., 10., 20., 12., 22., 14., 24., 16., + let src = _mm512_set_epi32( + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, ); - assert_eq_m256h(r, e); + let r = _mm512_mask_cvtt_roundph_epu32::<_MM_FROUND_NO_EXC>(src, 0b0101010101010101, a); + let e = _mm512_set_epi32(10, 2, 12, 4, 14, 6, 16, 8, 18, 10, 20, 12, 22, 14, 24, 16); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_cvt_roundepi32_ph() { - let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); - let r = _mm512_maskz_cvt_roundepi32_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b0101010101010101, - a, - ); - let e = _mm256_set_ph( - 0.0, 2.0, 0.0, 4.0, 0.0, 6.0, 0.0, 8.0, 0.0, 10.0, 0.0, 12.0, 0.0, 14.0, 0.0, 16.0, + unsafe fn test_mm512_maskz_cvtt_roundph_epu32() { + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - assert_eq_m256h(r, e); + let r = _mm512_maskz_cvtt_roundph_epu32::<_MM_FROUND_NO_EXC>(0b0101010101010101, a); + let e = _mm512_set_epi32(0, 2, 0, 4, 0, 6, 0, 8, 0, 10, 0, 12, 0, 14, 0, 16); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_cvti32_sh() { + unsafe fn test_mm_cvttsh_u32() { let a = 
_mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let r = _mm_cvti32_sh(a, 10); - let e = _mm_setr_ph(10.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - assert_eq_m128h(r, e); + let r = _mm_cvttsh_u32(a); + assert_eq!(r, 1); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_cvt_roundi32_sh() { + unsafe fn test_mm_cvtt_roundsh_u32() { let a = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let r = _mm_cvt_roundi32_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, 10); - let e = _mm_setr_ph(10.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - assert_eq_m128h(r, e); + let r = _mm_cvtt_roundsh_u32::<_MM_FROUND_NO_EXC>(a); + assert_eq!(r, 1); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_cvtepu32_ph() { - let a = _mm_set_epi32(1, 2, 3, 4); - let r = _mm_cvtepu32_ph(a); - let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); - assert_eq_m128h(r, e); + unsafe fn test_mm_cvtph_epi64() { + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 2.0); + let r = _mm_cvtph_epi64(a); + let e = _mm_set_epi64x(1, 2); + assert_eq_m128i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_cvtepu32_ph() { - let a = _mm_set_epi32(1, 2, 3, 4); - let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let r = _mm_mask_cvtepu32_ph(src, 0b0101, a); - let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 14., 2., 16., 4.); - assert_eq_m128h(r, e); + unsafe fn test_mm_mask_cvtph_epi64() { + let src = _mm_set_epi64x(3, 4); + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 2.0); + let r = _mm_mask_cvtph_epi64(src, 0b01, a); + let e = _mm_set_epi64x(3, 2); + assert_eq_m128i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_cvtepu32_ph() { - let a = _mm_set_epi32(1, 2, 3, 4); - let r = _mm_maskz_cvtepu32_ph(0b0101, a); - let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 2., 0.0, 4.); - assert_eq_m128h(r, e); + unsafe fn test_mm_maskz_cvtph_epi64() { + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 2.0); + let r = _mm_maskz_cvtph_epi64(0b01, a); + let e = _mm_set_epi64x(0, 2); + assert_eq_m128i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_cvtepu32_ph() { - let a = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); - let r = _mm256_cvtepu32_ph(a); - let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - assert_eq_m128h(r, e); + unsafe fn test_mm256_cvtph_epi64() { + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); + let r = _mm256_cvtph_epi64(a); + let e = _mm256_set_epi64x(1, 2, 3, 4); + assert_eq_m256i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_cvtepu32_ph() { - let a = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); - let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let r = _mm256_mask_cvtepu32_ph(src, 0b01010101, a); - let e = _mm_set_ph(10., 2., 12., 4., 14., 6., 16., 8.); - assert_eq_m128h(r, e); + unsafe fn test_mm256_mask_cvtph_epi64() { + let src = _mm256_set_epi64x(5, 6, 7, 8); + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); + let r = _mm256_mask_cvtph_epi64(src, 0b0101, a); + let e = _mm256_set_epi64x(5, 2, 7, 4); + assert_eq_m256i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_cvtepu32_ph() { - let a = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); - let r = _mm256_maskz_cvtepu32_ph(0b01010101, a); - let e = _mm_set_ph(0.0, 2.0, 0.0, 4.0, 0.0, 6.0, 0.0, 8.0); - assert_eq_m128h(r, e); + unsafe fn test_mm256_maskz_cvtph_epi64() { + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 
4.0); + let r = _mm256_maskz_cvtph_epi64(0b0101, a); + let e = _mm256_set_epi64x(0, 2, 0, 4); + assert_eq_m256i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_cvtepu32_ph() { - let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); - let r = _mm512_cvtepu32_ph(a); - let e = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - assert_eq_m256h(r, e); + unsafe fn test_mm512_cvtph_epi64() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm512_cvtph_epi64(a); + let e = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_cvtepu32_ph() { - let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); - let src = _mm256_set_ph( - 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., - ); - let r = _mm512_mask_cvtepu32_ph(src, 0b0101010101010101, a); - let e = _mm256_set_ph( - 10., 2.0, 12., 4.0, 14., 6.0, 16., 8.0, 18., 10.0, 20., 12.0, 22., 14.0, 24., 16.0, - ); - assert_eq_m256h(r, e); + unsafe fn test_mm512_mask_cvtph_epi64() { + let src = _mm512_set_epi64(9, 10, 11, 12, 13, 14, 15, 16); + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm512_mask_cvtph_epi64(src, 0b01010101, a); + let e = _mm512_set_epi64(9, 2, 11, 4, 13, 6, 15, 8); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_cvtepu32_ph() { - let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); - let r = _mm512_maskz_cvtepu32_ph(0b0101010101010101, a); - let e = _mm256_set_ph( - 0.0, 2.0, 0.0, 4.0, 0.0, 6.0, 0.0, 8.0, 0.0, 10.0, 0.0, 12.0, 0.0, 14.0, 0.0, 16.0, - ); - assert_eq_m256h(r, e); + unsafe fn test_mm512_maskz_cvtph_epi64() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm512_maskz_cvtph_epi64(0b01010101, a); + let e = _mm512_set_epi64(0, 2, 0, 4, 0, 6, 0, 8); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_cvt_roundepu32_ph() { - let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); - let r = _mm512_cvt_roundepu32_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a); - let e = _mm256_set_ph( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - ); - assert_eq_m256h(r, e); + unsafe fn test_mm512_cvt_roundph_epi64() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm512_cvt_roundph_epi64::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a); + let e = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_cvt_roundepu32_ph() { - let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); - let src = _mm256_set_ph( - 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., - ); - let r = _mm512_mask_cvt_roundepu32_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, - 0b0101010101010101, - a, - ); - let e = _mm256_set_ph( - 10.0, 2.0, 12.0, 4.0, 14.0, 6.0, 16.0, 8.0, 18.0, 10.0, 20.0, 12.0, 22.0, 14.0, 24.0, - 16.0, + unsafe fn test_mm512_mask_cvt_roundph_epi64() { + let src = _mm512_set_epi64(9, 10, 11, 12, 13, 14, 15, 16); + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm512_mask_cvt_roundph_epi64::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 
0b01010101, a, ); - assert_eq_m256h(r, e); + let e = _mm512_set_epi64(9, 2, 11, 4, 13, 6, 15, 8); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_cvt_roundepu32_ph() { - let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); - let r = _mm512_maskz_cvt_roundepu32_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b0101010101010101, - a, - ); - let e = _mm256_set_ph( - 0.0, 2.0, 0.0, 4.0, 0.0, 6.0, 0.0, 8.0, 0.0, 10.0, 0.0, 12.0, 0.0, 14.0, 0.0, 16.0, + unsafe fn test_mm512_maskz_cvt_roundph_epi64() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm512_maskz_cvt_roundph_epi64::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101, a, ); - assert_eq_m256h(r, e); + let e = _mm512_set_epi64(0, 2, 0, 4, 0, 6, 0, 8); + assert_eq_m512i(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_cvtph_epu64() { + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 2.0); + let r = _mm_cvtph_epu64(a); + let e = _mm_set_epi64x(1, 2); + assert_eq_m128i(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_mask_cvtph_epu64() { + let src = _mm_set_epi64x(3, 4); + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 2.0); + let r = _mm_mask_cvtph_epu64(src, 0b01, a); + let e = _mm_set_epi64x(3, 2); + assert_eq_m128i(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm_maskz_cvtph_epu64() { + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 2.0); + let r = _mm_maskz_cvtph_epu64(0b01, a); + let e = _mm_set_epi64x(0, 2); + assert_eq_m128i(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_cvtph_epu64() { + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); + let r = _mm256_cvtph_epu64(a); + let e = _mm256_set_epi64x(1, 2, 3, 4); + assert_eq_m256i(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_mask_cvtph_epu64() { + let src = _mm256_set_epi64x(5, 6, 7, 8); + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); + let r = _mm256_mask_cvtph_epu64(src, 0b0101, a); + let e = _mm256_set_epi64x(5, 2, 7, 4); + assert_eq_m256i(r, e); + } + + #[simd_test(enable = "avx512fp16,avx512vl")] + unsafe fn test_mm256_maskz_cvtph_epu64() { + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); + let r = _mm256_maskz_cvtph_epu64(0b0101, a); + let e = _mm256_set_epi64x(0, 2, 0, 4); + assert_eq_m256i(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_cvtph_epu64() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm512_cvtph_epu64(a); + let e = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); + assert_eq_m512i(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_cvtph_epu64() { + let src = _mm512_set_epi64(9, 10, 11, 12, 13, 14, 15, 16); + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm512_mask_cvtph_epu64(src, 0b01010101, a); + let e = _mm512_set_epi64(9, 2, 11, 4, 13, 6, 15, 8); + assert_eq_m512i(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_cvtph_epu64() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm512_maskz_cvtph_epu64(0b01010101, a); + let e = _mm512_set_epi64(0, 2, 0, 4, 0, 6, 0, 8); + assert_eq_m512i(r, e); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_cvt_roundph_epu64() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = 
_mm512_cvt_roundph_epu64::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a); + let e = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_cvtu32_sh() { - let a = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let r = _mm_cvtu32_sh(a, 10); - let e = _mm_setr_ph(10.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - assert_eq_m128h(r, e); + unsafe fn test_mm512_mask_cvt_roundph_epu64() { + let src = _mm512_set_epi64(9, 10, 11, 12, 13, 14, 15, 16); + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm512_mask_cvt_roundph_epu64::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + src, 0b01010101, a, + ); + let e = _mm512_set_epi64(9, 2, 11, 4, 13, 6, 15, 8); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_cvt_roundu32_sh() { - let a = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let r = _mm_cvt_roundu32_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, 10); - let e = _mm_setr_ph(10.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - assert_eq_m128h(r, e); + unsafe fn test_mm512_maskz_cvt_roundph_epu64() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm512_maskz_cvt_roundph_epu64::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( + 0b01010101, a, + ); + let e = _mm512_set_epi64(0, 2, 0, 4, 0, 6, 0, 8); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_cvtepi64_ph() { - let a = _mm_set_epi64x(1, 2); - let r = _mm_cvtepi64_ph(a); - let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 2.0); - assert_eq_m128h(r, e); + unsafe fn test_mm_cvttph_epi64() { + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 2.0); + let r = _mm_cvttph_epi64(a); + let e = _mm_set_epi64x(1, 2); + assert_eq_m128i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_cvtepi64_ph() { - let a = _mm_set_epi64x(1, 2); - let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let r = _mm_mask_cvtepi64_ph(src, 0b01, a); - let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 16., 2.); - assert_eq_m128h(r, e); + unsafe fn test_mm_mask_cvttph_epi64() { + let src = _mm_set_epi64x(3, 4); + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 2.0); + let r = _mm_mask_cvttph_epi64(src, 0b01, a); + let e = _mm_set_epi64x(3, 2); + assert_eq_m128i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_cvtepi64_ph() { - let a = _mm_set_epi64x(1, 2); - let r = _mm_maskz_cvtepi64_ph(0b01, a); - let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.); - assert_eq_m128h(r, e); + unsafe fn test_mm_maskz_cvttph_epi64() { + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 2.0); + let r = _mm_maskz_cvttph_epi64(0b01, a); + let e = _mm_set_epi64x(0, 2); + assert_eq_m128i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_cvtepi64_ph() { - let a = _mm256_set_epi64x(1, 2, 3, 4); - let r = _mm256_cvtepi64_ph(a); - let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); - assert_eq_m128h(r, e); + unsafe fn test_mm256_cvttph_epi64() { + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); + let r = _mm256_cvttph_epi64(a); + let e = _mm256_set_epi64x(1, 2, 3, 4); + assert_eq_m256i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_cvtepi64_ph() { - let a = _mm256_set_epi64x(1, 2, 3, 4); - let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let r = _mm256_mask_cvtepi64_ph(src, 0b0101, a); - let e = 
_mm_set_ph(0.0, 0.0, 0.0, 0.0, 14., 2.0, 16.0, 4.0); - assert_eq_m128h(r, e); + unsafe fn test_mm256_mask_cvttph_epi64() { + let src = _mm256_set_epi64x(5, 6, 7, 8); + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); + let r = _mm256_mask_cvttph_epi64(src, 0b0101, a); + let e = _mm256_set_epi64x(5, 2, 7, 4); + assert_eq_m256i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_cvtepi64_ph() { - let a = _mm256_set_epi64x(1, 2, 3, 4); - let r = _mm256_maskz_cvtepi64_ph(0b0101, a); - let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 0.0, 4.0); - assert_eq_m128h(r, e); + unsafe fn test_mm256_maskz_cvttph_epi64() { + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); + let r = _mm256_maskz_cvttph_epi64(0b0101, a); + let e = _mm256_set_epi64x(0, 2, 0, 4); + assert_eq_m256i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_cvtepi64_ph() { - let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); - let r = _mm512_cvtepi64_ph(a); - let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - assert_eq_m128h(r, e); + unsafe fn test_mm512_cvttph_epi64() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm512_cvttph_epi64(a); + let e = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_cvtepi64_ph() { - let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); - let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let r = _mm512_mask_cvtepi64_ph(src, 0b01010101, a); - let e = _mm_set_ph(10., 2., 12., 4., 14., 6., 16., 8.); - assert_eq_m128h(r, e); + unsafe fn test_mm512_mask_cvttph_epi64() { + let src = _mm512_set_epi64(9, 10, 11, 12, 13, 14, 15, 16); + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm512_mask_cvttph_epi64(src, 0b01010101, a); + let e = _mm512_set_epi64(9, 2, 11, 4, 13, 6, 15, 8); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_cvtepi64_ph() { - let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); - let r = _mm512_maskz_cvtepi64_ph(0b01010101, a); - let e = _mm_set_ph(0.0, 2., 0.0, 4., 0.0, 6., 0.0, 8.); - assert_eq_m128h(r, e); + unsafe fn test_mm512_maskz_cvttph_epi64() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm512_maskz_cvttph_epi64(0b01010101, a); + let e = _mm512_set_epi64(0, 2, 0, 4, 0, 6, 0, 8); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_cvt_roundepi64_ph() { - let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); - let r = _mm512_cvt_roundepi64_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a); - let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - assert_eq_m128h(r, e); + unsafe fn test_mm512_cvtt_roundph_epi64() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm512_cvtt_roundph_epi64::<_MM_FROUND_NO_EXC>(a); + let e = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_cvt_roundepi64_ph() { - let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); - let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let r = _mm512_mask_cvt_roundepi64_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 0b01010101, a, - ); - let e = _mm_set_ph(10., 2., 12., 4., 14., 6., 16., 8.); - assert_eq_m128h(r, e); + unsafe fn test_mm512_mask_cvtt_roundph_epi64() { + let src = _mm512_set_epi64(9, 10, 11, 12, 13, 14, 15, 16); + let a = _mm_set_ph(1.0, 2.0, 
3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm512_mask_cvtt_roundph_epi64::<_MM_FROUND_NO_EXC>(src, 0b01010101, a); + let e = _mm512_set_epi64(9, 2, 11, 4, 13, 6, 15, 8); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_cvt_roundepi64_ph() { - let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); - let r = _mm512_maskz_cvt_roundepi64_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b01010101, a, - ); - let e = _mm_set_ph(0.0, 2., 0.0, 4., 0.0, 6., 0.0, 8.); - assert_eq_m128h(r, e); + unsafe fn test_mm512_maskz_cvtt_roundph_epi64() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm512_maskz_cvtt_roundph_epi64::<_MM_FROUND_NO_EXC>(0b01010101, a); + let e = _mm512_set_epi64(0, 2, 0, 4, 0, 6, 0, 8); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_cvtepu64_ph() { - let a = _mm_set_epi64x(1, 2); - let r = _mm_cvtepu64_ph(a); - let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 2.0); - assert_eq_m128h(r, e); + unsafe fn test_mm_cvttph_epu64() { + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 2.0); + let r = _mm_cvttph_epu64(a); + let e = _mm_set_epi64x(1, 2); + assert_eq_m128i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_cvtepu64_ph() { - let a = _mm_set_epi64x(1, 2); - let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let r = _mm_mask_cvtepu64_ph(src, 0b01, a); - let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 16., 2.); - assert_eq_m128h(r, e); + unsafe fn test_mm_mask_cvttph_epu64() { + let src = _mm_set_epi64x(3, 4); + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 2.0); + let r = _mm_mask_cvttph_epu64(src, 0b01, a); + let e = _mm_set_epi64x(3, 2); + assert_eq_m128i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_cvtepu64_ph() { - let a = _mm_set_epi64x(1, 2); - let r = _mm_maskz_cvtepu64_ph(0b01, a); - let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0); - assert_eq_m128h(r, e); + unsafe fn test_mm_maskz_cvttph_epu64() { + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 2.0); + let r = _mm_maskz_cvttph_epu64(0b01, a); + let e = _mm_set_epi64x(0, 2); + assert_eq_m128i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_cvtepu64_ph() { - let a = _mm256_set_epi64x(1, 2, 3, 4); - let r = _mm256_cvtepu64_ph(a); - let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); - assert_eq_m128h(r, e); + unsafe fn test_mm256_cvttph_epu64() { + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); + let r = _mm256_cvttph_epu64(a); + let e = _mm256_set_epi64x(1, 2, 3, 4); + assert_eq_m256i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_cvtepu64_ph() { - let a = _mm256_set_epi64x(1, 2, 3, 4); - let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let r = _mm256_mask_cvtepu64_ph(src, 0b0101, a); - let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 14., 2.0, 16.0, 4.0); - assert_eq_m128h(r, e); + unsafe fn test_mm256_mask_cvttph_epu64() { + let src = _mm256_set_epi64x(5, 6, 7, 8); + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); + let r = _mm256_mask_cvttph_epu64(src, 0b0101, a); + let e = _mm256_set_epi64x(5, 2, 7, 4); + assert_eq_m256i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_cvtepu64_ph() { - let a = _mm256_set_epi64x(1, 2, 3, 4); - let r = _mm256_maskz_cvtepu64_ph(0b0101, a); - let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 0.0, 4.0); - assert_eq_m128h(r, 
e); + unsafe fn test_mm256_maskz_cvttph_epu64() { + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); + let r = _mm256_maskz_cvttph_epu64(0b0101, a); + let e = _mm256_set_epi64x(0, 2, 0, 4); + assert_eq_m256i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_cvtepu64_ph() { - let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); - let r = _mm512_cvtepu64_ph(a); - let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - assert_eq_m128h(r, e); + unsafe fn test_mm512_cvttph_epu64() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm512_cvttph_epu64(a); + let e = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_cvtepu64_ph() { - let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); - let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let r = _mm512_mask_cvtepu64_ph(src, 0b01010101, a); - let e = _mm_set_ph(10., 2., 12., 4., 14., 6., 16., 8.); - assert_eq_m128h(r, e); + unsafe fn test_mm512_mask_cvttph_epu64() { + let src = _mm512_set_epi64(9, 10, 11, 12, 13, 14, 15, 16); + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm512_mask_cvttph_epu64(src, 0b01010101, a); + let e = _mm512_set_epi64(9, 2, 11, 4, 13, 6, 15, 8); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_cvtepu64_ph() { - let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); - let r = _mm512_maskz_cvtepu64_ph(0b01010101, a); - let e = _mm_set_ph(0.0, 2., 0.0, 4., 0.0, 6., 0.0, 8.); - assert_eq_m128h(r, e); + unsafe fn test_mm512_maskz_cvttph_epu64() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm512_maskz_cvttph_epu64(0b01010101, a); + let e = _mm512_set_epi64(0, 2, 0, 4, 0, 6, 0, 8); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_cvt_roundepu64_ph() { - let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); - let r = _mm512_cvt_roundepu64_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a); - let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - assert_eq_m128h(r, e); + unsafe fn test_mm512_cvtt_roundph_epu64() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm512_cvtt_roundph_epu64::<_MM_FROUND_NO_EXC>(a); + let e = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_cvt_roundepu64_ph() { - let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); - let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let r = _mm512_mask_cvt_roundepu64_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 0b01010101, a, - ); - let e = _mm_set_ph(10., 2., 12., 4., 14., 6., 16., 8.); - assert_eq_m128h(r, e); + unsafe fn test_mm512_mask_cvtt_roundph_epu64() { + let src = _mm512_set_epi64(9, 10, 11, 12, 13, 14, 15, 16); + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm512_mask_cvtt_roundph_epu64::<_MM_FROUND_NO_EXC>(src, 0b01010101, a); + let e = _mm512_set_epi64(9, 2, 11, 4, 13, 6, 15, 8); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_cvt_roundepu64_ph() { - let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); - let r = _mm512_maskz_cvt_roundepu64_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b01010101, a, - ); - let e = _mm_set_ph(0.0, 2., 0.0, 4., 0.0, 6., 0.0, 8.); - assert_eq_m128h(r, e); + unsafe fn test_mm512_maskz_cvtt_roundph_epu64() { + let a = 
_mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm512_maskz_cvtt_roundph_epu64::<_MM_FROUND_NO_EXC>(0b01010101, a); + let e = _mm512_set_epi64(0, 2, 0, 4, 0, 6, 0, 8); + assert_eq_m512i(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_cvtxps_ph() { - let a = _mm_set_ps(1.0, 2.0, 3.0, 4.0); - let r = _mm_cvtxps_ph(a); - let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); - assert_eq_m128h(r, e); + unsafe fn test_mm_cvtxph_ps() { + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); + let r = _mm_cvtxph_ps(a); + let e = _mm_set_ps(1.0, 2.0, 3.0, 4.0); + assert_eq_m128(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_cvtxps_ph() { - let a = _mm_set_ps(1.0, 2.0, 3.0, 4.0); - let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let r = _mm_mask_cvtxps_ph(src, 0b0101, a); - let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 14., 2.0, 16., 4.0); - assert_eq_m128h(r, e); + unsafe fn test_mm_mask_cvtxph_ps() { + let src = _mm_set_ps(10.0, 11.0, 12.0, 13.0); + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); + let r = _mm_mask_cvtxph_ps(src, 0b0101, a); + let e = _mm_set_ps(10.0, 2.0, 12.0, 4.0); + assert_eq_m128(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_cvtxps_ph() { - let a = _mm_set_ps(1.0, 2.0, 3.0, 4.0); - let r = _mm_maskz_cvtxps_ph(0b0101, a); - let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 0.0, 4.0); - assert_eq_m128h(r, e); + unsafe fn test_mm_maskz_cvtxph_ps() { + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); + let r = _mm_maskz_cvtxph_ps(0b0101, a); + let e = _mm_set_ps(0.0, 2.0, 0.0, 4.0); + assert_eq_m128(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_cvtxps_ph() { - let a = _mm256_set_ps(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let r = _mm256_cvtxps_ph(a); - let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - assert_eq_m128h(r, e); + unsafe fn test_mm256_cvtxph_ps() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm256_cvtxph_ps(a); + let e = _mm256_set_ps(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + assert_eq_m256(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_cvtxps_ph() { - let a = _mm256_set_ps(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let r = _mm256_mask_cvtxps_ph(src, 0b01010101, a); - let e = _mm_set_ph(10., 2., 12., 4., 14., 6., 16., 8.); - assert_eq_m128h(r, e); + unsafe fn test_mm256_mask_cvtxph_ps() { + let src = _mm256_set_ps(10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0); + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm256_mask_cvtxph_ps(src, 0b01010101, a); + let e = _mm256_set_ps(10.0, 2.0, 12.0, 4.0, 14.0, 6.0, 16.0, 8.0); + assert_eq_m256(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_cvtxps_ph() { - let a = _mm256_set_ps(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let r = _mm256_maskz_cvtxps_ph(0b01010101, a); - let e = _mm_set_ph(0.0, 2.0, 0.0, 4.0, 0.0, 6.0, 0.0, 8.0); - assert_eq_m128h(r, e); + unsafe fn test_mm256_maskz_cvtxph_ps() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm256_maskz_cvtxph_ps(0b01010101, a); + let e = _mm256_set_ps(0.0, 2.0, 0.0, 4.0, 0.0, 6.0, 0.0, 8.0); + assert_eq_m256(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_cvtxps_ph() { - let a = _mm512_set_ps( + unsafe fn test_mm512_cvtxph_ps() { + let a = _mm256_set_ph( 
1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - let r = _mm512_cvtxps_ph(a); - let e = _mm256_set_ph( + let r = _mm512_cvtxph_ps(a); + let e = _mm512_set_ps( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - assert_eq_m256h(r, e); + assert_eq_m512(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_cvtxps_ph() { - let a = _mm512_set_ps( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + unsafe fn test_mm512_mask_cvtxph_ps() { + let src = _mm512_set_ps( + 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, + 24.0, 25.0, ); - let src = _mm256_set_ph( - 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - let r = _mm512_mask_cvtxps_ph(src, 0b0101010101010101, a); - let e = _mm256_set_ph( - 10., 2.0, 12., 4.0, 14., 6.0, 16., 8.0, 18., 10.0, 20., 12.0, 22., 14.0, 24., 16.0, + let r = _mm512_mask_cvtxph_ps(src, 0b0101010101010101, a); + let e = _mm512_set_ps( + 10.0, 2.0, 12.0, 4.0, 14.0, 6.0, 16.0, 8.0, 18.0, 10.0, 20.0, 12.0, 22.0, 14.0, 24.0, + 16.0, ); - assert_eq_m256h(r, e); + assert_eq_m512(r, e); } - #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_cvtxps_ph() { - let a = _mm512_set_ps( + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_maskz_cvtxph_ps() { + let a = _mm256_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - let r = _mm512_maskz_cvtxps_ph(0b0101010101010101, a); - let e = _mm256_set_ph( + let r = _mm512_maskz_cvtxph_ps(0b0101010101010101, a); + let e = _mm512_set_ps( 0.0, 2.0, 0.0, 4.0, 0.0, 6.0, 0.0, 8.0, 0.0, 10.0, 0.0, 12.0, 0.0, 14.0, 0.0, 16.0, ); - assert_eq_m256h(r, e); + assert_eq_m512(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_cvtx_roundps_ph() { - let a = _mm512_set_ps( + unsafe fn test_mm512_cvtx_roundph_ps() { + let a = _mm256_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - let r = _mm512_cvtx_roundps_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a); - let e = _mm256_set_ph( + let r = _mm512_cvtx_roundph_ps::<_MM_FROUND_NO_EXC>(a); + let e = _mm512_set_ps( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - assert_eq_m256h(r, e); + assert_eq_m512(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_cvtx_roundps_ph() { - let a = _mm512_set_ps( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + unsafe fn test_mm512_mask_cvtx_roundph_ps() { + let src = _mm512_set_ps( + 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, + 24.0, 25.0, ); - let src = _mm256_set_ph( - 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., - ); - let r = _mm512_mask_cvtx_roundps_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, - 0b0101010101010101, - a, + let a = _mm256_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - let e = _mm256_set_ph( + let r = _mm512_mask_cvtx_roundph_ps::<_MM_FROUND_NO_EXC>(src, 0b0101010101010101, a); + let e = _mm512_set_ps( 10.0, 2.0, 12.0, 4.0, 14.0, 6.0, 16.0, 8.0, 18.0, 10.0, 20.0, 12.0, 22.0, 14.0, 24.0, 16.0, ); - assert_eq_m256h(r, e); + 
assert_eq_m512(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_cvtx_roundps_ph() { - let a = _mm512_set_ps( + unsafe fn test_mm512_maskz_cvtx_roundph_ps() { + let a = _mm256_set_ph( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, ); - let r = _mm512_maskz_cvtx_roundps_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b0101010101010101, - a, - ); - let e = _mm256_set_ph( + let r = _mm512_maskz_cvtx_roundph_ps::<_MM_FROUND_NO_EXC>(0b0101010101010101, a); + let e = _mm512_set_ps( 0.0, 2.0, 0.0, 4.0, 0.0, 6.0, 0.0, 8.0, 0.0, 10.0, 0.0, 12.0, 0.0, 14.0, 0.0, 16.0, ); - assert_eq_m256h(r, e); + assert_eq_m512(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_cvtss_sh() { - let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let b = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); - let r = _mm_cvtss_sh(a, b); - let e = _mm_setr_ph(1.0, 11., 12., 13., 14., 15., 16., 17.); - assert_eq_m128h(r, e); + unsafe fn test_mm_cvtsh_ss() { + let a = _mm_setr_ps(2.0, 20.0, 21.0, 22.0); + let b = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let r = _mm_cvtsh_ss(a, b); + let e = _mm_setr_ps(1.0, 20.0, 21.0, 22.0); + assert_eq_m128(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_cvtss_sh() { - let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let b = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); - let src = _mm_setr_ph(20., 21., 22., 23., 24., 25., 26., 27.); - let r = _mm_mask_cvtss_sh(src, 0, a, b); - let e = _mm_setr_ph(20., 11., 12., 13., 14., 15., 16., 17.); - assert_eq_m128h(r, e); - let r = _mm_mask_cvtss_sh(src, 1, a, b); - let e = _mm_setr_ph(1.0, 11., 12., 13., 14., 15., 16., 17.); - assert_eq_m128h(r, e); + unsafe fn test_mm_mask_cvtsh_ss() { + let src = _mm_setr_ps(3.0, 11.0, 12.0, 13.0); + let a = _mm_setr_ps(2.0, 20.0, 21.0, 22.0); + let b = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let r = _mm_mask_cvtsh_ss(src, 0, a, b); + let e = _mm_setr_ps(3.0, 20.0, 21.0, 22.0); + assert_eq_m128(r, e); + let r = _mm_mask_cvtsh_ss(src, 1, a, b); + let e = _mm_setr_ps(1.0, 20.0, 21.0, 22.0); + assert_eq_m128(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_cvtss_sh() { - let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let b = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); - let r = _mm_maskz_cvtss_sh(0, a, b); - let e = _mm_setr_ph(0.0, 11., 12., 13., 14., 15., 16., 17.); - assert_eq_m128h(r, e); - let r = _mm_maskz_cvtss_sh(1, a, b); - let e = _mm_setr_ph(1.0, 11., 12., 13., 14., 15., 16., 17.); - assert_eq_m128h(r, e); + unsafe fn test_mm_maskz_cvtsh_ss() { + let a = _mm_setr_ps(2.0, 20.0, 21.0, 22.0); + let b = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let r = _mm_maskz_cvtsh_ss(0, a, b); + let e = _mm_setr_ps(0.0, 20.0, 21.0, 22.0); + assert_eq_m128(r, e); + let r = _mm_maskz_cvtsh_ss(1, a, b); + let e = _mm_setr_ps(1.0, 20.0, 21.0, 22.0); + assert_eq_m128(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_cvt_roundss_sh() { - let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let b = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); - let r = _mm_cvt_roundss_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm_setr_ph(1.0, 11., 12., 13., 14., 15., 16., 17.); - assert_eq_m128h(r, e); + unsafe fn test_mm_cvt_roundsh_ss() { + let a = _mm_setr_ps(2.0, 20.0, 21.0, 22.0); + let b = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let r = _mm_cvt_roundsh_ss::<_MM_FROUND_NO_EXC>(a, 
b); + let e = _mm_setr_ps(1.0, 20.0, 21.0, 22.0); + assert_eq_m128(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_cvt_roundss_sh() { - let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let b = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); - let src = _mm_setr_ph(20., 21., 22., 23., 24., 25., 26., 27.); - let r = _mm_mask_cvt_roundss_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 0, a, b, - ); - let e = _mm_setr_ph(20., 11., 12., 13., 14., 15., 16., 17.); - assert_eq_m128h(r, e); - let r = _mm_mask_cvt_roundss_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 1, a, b, - ); - let e = _mm_setr_ph(1.0, 11., 12., 13., 14., 15., 16., 17.); - assert_eq_m128h(r, e); + unsafe fn test_mm_mask_cvt_roundsh_ss() { + let src = _mm_setr_ps(3.0, 11.0, 12.0, 13.0); + let a = _mm_setr_ps(2.0, 20.0, 21.0, 22.0); + let b = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let r = _mm_mask_cvt_roundsh_ss::<_MM_FROUND_NO_EXC>(src, 0, a, b); + let e = _mm_setr_ps(3.0, 20.0, 21.0, 22.0); + assert_eq_m128(r, e); + let r = _mm_mask_cvt_roundsh_ss::<_MM_FROUND_NO_EXC>(src, 1, a, b); + let e = _mm_setr_ps(1.0, 20.0, 21.0, 22.0); + assert_eq_m128(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_cvt_roundss_sh() { - let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let b = _mm_setr_ps(1.0, 2.0, 3.0, 4.0); - let r = - _mm_maskz_cvt_roundss_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); - let e = _mm_setr_ph(0.0, 11., 12., 13., 14., 15., 16., 17.); - assert_eq_m128h(r, e); - let r = - _mm_maskz_cvt_roundss_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); - let e = _mm_setr_ph(1.0, 11., 12., 13., 14., 15., 16., 17.); - assert_eq_m128h(r, e); + unsafe fn test_mm_maskz_cvt_roundsh_ss() { + let a = _mm_setr_ps(2.0, 20.0, 21.0, 22.0); + let b = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let r = _mm_maskz_cvt_roundsh_ss::<_MM_FROUND_NO_EXC>(0, a, b); + let e = _mm_setr_ps(0.0, 20.0, 21.0, 22.0); + assert_eq_m128(r, e); + let r = _mm_maskz_cvt_roundsh_ss::<_MM_FROUND_NO_EXC>(1, a, b); + let e = _mm_setr_ps(1.0, 20.0, 21.0, 22.0); + assert_eq_m128(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_cvtpd_ph() { - let a = _mm_set_pd(1.0, 2.0); - let r = _mm_cvtpd_ph(a); - let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 2.0); - assert_eq_m128h(r, e); + unsafe fn test_mm_cvtph_pd() { + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 2.0); + let r = _mm_cvtph_pd(a); + let e = _mm_set_pd(1.0, 2.0); + assert_eq_m128d(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_mask_cvtpd_ph() { - let a = _mm_set_pd(1.0, 2.0); - let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let r = _mm_mask_cvtpd_ph(src, 0b01, a); - let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 16., 2.); - assert_eq_m128h(r, e); + unsafe fn test_mm_mask_cvtph_pd() { + let src = _mm_set_pd(10.0, 11.0); + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 2.0); + let r = _mm_mask_cvtph_pd(src, 0b01, a); + let e = _mm_set_pd(10.0, 2.0); + assert_eq_m128d(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm_maskz_cvtpd_ph() { - let a = _mm_set_pd(1.0, 2.0); - let r = _mm_maskz_cvtpd_ph(0b01, a); - let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0); - assert_eq_m128h(r, e); + unsafe fn test_mm_maskz_cvtph_pd() { + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 2.0); + let r = _mm_maskz_cvtph_pd(0b01, a); + let e = 
_mm_set_pd(0.0, 2.0); + assert_eq_m128d(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_cvtpd_ph() { - let a = _mm256_set_pd(1.0, 2.0, 3.0, 4.0); - let r = _mm256_cvtpd_ph(a); - let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); - assert_eq_m128h(r, e); + unsafe fn test_mm256_cvtph_pd() { + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); + let r = _mm256_cvtph_pd(a); + let e = _mm256_set_pd(1.0, 2.0, 3.0, 4.0); + assert_eq_m256d(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_mask_cvtpd_ph() { - let a = _mm256_set_pd(1.0, 2.0, 3.0, 4.0); - let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let r = _mm256_mask_cvtpd_ph(src, 0b0101, a); - let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 14., 2.0, 16.0, 4.0); - assert_eq_m128h(r, e); + unsafe fn test_mm256_mask_cvtph_pd() { + let src = _mm256_set_pd(10.0, 11.0, 12.0, 13.0); + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); + let r = _mm256_mask_cvtph_pd(src, 0b0101, a); + let e = _mm256_set_pd(10.0, 2.0, 12.0, 4.0); + assert_eq_m256d(r, e); } #[simd_test(enable = "avx512fp16,avx512vl")] - unsafe fn test_mm256_maskz_cvtpd_ph() { - let a = _mm256_set_pd(1.0, 2.0, 3.0, 4.0); - let r = _mm256_maskz_cvtpd_ph(0b0101, a); - let e = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 0.0, 4.0); - assert_eq_m128h(r, e); + unsafe fn test_mm256_maskz_cvtph_pd() { + let a = _mm_set_ph(0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0); + let r = _mm256_maskz_cvtph_pd(0b0101, a); + let e = _mm256_set_pd(0.0, 2.0, 0.0, 4.0); + assert_eq_m256d(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_cvtpd_ph() { - let a = _mm512_set_pd(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let r = _mm512_cvtpd_ph(a); - let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - assert_eq_m128h(r, e); + unsafe fn test_mm512_cvtph_pd() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm512_cvtph_pd(a); + let e = _mm512_set_pd(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + assert_eq_m512d(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_cvtpd_ph() { - let a = _mm512_set_pd(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let r = _mm512_mask_cvtpd_ph(src, 0b01010101, a); - let e = _mm_set_ph(10., 2., 12., 4., 14., 6., 16., 8.); - assert_eq_m128h(r, e); + unsafe fn test_mm512_mask_cvtph_pd() { + let src = _mm512_set_pd(10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0); + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm512_mask_cvtph_pd(src, 0b01010101, a); + let e = _mm512_set_pd(10.0, 2.0, 12.0, 4.0, 14.0, 6.0, 16.0, 8.0); + assert_eq_m512d(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_cvtpd_ph() { - let a = _mm512_set_pd(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let r = _mm512_maskz_cvtpd_ph(0b01010101, a); - let e = _mm_set_ph(0.0, 2., 0.0, 4., 0.0, 6., 0.0, 8.); - assert_eq_m128h(r, e); + unsafe fn test_mm512_maskz_cvtph_pd() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm512_maskz_cvtph_pd(0b01010101, a); + let e = _mm512_set_pd(0.0, 2.0, 0.0, 4.0, 0.0, 6.0, 0.0, 8.0); + assert_eq_m512d(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_cvt_roundpd_ph() { - let a = _mm512_set_pd(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let r = _mm512_cvt_roundpd_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a); - let e = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - 
assert_eq_m128h(r, e); + unsafe fn test_mm512_cvt_roundph_pd() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm512_cvt_roundph_pd::<_MM_FROUND_NO_EXC>(a); + let e = _mm512_set_pd(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + assert_eq_m512d(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_mask_cvt_roundpd_ph() { - let a = _mm512_set_pd(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let src = _mm_set_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let r = _mm512_mask_cvt_roundpd_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 0b01010101, a, - ); - let e = _mm_set_ph(10., 2., 12., 4., 14., 6., 16., 8.); - assert_eq_m128h(r, e); + unsafe fn test_mm512_mask_cvt_roundph_pd() { + let src = _mm512_set_pd(10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0); + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm512_mask_cvt_roundph_pd::<_MM_FROUND_NO_EXC>(src, 0b01010101, a); + let e = _mm512_set_pd(10.0, 2.0, 12.0, 4.0, 14.0, 6.0, 16.0, 8.0); + assert_eq_m512d(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm512_maskz_cvt_roundpd_ph() { - let a = _mm512_set_pd(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); - let r = _mm512_maskz_cvt_roundpd_ph::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - 0b01010101, a, - ); - let e = _mm_set_ph(0.0, 2., 0.0, 4., 0.0, 6., 0.0, 8.); - assert_eq_m128h(r, e); + unsafe fn test_mm512_maskz_cvt_roundph_pd() { + let a = _mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm512_maskz_cvt_roundph_pd::<_MM_FROUND_NO_EXC>(0b01010101, a); + let e = _mm512_set_pd(0.0, 2.0, 0.0, 4.0, 0.0, 6.0, 0.0, 8.0); + assert_eq_m512d(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_cvtsd_sh() { - let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let b = _mm_setr_pd(1.0, 2.0); - let r = _mm_cvtsd_sh(a, b); - let e = _mm_setr_ph(1.0, 11., 12., 13., 14., 15., 16., 17.); - assert_eq_m128h(r, e); + unsafe fn test_mm_cvtsh_sd() { + let a = _mm_setr_pd(2.0, 20.0); + let b = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let r = _mm_cvtsh_sd(a, b); + let e = _mm_setr_pd(1.0, 20.0); + assert_eq_m128d(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_cvtsd_sh() { - let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let b = _mm_setr_pd(1.0, 2.0); - let src = _mm_setr_ph(20., 21., 22., 23., 24., 25., 26., 27.); - let r = _mm_mask_cvtsd_sh(src, 0, a, b); - let e = _mm_setr_ph(20., 11., 12., 13., 14., 15., 16., 17.); - assert_eq_m128h(r, e); - let r = _mm_mask_cvtsd_sh(src, 1, a, b); - let e = _mm_setr_ph(1.0, 11., 12., 13., 14., 15., 16., 17.); - assert_eq_m128h(r, e); + unsafe fn test_mm_mask_cvtsh_sd() { + let src = _mm_setr_pd(3.0, 11.0); + let a = _mm_setr_pd(2.0, 20.0); + let b = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let r = _mm_mask_cvtsh_sd(src, 0, a, b); + let e = _mm_setr_pd(3.0, 20.0); + assert_eq_m128d(r, e); + let r = _mm_mask_cvtsh_sd(src, 1, a, b); + let e = _mm_setr_pd(1.0, 20.0); + assert_eq_m128d(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_cvtsd_sh() { - let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let b = _mm_setr_pd(1.0, 2.0); - let r = _mm_maskz_cvtsd_sh(0, a, b); - let e = _mm_setr_ph(0.0, 11., 12., 13., 14., 15., 16., 17.); - assert_eq_m128h(r, e); - let r = _mm_maskz_cvtsd_sh(1, a, b); - let e = _mm_setr_ph(1.0, 11., 12., 13., 14., 15., 16., 17.); - assert_eq_m128h(r, e); + unsafe fn test_mm_maskz_cvtsh_sd() { + let a = _mm_setr_pd(2.0, 
20.0); + let b = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let r = _mm_maskz_cvtsh_sd(0, a, b); + let e = _mm_setr_pd(0.0, 20.0); + assert_eq_m128d(r, e); + let r = _mm_maskz_cvtsh_sd(1, a, b); + let e = _mm_setr_pd(1.0, 20.0); + assert_eq_m128d(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_cvt_roundsd_sh() { - let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let b = _mm_setr_pd(1.0, 2.0); - let r = _mm_cvt_roundsd_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b); - let e = _mm_setr_ph(1.0, 11., 12., 13., 14., 15., 16., 17.); - assert_eq_m128h(r, e); + unsafe fn test_mm_cvt_roundsh_sd() { + let a = _mm_setr_pd(2.0, 20.0); + let b = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let r = _mm_cvt_roundsh_sd::<_MM_FROUND_NO_EXC>(a, b); + let e = _mm_setr_pd(1.0, 20.0); + assert_eq_m128d(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_mask_cvt_roundsd_sh() { - let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let b = _mm_setr_pd(1.0, 2.0); - let src = _mm_setr_ph(20., 21., 22., 23., 24., 25., 26., 27.); - let r = _mm_mask_cvt_roundsd_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 0, a, b, - ); - let e = _mm_setr_ph(20., 11., 12., 13., 14., 15., 16., 17.); - assert_eq_m128h(r, e); - let r = _mm_mask_cvt_roundsd_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>( - src, 1, a, b, - ); - let e = _mm_setr_ph(1.0, 11., 12., 13., 14., 15., 16., 17.); - assert_eq_m128h(r, e); + unsafe fn test_mm_mask_cvt_roundsh_sd() { + let src = _mm_setr_pd(3.0, 11.0); + let a = _mm_setr_pd(2.0, 20.0); + let b = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let r = _mm_mask_cvt_roundsh_sd::<_MM_FROUND_NO_EXC>(src, 0, a, b); + let e = _mm_setr_pd(3.0, 20.0); + assert_eq_m128d(r, e); + let r = _mm_mask_cvt_roundsh_sd::<_MM_FROUND_NO_EXC>(src, 1, a, b); + let e = _mm_setr_pd(1.0, 20.0); + assert_eq_m128d(r, e); } #[simd_test(enable = "avx512fp16")] - unsafe fn test_mm_maskz_cvt_roundsd_sh() { - let a = _mm_setr_ph(10., 11., 12., 13., 14., 15., 16., 17.); - let b = _mm_setr_pd(1.0, 2.0); - let r = - _mm_maskz_cvt_roundsd_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(0, a, b); - let e = _mm_setr_ph(0.0, 11., 12., 13., 14., 15., 16., 17.); - assert_eq_m128h(r, e); - let r = - _mm_maskz_cvt_roundsd_sh::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(1, a, b); - let e = _mm_setr_ph(1.0, 11., 12., 13., 14., 15., 16., 17.); - assert_eq_m128h(r, e); + unsafe fn test_mm_maskz_cvt_roundsh_sd() { + let a = _mm_setr_pd(2.0, 20.0); + let b = _mm_setr_ph(1.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0); + let r = _mm_maskz_cvt_roundsh_sd::<_MM_FROUND_NO_EXC>(0, a, b); + let e = _mm_setr_pd(0.0, 20.0); + assert_eq_m128d(r, e); + let r = _mm_maskz_cvt_roundsh_sd::<_MM_FROUND_NO_EXC>(1, a, b); + let e = _mm_setr_pd(1.0, 20.0); + assert_eq_m128d(r, e); } } diff --git a/crates/core_arch/src/x86_64/avx512fp16.rs b/crates/core_arch/src/x86_64/avx512fp16.rs index ebd85ed4ad..dc216627a6 100644 --- a/crates/core_arch/src/x86_64/avx512fp16.rs +++ b/crates/core_arch/src/x86_64/avx512fp16.rs @@ -74,6 +74,130 @@ pub unsafe fn _mm_cvt_roundu64_sh(a: __m128h, b: u64) -> __ vcvtusi642sh(a, b, ROUNDING) } +/// Convert the lower half-precision (16-bit) floating-point element in a to a 64-bit integer, and store +/// the result in dst. 
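+///
+/// A minimal usage sketch (the input value is an assumption for illustration, not
+/// part of the original patch). Under the default MXCSR rounding direction
+/// (round-to-nearest-even), a lowest lane of 1.5 converts to 2:
+///
+/// ```ignore
+/// let a = _mm_set_sh(1.5);
+/// // Rounds according to the current MXCSR rounding direction.
+/// let r = unsafe { _mm_cvtsh_i64(a) };
+/// assert_eq!(r, 2);
+/// ```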
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsh_i64) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vcvtsh2si))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_cvtsh_i64(a: __m128h) -> i64 { + vcvtsh2si64(a, _MM_FROUND_CUR_DIRECTION) +} + +/// Convert the lower half-precision (16-bit) floating-point element in a to a 64-bit integer, and store +/// the result in dst. +/// +/// Rounding is done according to the rounding parameter, which can be one of: +/// +/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions +/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions +/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions +/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions +/// _MM_FROUND_CUR_DIRECTION +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundsh_i64) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vcvtsh2si, ROUNDING = 8))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_cvt_roundsh_i64(a: __m128h) -> i64 { + static_assert_rounding!(ROUNDING); + vcvtsh2si64(a, ROUNDING) +} + +/// Convert the lower half-precision (16-bit) floating-point element in a to a 64-bit unsigned integer, and store +/// the result in dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsh_u64) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vcvtsh2usi))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_cvtsh_u64(a: __m128h) -> u64 { + vcvtsh2usi64(a, _MM_FROUND_CUR_DIRECTION) +} + +/// Convert the lower half-precision (16-bit) floating-point element in a to a 64-bit unsigned integer, and store +/// the result in dst. +/// +/// Rounding is done according to the rounding parameter, which can be one of: +/// +/// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and suppress exceptions +/// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and suppress exceptions +/// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress exceptions +/// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress exceptions +/// _MM_FROUND_CUR_DIRECTION +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_roundsh_u64) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vcvtsh2usi, ROUNDING = 8))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_cvt_roundsh_u64(a: __m128h) -> u64 { + static_assert_rounding!(ROUNDING); + vcvtsh2usi64(a, ROUNDING) +} + +/// Convert the lower half-precision (16-bit) floating-point element in a to a 64-bit integer with truncation, +/// and store the result in dst. 
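+///
+/// A minimal sketch of the truncating behavior (values assumed for illustration):
+/// truncation always rounds toward zero, independent of the MXCSR rounding mode.
+///
+/// ```ignore
+/// let a = _mm_set_sh(2.9);
+/// // 2.9 truncates toward zero, giving 2 rather than the nearest integer 3.
+/// let r = unsafe { _mm_cvttsh_i64(a) };
+/// assert_eq!(r, 2);
+/// ```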
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttsh_i64) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vcvttsh2si))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_cvttsh_i64(a: __m128h) -> i64 { + vcvttsh2si64(a, _MM_FROUND_CUR_DIRECTION) +} + +/// Convert the lower half-precision (16-bit) floating-point element in a to a 64-bit integer with truncation, +/// and store the result in dst. +/// +/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtt_roundsh_i64) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vcvttsh2si, SAE = 8))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_cvtt_roundsh_i64(a: __m128h) -> i64 { + static_assert_sae!(SAE); + vcvttsh2si64(a, SAE) +} + +/// Convert the lower half-precision (16-bit) floating-point element in a to a 64-bit unsigned integer with truncation, +/// and store the result in dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttsh_u64) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vcvttsh2usi))] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_cvttsh_u64(a: __m128h) -> u64 { + vcvttsh2usi64(a, _MM_FROUND_CUR_DIRECTION) +} + +/// Convert the lower half-precision (16-bit) floating-point element in a to a 64-bit unsigned integer with truncation, +/// and store the result in dst. +/// +/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter. 
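+///
+/// A minimal sketch (values assumed for illustration); SAE only suppresses the
+/// exception flags, the conversion itself still truncates toward zero:
+///
+/// ```ignore
+/// let a = _mm_set_sh(7.75);
+/// let r = unsafe { _mm_cvtt_roundsh_u64::<_MM_FROUND_NO_EXC>(a) };
+/// assert_eq!(r, 7);
+/// ```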
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtt_roundsh_u64) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[cfg_attr(test, assert_instr(vcvttsh2usi, SAE = 8))] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_cvtt_roundsh_u64(a: __m128h) -> u64 { + static_assert_sae!(SAE); + vcvttsh2usi64(a, SAE) +} + #[allow(improper_ctypes)] extern "C" { #[link_name = "llvm.x86.avx512fp16.vcvtsi642sh"] @@ -126,4 +250,60 @@ mod tests { let e = _mm_setr_ph(10.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); assert_eq_m128h(r, e); } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_cvtsh_i64() { + let a = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm_cvtsh_i64(a); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_cvt_roundsh_i64() { + let a = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm_cvt_roundsh_i64::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_cvtsh_u64() { + let a = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm_cvtsh_u64(a); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_cvt_roundsh_u64() { + let a = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm_cvt_roundsh_u64::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_cvttsh_i64() { + let a = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm_cvttsh_i64(a); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_cvtt_roundsh_i64() { + let a = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm_cvtt_roundsh_i64::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_cvttsh_u64() { + let a = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm_cvttsh_u64(a); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_cvtt_roundsh_u64() { + let a = _mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm_cvtt_roundsh_u64::<_MM_FROUND_NO_EXC>(a); + assert_eq!(r, 1); + } } From cf01aba4ded086eb87d149ac1e78cf05c87cdcaa Mon Sep 17 00:00:00 2001 From: sayantn Date: Wed, 17 Jul 2024 18:39:29 +0530 Subject: [PATCH 10/11] AVX512FP16 Part 9: Remaining avx512fp16 and avxneconvert --- crates/core_arch/missing-x86.md | 18 -- crates/core_arch/src/x86/avx512fp16.rs | 245 ++++++++++++++++++++--- crates/core_arch/src/x86/avxneconvert.rs | 156 +++++++++++++++ crates/stdarch-verify/tests/x86-intel.rs | 1 + 4 files changed, 377 insertions(+), 43 deletions(-) diff --git a/crates/core_arch/missing-x86.md b/crates/core_arch/missing-x86.md index 94ecc929ef..bb1d4c5c7a 100644 --- a/crates/core_arch/missing-x86.md +++ b/crates/core_arch/missing-x86.md @@ -53,15 +53,8 @@

["AVX512_FP16"]

- * [ ] [`_mm256_cvtsh_h`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtsh_h) * [ ] [`_mm256_set1_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_set1_pch) - * [ ] [`_mm512_cmp_round_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmp_round_ph_mask) - * [ ] [`_mm512_cvtsh_h`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtsh_h) - * [ ] [`_mm512_mask_cmp_round_ph_mask`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmp_round_ph_mask) * [ ] [`_mm512_set1_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_set1_pch) - * [ ] [`_mm_cvtsh_h`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsh_h) - * [ ] [`_mm_cvtsi128_si16`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi128_si16) - * [ ] [`_mm_cvtsi16_si128`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi16_si128) * [ ] [`_mm_set1_pch`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set1_pch)

@@ -82,17 +75,6 @@

-
["AVX_NE_CONVERT"]

- - * [ ] [`_mm256_bcstnesh_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_bcstnesh_ps) - * [ ] [`_mm256_cvtneeph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtneeph_ps) - * [ ] [`_mm256_cvtneoph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtneoph_ps) - * [ ] [`_mm_bcstnesh_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_bcstnesh_ps) - * [ ] [`_mm_cvtneeph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtneeph_ps) - * [ ] [`_mm_cvtneoph_ps`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtneoph_ps) -

- -
["CET_SS"]

* [ ] [`_clrssbsy`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_clrssbsy) diff --git a/crates/core_arch/src/x86/avx512fp16.rs b/crates/core_arch/src/x86/avx512fp16.rs index 86d38feaec..7897dd970c 100644 --- a/crates/core_arch/src/x86/avx512fp16.rs +++ b/crates/core_arch/src/x86/avx512fp16.rs @@ -596,6 +596,25 @@ pub unsafe fn _mm256_zextph128_ph256(a: __m128h) -> __m256h { ) } +/// Cast vector of type `__m256h` to type `__m512h`. The upper 16 elements of the result are zeroed. +/// This intrinsic can generate the `vzeroupper` instruction, but most of the time it does not generate +/// any instructions. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_zextph256_ph512) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_zextph256_ph512(a: __m256h) -> __m512h { + simd_shuffle!( + a, + _mm256_setzero_ph(), + [ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 16, 16, 16, 16, 16, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16 + ] + ) +} + /// Cast vector of type `__m128h` to type `__m512h`. The upper 24 elements of the result are zeroed. /// This intrinsic can generate the `vzeroupper` instruction, but most of the time it does not generate /// any instructions. @@ -615,10 +634,10 @@ pub unsafe fn _mm512_zextph128_ph512(a: __m128h) -> __m512h { ) } -macro_rules! cmp_asm { +macro_rules! cmp_asm { // FIXME: use LLVM intrinsics ($mask_type: ty, $reg: ident, $a: expr, $b: expr) => {{ let dst: $mask_type; - crate::arch::asm!( + asm!( "vcmpph {k}, {a}, {b}, {imm8}", k = lateout(kreg) dst, a = in($reg) $a, @@ -630,7 +649,7 @@ macro_rules! cmp_asm { }}; ($mask_type: ty, $mask: expr, $reg: ident, $a: expr, $b: expr) => {{ let dst: $mask_type; - crate::arch::asm!( + asm!( "vcmpph {k} {{ {mask} }}, {a}, {b}, {imm8}", k = lateout(kreg) dst, mask = in(kreg) $mask, @@ -736,6 +755,73 @@ pub unsafe fn _mm512_mask_cmp_ph_mask( cmp_asm!(__mmask32, k1, zmm_reg, a, b) } +/// Compare packed half-precision (16-bit) floating-point elements in a and b based on the comparison +/// operand specified by imm8, and store the results in mask vector k. +/// +/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cmp_round_ph_mask) +#[inline] +#[target_feature(enable = "avx512fp16,avx512bw,avx512f")] +#[rustc_legacy_const_generics(2, 3)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_cmp_round_ph_mask( + a: __m512h, + b: __m512h, +) -> __mmask32 { + static_assert_uimm_bits!(IMM5, 5); + static_assert_sae!(SAE); + if SAE == _MM_FROUND_NO_EXC { + let dst: __mmask32; + asm!( + "vcmpph {k}, {a}, {b}, {{sae}}, {imm8}", + k = lateout(kreg) dst, + a = in(zmm_reg) a, + b = in(zmm_reg) b, + imm8 = const IMM5, + options(pure, nomem, nostack) + ); + dst + } else { + cmp_asm!(__mmask32, zmm_reg, a, b) + } +} + +/// Compare packed half-precision (16-bit) floating-point elements in a and b based on the comparison +/// operand specified by imm8, and store the results in mask vector k using zeromask k (elements are +/// zeroed out when the corresponding mask bit is not set). 
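+///
+/// A minimal sketch of the zeromask semantics (`_mm512_set1_ph` and the values are
+/// assumptions for illustration): every lane compares equal here, but only the bits
+/// set in `k1` can survive into the result.
+///
+/// ```ignore
+/// let a = _mm512_set1_ph(1.0);
+/// let b = _mm512_set1_ph(1.0);
+/// let k1 = 0b01010101_01010101_01010101_01010101;
+/// let k = unsafe { _mm512_mask_cmp_round_ph_mask::<_CMP_EQ_OQ, _MM_FROUND_NO_EXC>(k1, a, b) };
+/// assert_eq!(k, k1);
+/// ```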
+/// +/// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_cmp_round_ph_mask) +#[inline] +#[target_feature(enable = "avx512fp16,avx512bw,avx512f")] +#[rustc_legacy_const_generics(3, 4)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_mask_cmp_round_ph_mask( + k1: __mmask32, + a: __m512h, + b: __m512h, +) -> __mmask32 { + static_assert_uimm_bits!(IMM5, 5); + static_assert_sae!(SAE); + if SAE == _MM_FROUND_NO_EXC { + let dst: __mmask32; + asm!( + "vcmpph {k} {{{k1}}}, {a}, {b}, {{sae}}, {imm8}", + k = lateout(kreg) dst, + k1 = in(kreg) k1, + a = in(zmm_reg) a, + b = in(zmm_reg) b, + imm8 = const IMM5, + options(pure, nomem, nostack) + ); + dst + } else { + cmp_asm!(__mmask32, k1, zmm_reg, a, b) + } +} + /// Compare the lower half-precision (16-bit) floating-point elements in a and b based on the comparison /// operand specified by imm8, and store the result in mask vector k. Exceptions can be suppressed by /// passing _MM_FROUND_NO_EXC in the sae parameter. @@ -803,25 +889,6 @@ pub unsafe fn _mm_mask_cmp_sh_mask( _mm_mask_cmp_round_sh_mask::(k1, a, b) } -/// Cast vector of type `__m256h` to type `__m512h`. The upper 16 elements of the result are zeroed. -/// This intrinsic can generate the `vzeroupper` instruction, but most of the time it does not generate -/// any instructions. -/// -/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_zextph256_ph512) -#[inline] -#[target_feature(enable = "avx512fp16")] -#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] -pub unsafe fn _mm512_zextph256_ph512(a: __m256h) -> __m512h { - simd_shuffle!( - a, - _mm256_setzero_ph(), - [ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 16, 16, 16, 16, 16, 16, 16, - 16, 16, 16, 16, 16, 16, 16, 16 - ] - ) -} - /// Compare the lower half-precision (16-bit) floating-point elements in a and b based on the comparison /// operand specified by imm8, and return the boolean result (0 or 1). /// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter. @@ -10942,10 +11009,10 @@ pub unsafe fn _mm512_reduce_max_ph(a: __m512h) -> f16 { _mm256_reduce_max_ph(_mm256_max_ph(p, q)) } -macro_rules! fpclass_asm { +macro_rules! fpclass_asm { // FIXME: use LLVM intrinsics ($mask_type: ty, $reg: ident, $a: expr) => {{ let dst: $mask_type; - crate::arch::asm!( + asm!( "vfpclassph {k}, {src}, {imm8}", k = lateout(kreg) dst, src = in($reg) $a, @@ -10956,7 +11023,7 @@ macro_rules! fpclass_asm { }}; ($mask_type: ty, $mask: expr, $reg: ident, $a: expr) => {{ let dst: $mask_type; - crate::arch::asm!( + asm!( "vfpclassph {k} {{ {mask} }}, {src}, {imm8}", k = lateout(kreg) dst, mask = in(kreg) $mask, @@ -15873,6 +15940,56 @@ pub unsafe fn _mm_maskz_cvt_roundsh_sd( _mm_mask_cvt_roundsh_sd::(_mm_setzero_pd(), k, a, b) } +/// Copy the lower half-precision (16-bit) floating-point element from `a` to `dst`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsh_h) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_cvtsh_h(a: __m128h) -> f16 { + simd_extract!(a, 0) +} + +/// Copy the lower half-precision (16-bit) floating-point element from `a` to `dst`. 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtsh_h) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_cvtsh_h(a: __m256h) -> f16 { + simd_extract!(a, 0) +} + +/// Copy the lower half-precision (16-bit) floating-point element from `a` to `dst`. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_cvtsh_h) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm512_cvtsh_h(a: __m512h) -> f16 { + simd_extract!(a, 0) +} + +/// Copy the lower 16-bit integer in a to dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi128_si16) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_cvtsi128_si16(a: __m128i) -> i16 { + simd_extract!(a.as_i16x8(), 0) +} + +/// Copy 16-bit integer a to the lower elements of dst, and zero the upper elements of dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi16_si128) +#[inline] +#[target_feature(enable = "avx512fp16")] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_cvtsi16_si128(a: i16) -> __m128i { + transmute(simd_insert!(i16x8::splat(0), 0, a)) +} + #[allow(improper_ctypes)] extern "C" { #[link_name = "llvm.x86.avx512fp16.mask.cmp.sh"] @@ -16693,6 +16810,42 @@ mod tests { assert_eq!(r, 0b01010000010100000101000001010000); } + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_cmp_round_ph_mask() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, -5.0, -6.0, -7.0, -8.0, 9.0, 10.0, 11.0, 12.0, -13.0, -14.0, -15.0, + -16.0, 17.0, 18.0, 19.0, 20.0, -21.0, -22.0, -23.0, -24.0, 25.0, 26.0, 27.0, 28.0, + -29.0, -30.0, -31.0, -32.0, + ); + let r = _mm512_cmp_round_ph_mask::<_CMP_EQ_OQ, _MM_FROUND_NO_EXC>(a, b); + assert_eq!(r, 0b11110000111100001111000011110000); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_mask_cmp_round_ph_mask() { + let a = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let b = _mm512_set_ph( + 1.0, 2.0, 3.0, 4.0, -5.0, -6.0, -7.0, -8.0, 9.0, 10.0, 11.0, 12.0, -13.0, -14.0, -15.0, + -16.0, 17.0, 18.0, 19.0, 20.0, -21.0, -22.0, -23.0, -24.0, 25.0, 26.0, 27.0, 28.0, + -29.0, -30.0, -31.0, -32.0, + ); + let r = _mm512_mask_cmp_round_ph_mask::<_CMP_EQ_OQ, _MM_FROUND_NO_EXC>( + 0b01010101010101010101010101010101, + a, + b, + ); + assert_eq!(r, 0b01010000010100000101000001010000); + } + #[simd_test(enable = "avx512fp16")] unsafe fn test_mm_cmp_round_sh_mask() { let a = _mm_set_sh(1.0); @@ -26800,4 +26953,46 @@ mod tests { let e = _mm_setr_pd(1.0, 20.0); assert_eq_m128d(r, e); } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_cvtsh_h() { + let a = _mm_setr_ph(1.0, 2.0, 3.0, 42.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm_cvtsh_h(a); + assert_eq!(r, 1.0); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn 
test_mm256_cvtsh_h() { + let a = _mm256_setr_ph( + 1.0, 2.0, 3.0, 42.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let r = _mm256_cvtsh_h(a); + assert_eq!(r, 1.0); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm512_cvtsh_h() { + let a = _mm512_setr_ph( + 1.0, 2.0, 3.0, 42.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, + 31.0, 32.0, + ); + let r = _mm512_cvtsh_h(a); + assert_eq!(r, 1.0); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_cvtsi128_si16() { + let a = _mm_setr_epi16(1, 2, 3, 4, 5, 6, 7, 8); + let r = _mm_cvtsi128_si16(a); + assert_eq!(r, 1); + } + + #[simd_test(enable = "avx512fp16")] + unsafe fn test_mm_cvtsi16_si128() { + let a = 1; + let r = _mm_cvtsi16_si128(a); + let e = _mm_setr_epi16(1, 0, 0, 0, 0, 0, 0, 0); + assert_eq_m128i(r, e); + } } diff --git a/crates/core_arch/src/x86/avxneconvert.rs b/crates/core_arch/src/x86/avxneconvert.rs index ec9678ac43..185e059d61 100644 --- a/crates/core_arch/src/x86/avxneconvert.rs +++ b/crates/core_arch/src/x86/avxneconvert.rs @@ -36,6 +36,38 @@ pub unsafe fn _mm256_bcstnebf16_ps(a: *const bf16) -> __m256 { bcstnebf162ps_256(a) } +/// Convert scalar half-precision (16-bit) floating-point element stored at memory locations starting +/// at location a to a single-precision (32-bit) floating-point, broadcast it to packed single-precision +/// (32-bit) floating-point elements, and store the results in dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_bcstnesh_ps) +#[inline] +#[target_feature(enable = "avxneconvert")] +#[cfg_attr( + all(test, any(target_os = "linux", target_env = "msvc")), + assert_instr(vbcstnesh2ps) +)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_bcstnesh_ps(a: *const f16) -> __m128 { + bcstnesh2ps_128(a) +} + +/// Convert scalar half-precision (16-bit) floating-point element stored at memory locations starting +/// at location a to a single-precision (32-bit) floating-point, broadcast it to packed single-precision +/// (32-bit) floating-point elements, and store the results in dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_bcstnesh_ps) +#[inline] +#[target_feature(enable = "avxneconvert")] +#[cfg_attr( + all(test, any(target_os = "linux", target_env = "msvc")), + assert_instr(vbcstnesh2ps) +)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_bcstnesh_ps(a: *const f16) -> __m256 { + bcstnesh2ps_256(a) +} + /// Convert packed BF16 (16-bit) floating-point even-indexed elements stored at memory locations starting at /// location a to single precision (32-bit) floating-point elements, and store the results in dst. /// @@ -66,6 +98,36 @@ pub unsafe fn _mm256_cvtneebf16_ps(a: *const __m256bh) -> __m256 { transmute(cvtneebf162ps_256(a)) } +/// Convert packed half-precision (16-bit) floating-point even-indexed elements stored at memory locations starting at +/// location a to single precision (32-bit) floating-point elements, and store the results in dst. 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtneeph_ps) +#[inline] +#[target_feature(enable = "avxneconvert")] +#[cfg_attr( + all(test, any(target_os = "linux", target_env = "msvc")), + assert_instr(vcvtneeph2ps) +)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_cvtneeph_ps(a: *const __m128h) -> __m128 { + transmute(cvtneeph2ps_128(a)) +} + +/// Convert packed half-precision (16-bit) floating-point even-indexed elements stored at memory locations starting at +/// location a to single precision (32-bit) floating-point elements, and store the results in dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtneeph_ps) +#[inline] +#[target_feature(enable = "avxneconvert")] +#[cfg_attr( + all(test, any(target_os = "linux", target_env = "msvc")), + assert_instr(vcvtneeph2ps) +)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_cvtneeph_ps(a: *const __m256h) -> __m256 { + transmute(cvtneeph2ps_256(a)) +} + /// Convert packed BF16 (16-bit) floating-point odd-indexed elements stored at memory locations starting at /// location a to single precision (32-bit) floating-point elements, and store the results in dst. /// @@ -96,6 +158,36 @@ pub unsafe fn _mm256_cvtneobf16_ps(a: *const __m256bh) -> __m256 { transmute(cvtneobf162ps_256(a)) } +/// Convert packed half-precision (16-bit) floating-point odd-indexed elements stored at memory locations starting at +/// location a to single precision (32-bit) floating-point elements, and store the results in dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtneoph_ps) +#[inline] +#[target_feature(enable = "avxneconvert")] +#[cfg_attr( + all(test, any(target_os = "linux", target_env = "msvc")), + assert_instr(vcvtneoph2ps) +)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm_cvtneoph_ps(a: *const __m128h) -> __m128 { + transmute(cvtneoph2ps_128(a)) +} + +/// Convert packed half-precision (16-bit) floating-point odd-indexed elements stored at memory locations starting at +/// location a to single precision (32-bit) floating-point elements, and store the results in dst. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtneoph_ps) +#[inline] +#[target_feature(enable = "avxneconvert")] +#[cfg_attr( + all(test, any(target_os = "linux", target_env = "msvc")), + assert_instr(vcvtneoph2ps) +)] +#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] +pub unsafe fn _mm256_cvtneoph_ps(a: *const __m256h) -> __m256 { + transmute(cvtneoph2ps_256(a)) +} + /// Convert packed single precision (32-bit) floating-point elements in a to packed BF16 (16-bit) floating-point /// elements, and store the results in dst. 
/// @@ -146,16 +238,28 @@ extern "C" { fn bcstnebf162ps_128(a: *const bf16) -> __m128; #[link_name = "llvm.x86.vbcstnebf162ps256"] fn bcstnebf162ps_256(a: *const bf16) -> __m256; + #[link_name = "llvm.x86.vbcstnesh2ps128"] + fn bcstnesh2ps_128(a: *const f16) -> __m128; + #[link_name = "llvm.x86.vbcstnesh2ps256"] + fn bcstnesh2ps_256(a: *const f16) -> __m256; #[link_name = "llvm.x86.vcvtneebf162ps128"] fn cvtneebf162ps_128(a: *const __m128bh) -> __m128; #[link_name = "llvm.x86.vcvtneebf162ps256"] fn cvtneebf162ps_256(a: *const __m256bh) -> __m256; + #[link_name = "llvm.x86.vcvtneeph2ps128"] + fn cvtneeph2ps_128(a: *const __m128h) -> __m128; + #[link_name = "llvm.x86.vcvtneeph2ps256"] + fn cvtneeph2ps_256(a: *const __m256h) -> __m256; #[link_name = "llvm.x86.vcvtneobf162ps128"] fn cvtneobf162ps_128(a: *const __m128bh) -> __m128; #[link_name = "llvm.x86.vcvtneobf162ps256"] fn cvtneobf162ps_256(a: *const __m256bh) -> __m256; + #[link_name = "llvm.x86.vcvtneoph2ps128"] + fn cvtneoph2ps_128(a: *const __m128h) -> __m128; + #[link_name = "llvm.x86.vcvtneoph2ps256"] + fn cvtneoph2ps_256(a: *const __m256h) -> __m256; } #[cfg(test)] @@ -191,6 +295,22 @@ mod tests { assert_eq_m256(r, e); } + #[simd_test(enable = "avxneconvert")] + unsafe fn test_mm_bcstnesh_ps() { + let a = 1.0_f16; + let r = _mm_bcstnesh_ps(addr_of!(a)); + let e = _mm_set_ps(1., 1., 1., 1.); + assert_eq_m128(r, e); + } + + #[simd_test(enable = "avxneconvert")] + unsafe fn test_mm256_bcstnesh_ps() { + let a = 1.0_f16; + let r = _mm256_bcstnesh_ps(addr_of!(a)); + let e = _mm256_set_ps(1., 1., 1., 1., 1., 1., 1., 1.); + assert_eq_m256(r, e); + } + #[simd_test(enable = "avxneconvert")] unsafe fn test_mm_cvtneebf16_ps() { let a = __m128bh( @@ -212,6 +332,24 @@ mod tests { assert_eq_m256(r, e); } + #[simd_test(enable = "avxneconvert")] + unsafe fn test_mm_cvtneeph_ps() { + let a = __m128h(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm_cvtneeph_ps(addr_of!(a)); + let e = _mm_setr_ps(1., 3., 5., 7.); + assert_eq_m128(r, e); + } + + #[simd_test(enable = "avxneconvert")] + unsafe fn test_mm256_cvtneeph_ps() { + let a = __m256h( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let r = _mm256_cvtneeph_ps(addr_of!(a)); + let e = _mm256_setr_ps(1., 3., 5., 7., 9., 11., 13., 15.); + assert_eq_m256(r, e); + } + #[simd_test(enable = "avxneconvert")] unsafe fn test_mm_cvtneobf16_ps() { let a = __m128bh( @@ -233,6 +371,24 @@ mod tests { assert_eq_m256(r, e); } + #[simd_test(enable = "avxneconvert")] + unsafe fn test_mm_cvtneoph_ps() { + let a = __m128h(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0); + let r = _mm_cvtneoph_ps(addr_of!(a)); + let e = _mm_setr_ps(2., 4., 6., 8.); + assert_eq_m128(r, e); + } + + #[simd_test(enable = "avxneconvert")] + unsafe fn test_mm256_cvtneoph_ps() { + let a = __m256h( + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + ); + let r = _mm256_cvtneoph_ps(addr_of!(a)); + let e = _mm256_setr_ps(2., 4., 6., 8., 10., 12., 14., 16.); + assert_eq_m256(r, e); + } + #[simd_test(enable = "avxneconvert")] unsafe fn test_mm_cvtneps_avx_pbh() { let a = _mm_setr_ps(1., 2., 3., 4.); diff --git a/crates/stdarch-verify/tests/x86-intel.rs b/crates/stdarch-verify/tests/x86-intel.rs index fadaa6a4b1..1416afdec0 100644 --- a/crates/stdarch-verify/tests/x86-intel.rs +++ b/crates/stdarch-verify/tests/x86-intel.rs @@ -808,6 +808,7 @@ fn equate( (&Type::ConstPtr(&Type::M256BH), "__m256bh const*") => {} (&Type::ConstPtr(&Type::M256I), "__m256i const*") => {} 
(&Type::ConstPtr(&Type::M256D), "__m256d const*") => {} + (&Type::ConstPtr(&Type::M256H), "__m256h const*") => {} (&Type::ConstPtr(&Type::M512), "__m512 const*") => {} (&Type::ConstPtr(&Type::M512BH), "__m512bh const*") => {} (&Type::ConstPtr(&Type::M512I), "__m512i const*") => {} From 8a5e9712979eeedf5c1ce714854cea2b3362f5b6 Mon Sep 17 00:00:00 2001 From: sayantn Date: Wed, 17 Jul 2024 18:41:41 +0530 Subject: [PATCH 11/11] Update Intrinsics List to v3.6.9 Add `#[inline]` to avx512ifma intrinsics Fix the test equality. Remove the stability attributes in simd types and test functions --- crates/core_arch/missing-x86.md | 6 +- crates/core_arch/src/simd.rs | 6 +- crates/core_arch/src/x86/avx512ifma.rs | 12 + crates/core_arch/src/x86/test.rs | 12 +- crates/stdarch-verify/tests/x86-intel.rs | 2 +- crates/stdarch-verify/x86-intel.xml | 305314 +++++++++----------- 6 files changed, 142494 insertions(+), 162858 deletions(-) diff --git a/crates/core_arch/missing-x86.md b/crates/core_arch/missing-x86.md index bb1d4c5c7a..e8f16f7e69 100644 --- a/crates/core_arch/missing-x86.md +++ b/crates/core_arch/missing-x86.md @@ -249,7 +249,7 @@

-["SHA512", "SHA512"]
+["SHA512", "AVX"]

* [ ] [`_mm256_sha512msg1_epi64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_sha512msg1_epi64) * [ ] [`_mm256_sha512msg2_epi64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_sha512msg2_epi64) @@ -257,7 +257,7 @@

-["SM3"]
+["SM3", "AVX"]

* [ ] [`_mm_sm3msg1_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sm3msg1_epi32) * [ ] [`_mm_sm3msg2_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sm3msg2_epi32) @@ -265,7 +265,7 @@

-["SM4"]
+["SM4", "AVX"]

* [ ] [`_mm256_sm4key4_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_sm4key4_epi32) * [ ] [`_mm256_sm4rnds4_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_sm4rnds4_epi32) diff --git a/crates/core_arch/src/simd.rs b/crates/core_arch/src/simd.rs index 3082334102..6697472622 100644 --- a/crates/core_arch/src/simd.rs +++ b/crates/core_arch/src/simd.rs @@ -3,10 +3,9 @@ #![allow(non_camel_case_types)] macro_rules! simd_ty { - ($(#[$stability:meta])? $id:ident [$ety:ident]: $($elem_name:ident),*) => { + ($id:ident [$ety:ident]: $($elem_name:ident),*) => { #[repr(simd)] #[derive(Copy, Clone, Debug, PartialEq)] - $(#[$stability])? pub(crate) struct $id { $(pub $elem_name: $ety),* } #[allow(clippy::use_self)] @@ -188,7 +187,6 @@ simd_ty!(i32x4[i32]: x0, x1, x2, x3); simd_ty!(i64x2[i64]: x0, x1); simd_ty!( - #[unstable(feature = "f16", issue = "116909")] f16x8[f16]: x0, x1, @@ -372,7 +370,6 @@ simd_ty!( simd_ty!(i64x4[i64]: x0, x1, x2, x3); simd_ty!( - #[unstable(feature = "f16", issue = "116909")] f16x16[f16]: x0, x1, @@ -722,7 +719,6 @@ simd_ty!( ); simd_ty!( - #[unstable(feature = "f16", issue = "116909")] f16x32[f16]: x0, x1, diff --git a/crates/core_arch/src/x86/avx512ifma.rs b/crates/core_arch/src/x86/avx512ifma.rs index 3bf9958e3d..a1cb339b38 100644 --- a/crates/core_arch/src/x86/avx512ifma.rs +++ b/crates/core_arch/src/x86/avx512ifma.rs @@ -27,6 +27,7 @@ pub unsafe fn _mm512_madd52hi_epu64(a: __m512i, b: __m512i, c: __m512i) -> __m51 /// from `k` when the corresponding mask bit is not set). /// /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#avx512techs=AVX512IFMA52&text=_mm512_mask_madd52hi_epu64) +#[inline] #[target_feature(enable = "avx512ifma")] #[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmadd52huq))] @@ -47,6 +48,7 @@ pub unsafe fn _mm512_mask_madd52hi_epu64( /// out when the corresponding mask bit is not set). /// /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#avx512techs=AVX512IFMA52&text=_mm512_maskz_madd52hi_epu64) +#[inline] #[target_feature(enable = "avx512ifma")] #[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmadd52huq))] @@ -82,6 +84,7 @@ pub unsafe fn _mm512_madd52lo_epu64(a: __m512i, b: __m512i, c: __m512i) -> __m51 /// from `k` when the corresponding mask bit is not set). /// /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#avx512techs=AVX512IFMA52&text=_mm512_mask_madd52lo_epu64) +#[inline] #[target_feature(enable = "avx512ifma")] #[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmadd52luq))] @@ -102,6 +105,7 @@ pub unsafe fn _mm512_mask_madd52lo_epu64( /// out when the corresponding mask bit is not set). /// /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#avx512techs=AVX512IFMA52&text=_mm512_maskz_madd52lo_epu64) +#[inline] #[target_feature(enable = "avx512ifma")] #[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmadd52luq))] @@ -155,6 +159,7 @@ pub unsafe fn _mm256_madd52hi_epu64(a: __m256i, b: __m256i, c: __m256i) -> __m25 /// from `k` when the corresponding mask bit is not set). 
/// /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#avx512techs=AVX512IFMA52&text=_mm256_mask_madd52hi_epu64) +#[inline] #[target_feature(enable = "avx512ifma,avx512vl")] #[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmadd52huq))] @@ -175,6 +180,7 @@ pub unsafe fn _mm256_mask_madd52hi_epu64( /// out when the corresponding mask bit is not set). /// /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#avx512techs=AVX512IFMA52&text=_mm256_maskz_madd52hi_epu64) +#[inline] #[target_feature(enable = "avx512ifma,avx512vl")] #[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmadd52huq))] @@ -228,6 +234,7 @@ pub unsafe fn _mm256_madd52lo_epu64(a: __m256i, b: __m256i, c: __m256i) -> __m25 /// from `k` when the corresponding mask bit is not set). /// /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#avx512techs=AVX512IFMA52&text=_mm256_mask_madd52lo_epu64) +#[inline] #[target_feature(enable = "avx512ifma,avx512vl")] #[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmadd52luq))] @@ -248,6 +255,7 @@ pub unsafe fn _mm256_mask_madd52lo_epu64( /// out when the corresponding mask bit is not set). /// /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#avx512techs=AVX512IFMA52&text=_mm256_maskz_madd52lo_epu64) +#[inline] #[target_feature(enable = "avx512ifma,avx512vl")] #[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmadd52luq))] @@ -301,6 +309,7 @@ pub unsafe fn _mm_madd52hi_epu64(a: __m128i, b: __m128i, c: __m128i) -> __m128i /// from `k` when the corresponding mask bit is not set). /// /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#avx512techs=AVX512IFMA52&text=_mm_mask_madd52hi_epu64) +#[inline] #[target_feature(enable = "avx512ifma,avx512vl")] #[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmadd52huq))] @@ -316,6 +325,7 @@ pub unsafe fn _mm_mask_madd52hi_epu64(a: __m128i, k: __mmask8, b: __m128i, c: __ /// out when the corresponding mask bit is not set). /// /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#avx512techs=AVX512IFMA52&text=_mm_maskz_madd52hi_epu64) +#[inline] #[target_feature(enable = "avx512ifma,avx512vl")] #[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmadd52huq))] @@ -364,6 +374,7 @@ pub unsafe fn _mm_madd52lo_epu64(a: __m128i, b: __m128i, c: __m128i) -> __m128i /// from `k` when the corresponding mask bit is not set). /// /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#avx512techs=AVX512IFMA52&text=_mm_mask_madd52lo_epu64) +#[inline] #[target_feature(enable = "avx512ifma,avx512vl")] #[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmadd52luq))] @@ -379,6 +390,7 @@ pub unsafe fn _mm_mask_madd52lo_epu64(a: __m128i, k: __mmask8, b: __m128i, c: __ /// out when the corresponding mask bit is not set). 
/// /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#avx512techs=AVX512IFMA52&text=_mm_maskz_madd52lo_epu64) +#[inline] #[target_feature(enable = "avx512ifma,avx512vl")] #[unstable(feature = "stdarch_x86_avx512", issue = "111137")] #[cfg_attr(test, assert_instr(vpmadd52luq))] diff --git a/crates/core_arch/src/x86/test.rs b/crates/core_arch/src/x86/test.rs index ebb67356a4..dd78321135 100644 --- a/crates/core_arch/src/x86/test.rs +++ b/crates/core_arch/src/x86/test.rs @@ -38,10 +38,8 @@ pub unsafe fn get_m128(a: __m128, idx: usize) -> f32 { #[track_caller] #[target_feature(enable = "avx512fp16")] -#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub unsafe fn assert_eq_m128h(a: __m128h, b: __m128h) { - // FIXME: use `_mm_cmp_ph_mask::<_CMP_EQ_OQ>` when it's implemented - let r = _mm_cmpeq_epi16_mask(transmute(a), transmute(b)); + let r = _mm_cmp_ph_mask::<_CMP_EQ_OQ>(a, b); if r != 0b1111_1111 { panic!("{:?} != {:?}", a, b); } @@ -90,10 +88,8 @@ pub unsafe fn get_m256(a: __m256, idx: usize) -> f32 { #[track_caller] #[target_feature(enable = "avx512fp16")] -#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub unsafe fn assert_eq_m256h(a: __m256h, b: __m256h) { - // FIXME: use `_mm256_cmp_ph_mask::<_CMP_EQ_OQ>` when it's implemented - let r = _mm256_cmpeq_epi16_mask(transmute(a), transmute(b)); + let r = _mm256_cmp_ph_mask::<_CMP_EQ_OQ>(a, b); if r != 0b11111111_11111111 { panic!("{:?} != {:?}", a, b); } @@ -164,10 +160,8 @@ pub unsafe fn assert_eq_m512d(a: __m512d, b: __m512d) { #[track_caller] #[target_feature(enable = "avx512fp16")] -#[unstable(feature = "stdarch_x86_avx512_f16", issue = "127213")] pub unsafe fn assert_eq_m512h(a: __m512h, b: __m512h) { - // FIXME: use `_mm512_cmp_ph_mask::<_CMP_EQ_OQ>` when it's implemented - let r = _mm512_cmpeq_epi16_mask(transmute(a), transmute(b)); + let r = _mm512_cmp_ph_mask::<_CMP_EQ_OQ>(a, b); if r != 0b11111111_11111111_11111111_11111111 { panic!("{:?} != {:?}", a, b); } diff --git a/crates/stdarch-verify/tests/x86-intel.rs b/crates/stdarch-verify/tests/x86-intel.rs index 1416afdec0..23d68436c5 100644 --- a/crates/stdarch-verify/tests/x86-intel.rs +++ b/crates/stdarch-verify/tests/x86-intel.rs @@ -161,7 +161,7 @@ fn verify_all_signatures() { // https://software.intel.com/sites/landingpage/IntrinsicsGuide/# // // Open up the network console and you'll see an xml file was downloaded - // (currently called data-3.6.8.xml). That's the file we downloaded + // (currently called data-3.6.9.xml). That's the file we downloaded // here. let xml = include_bytes!("../x86-intel.xml"); diff --git a/crates/stdarch-verify/x86-intel.xml b/crates/stdarch-verify/x86-intel.xml index 99ed2262e1..41f2119e68 100644 --- a/crates/stdarch-verify/x86-intel.xml +++ b/crates/stdarch-verify/x86-intel.xml @@ -1,161302 +1,142225 @@ - - - - - - - - Add unsigned 32-bit integers "a" and "b" with unsigned 8-bit carry-in "c_in" - (carry or overflow flag), and store the unsigned 32-bit result in "out", and the - carry-out in "dst" (carry or overflow flag). - - tmp[32:0] := a[31:0] + b[31:0] + (c_in > 0 ? 1 : 0) - MEM[out+31:out] := tmp[31:0] - dst[0] := tmp[32] - dst[7:1] := 0 - - - - ADX -

immintrin.h
- Arithmetic + + + + + + + + Add unsigned 32-bit integers "a" and "b" with unsigned 8-bit carry-in "c_in" (carry or overflow flag), and store the unsigned 32-bit result in "out", and the carry-out in "dst" (carry or overflow flag). + +tmp[32:0] := a[31:0] + b[31:0] + (c_in > 0 ? 1 : 0) +MEM[out+31:out] := tmp[31:0] +dst[0] := tmp[32] +dst[7:1] := 0 + + + + ADX +
immintrin.h
+ Arithmetic
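
As a concrete reference for the carry chaining this entry describes, here is a minimal sketch in Rust using the stable `_addcarry_u32` binding from `core::arch::x86_64`. The function name and layout are illustrative, not part of this patch.

use core::arch::x86_64::_addcarry_u32;

/// Add two u64 values held as 32-bit halves, chaining the carry flag the
/// way the pseudocode above does. Illustrative only.
unsafe fn add_u64_via_carry_chain(a: u64, b: u64) -> u64 {
    let (mut lo, mut hi) = (0u32, 0u32);
    // Low halves first; the returned carry-out feeds the high-half addition.
    let carry = _addcarry_u32(0, a as u32, b as u32, &mut lo);
    // The final carry-out is dropped, matching wrapping u64 addition.
    let _ = _addcarry_u32(carry, (a >> 32) as u32, (b >> 32) as u32, &mut hi);
    ((hi as u64) << 32) | (lo as u64)
}
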
- - - - - - Add unsigned 64-bit integers "a" and "b" with unsigned 8-bit carry-in "c_in" - (carry or overflow flag), and store the unsigned 64-bit result in "out", and the - carry-out in "dst" (carry or overflow flag). - - tmp[64:0] := a[63:0] + b[63:0] + (c_in > 0 ? 1 : 0) - MEM[out+63:out] := tmp[63:0] - dst[0] := tmp[64] - dst[7:1] := 0 - - - - ADX -
immintrin.h
- Arithmetic -
- - - - - Perform one round of an AES encryption flow on data (state) in "a" using the - round key in "RoundKey", and store the result in "dst"." - a[127:0] := ShiftRows(a[127:0]) - a[127:0] := SubBytes(a[127:0]) - a[127:0] := MixColumns(a[127:0]) - dst[127:0] := a[127:0] XOR RoundKey[127:0] - - - AES -
wmmintrin.h
- Cryptography + + + + + + Add unsigned 64-bit integers "a" and "b" with unsigned 8-bit carry-in "c_in" (carry or overflow flag), and store the unsigned 64-bit result in "out", and the carry-out in "dst" (carry or overflow flag). + +tmp[64:0] := a[63:0] + b[63:0] + (c_in > 0 ? 1 : 0) +MEM[out+63:out] := tmp[63:0] +dst[0] := tmp[64] +dst[7:1] := 0 + + + + ADX +
immintrin.h
+ Arithmetic +
+ + + + + Perform one round of an AES encryption flow on data (state) in "a" using the round key in "RoundKey", and store the result in "dst"." + a[127:0] := ShiftRows(a[127:0]) +a[127:0] := SubBytes(a[127:0]) +a[127:0] := MixColumns(a[127:0]) +dst[127:0] := a[127:0] XOR RoundKey[127:0] + + + AES +
wmmintrin.h
+ Cryptography
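
Rust already exposes this operation as a stable intrinsic; a minimal sketch of a full AES-128 encryption flow built from it, together with the last-round variant documented next. The key schedule is assumed to be expanded elsewhere, and the function name is illustrative.

use core::arch::x86_64::*;

/// AES-128 encryption of one block, given an already-expanded key schedule.
#[target_feature(enable = "aes")]
unsafe fn aes128_encrypt_block(block: __m128i, round_keys: &[__m128i; 11]) -> __m128i {
    // Initial AddRoundKey, nine full rounds, then the final round without
    // MixColumns, exactly as the pseudocode above describes.
    let mut state = _mm_xor_si128(block, round_keys[0]);
    for rk in &round_keys[1..10] {
        state = _mm_aesenc_si128(state, *rk);
    }
    _mm_aesenclast_si128(state, round_keys[10])
}
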
- - - - Perform the last round of an AES encryption flow on data (state) in "a" using - the round key in "RoundKey", and store the result in "dst"." - a[127:0] := ShiftRows(a[127:0]) - a[127:0] := SubBytes(a[127:0]) - dst[127:0] := a[127:0] XOR RoundKey[127:0] - - - AES -
wmmintrin.h
- Cryptography + + + + Perform the last round of an AES encryption flow on data (state) in "a" using the round key in "RoundKey", and store the result in "dst"." + a[127:0] := ShiftRows(a[127:0]) +a[127:0] := SubBytes(a[127:0]) +dst[127:0] := a[127:0] XOR RoundKey[127:0] + + + AES +
wmmintrin.h
+ Cryptography
- - - - Perform one round of an AES decryption flow on data (state) in "a" using the - round key in "RoundKey", and store the result in "dst". - a[127:0] := InvShiftRows(a[127:0]) - a[127:0] := InvSubBytes(a[127:0]) - a[127:0] := InvMixColumns(a[127:0]) - dst[127:0] := a[127:0] XOR RoundKey[127:0] - - - AES -
wmmintrin.h
- Cryptography + + + + Perform one round of an AES decryption flow on data (state) in "a" using the round key in "RoundKey", and store the result in "dst". + a[127:0] := InvShiftRows(a[127:0]) +a[127:0] := InvSubBytes(a[127:0]) +a[127:0] := InvMixColumns(a[127:0]) +dst[127:0] := a[127:0] XOR RoundKey[127:0] + + + AES +
wmmintrin.h
+ Cryptography
- - - - Perform the last round of an AES decryption flow on data (state) in "a" using - the round key in "RoundKey", and store the result in "dst". - a[127:0] := InvShiftRows(a[127:0]) - a[127:0] := InvSubBytes(a[127:0]) - dst[127:0] := a[127:0] XOR RoundKey[127:0] - - - AES -
wmmintrin.h
- Cryptography + + + + Perform the last round of an AES decryption flow on data (state) in "a" using the round key in "RoundKey", and store the result in "dst". + a[127:0] := InvShiftRows(a[127:0]) +a[127:0] := InvSubBytes(a[127:0]) +dst[127:0] := a[127:0] XOR RoundKey[127:0] + + + AES +
wmmintrin.h
+ Cryptography
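
The decryption rounds mirror the encryption flow, but with the equivalent inverse cipher: the decryption key schedule is the encryption schedule in reverse order, with the nine middle keys passed through `_mm_aesimc_si128` beforehand. A hedged sketch using the stable bindings (names illustrative):

use core::arch::x86_64::*;

/// AES-128 decryption of one block. `dec_keys` holds the encryption round
/// keys reversed, middle keys pre-transformed with `_mm_aesimc_si128`.
#[target_feature(enable = "aes")]
unsafe fn aes128_decrypt_block(block: __m128i, dec_keys: &[__m128i; 11]) -> __m128i {
    let mut state = _mm_xor_si128(block, dec_keys[0]);
    for rk in &dec_keys[1..10] {
        state = _mm_aesdec_si128(state, *rk);
    }
    _mm_aesdeclast_si128(state, dec_keys[10])
}
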
- - - Perform the InvMixColumns transformation on "a" and store the result in "dst". - dst[127:0] := InvMixColumns(a[127:0]) - - - AES -
wmmintrin.h
- Cryptography + + + Perform the InvMixColumns transformation on "a" and store the result in "dst". + dst[127:0] := InvMixColumns(a[127:0]) + + + AES +
wmmintrin.h
+ Cryptography
- - - - Assist in expanding the AES cipher key by computing steps towards generating a - round key for encryption cipher using data from "a" and an 8-bit round constant - specified in "imm8", and store the result in "dst"." - X3[31:0] := a[127:96] - X2[31:0] := a[95:64] - X1[31:0] := a[63:32] - X0[31:0] := a[31:0] - RCON[31:0] := ZeroExtend32(imm8[7:0]) - dst[31:0] := SubWord(X1) - dst[63:32] := RotWord(SubWord(X1)) XOR RCON - dst[95:64] := SubWord(X3) - dst[127:96] := RotWord(SubWord(X3)) XOR RCON - - - AES -
wmmintrin.h
- Cryptography -
- - - - - - - - Compute dot-product of BF16 (16-bit) floating-point pairs in tiles "a" and "b", - accumulating the intermediate single-precision (32-bit) floating-point elements with - elements in "dst", and store the 32-bit result back to tile "dst". - FOR m := 0 TO dst.rows - 1 - tmp := dst.row[m] - FOR k := 0 TO (a.colsb / 4) - 1 - FOR n := 0 TO (dst.colsb / 4) - 1 + + + + Assist in expanding the AES cipher key by computing steps towards generating a round key for encryption cipher using data from "a" and an 8-bit round constant specified in "imm8", and store the result in "dst"." + X3[31:0] := a[127:96] +X2[31:0] := a[95:64] +X1[31:0] := a[63:32] +X0[31:0] := a[31:0] +RCON[31:0] := ZeroExtend32(imm8[7:0]) +dst[31:0] := SubWord(X1) +dst[63:32] := RotWord(SubWord(X1)) XOR RCON +dst[95:64] := SubWord(X3) +dst[127:96] := RotWord(SubWord(X3)) XOR RCON + + + AES +
wmmintrin.h
+ Cryptography +
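
The RCON/SubWord/RotWord pseudocode above is the building block of the AES-128 key schedule. One expansion step, following Intel's published pattern, might look like the sketch below; round 1 is shown with RCON = 0x01, and later rounds substitute their own constant.

use core::arch::x86_64::*;

/// One AES-128 key-schedule step (round 1 shown). Illustrative sketch.
#[target_feature(enable = "aes")]
unsafe fn expand_round_1(prev: __m128i) -> __m128i {
    // dword 3 of the assist result is RotWord(SubWord(X3)) XOR RCON;
    // broadcast it to all four lanes.
    let t = _mm_shuffle_epi32::<0xFF>(_mm_aeskeygenassist_si128::<0x01>(prev));
    // Prefix-XOR the previous key words into every lane.
    let mut key = prev;
    let mut shifted = _mm_slli_si128::<4>(prev);
    key = _mm_xor_si128(key, shifted);
    shifted = _mm_slli_si128::<4>(shifted);
    key = _mm_xor_si128(key, shifted);
    shifted = _mm_slli_si128::<4>(shifted);
    key = _mm_xor_si128(key, shifted);
    _mm_xor_si128(key, t)
}
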
+ + + + + + + + Compute dot-product of BF16 (16-bit) floating-point pairs in tiles "a" and "b", accumulating the intermediate single-precision (32-bit) floating-point elements with elements in "dst", and store the 32-bit result back to tile "dst". + FOR m := 0 TO dst.rows - 1 + tmp := dst.row[m] + FOR k := 0 TO (a.colsb / 4) - 1 + FOR n := 0 TO (dst.colsb / 4) - 1 tmp.fp32[n] += FP32(a.row[m].bf16[2*k+0]) * FP32(b.row[k].bf16[2*n+0]) tmp.fp32[n] += FP32(a.row[m].bf16[2*k+1]) * FP32(b.row[k].bf16[2*n+1]) - ENDFOR - ENDFOR - write_row_and_zero(dst, m, tmp, dst.colsb) - ENDFOR - zero_upper_rows(dst, dst.rows) - zero_tileconfig_start() - - - AMX-BF16 -
immintrin.h
- Application-Targeted + ENDFOR + ENDFOR + write_row_and_zero(dst, m, tmp, dst.colsb) +ENDFOR +zero_upper_rows(dst, dst.rows) +zero_tileconfig_start() + + + AMX-BF16 +
immintrin.h
+ Application-Targeted
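
stdarch has no AMX bindings, so as a plain-Rust reference for what the TDPBF16PS pseudocode above computes, here is a hedged scalar model. It assumes maximum-size tiles (16 rows of 64 bytes) and ignores any internal rounding the hardware may apply to intermediates.

/// bf16 is the upper half of an f32, so widening is a 16-bit shift.
fn bf16_to_f32(bits: u16) -> f32 {
    f32::from_bits((bits as u32) << 16)
}

/// Scalar model of TDPBF16PS: 16x32 bf16 inputs, 16x16 f32 accumulator.
fn tdpbf16ps_model(dst: &mut [[f32; 16]; 16], a: &[[u16; 32]; 16], b: &[[u16; 32]; 16]) {
    for m in 0..16 {
        for k in 0..16 {
            for n in 0..16 {
                // One bf16 pair from a.row[m] against one pair from b.row[k].
                dst[m][n] += bf16_to_f32(a[m][2 * k]) * bf16_to_f32(b[k][2 * n])
                    + bf16_to_f32(a[m][2 * k + 1]) * bf16_to_f32(b[k][2 * n + 1]);
            }
        }
    }
}
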
- - Compute dot-product of BF16 (16-bit) floating-point pairs in tiles "src0" and - "src1", accumulating the intermediate single-precision (32-bit) floating-point elements - with elements in "dst", and store the 32-bit result back to tile "dst". The shape of - tile is specified in the struct of __tile1024i. The register of the tile is allocated by - compiler. - - FOR m := 0 TO dst.rows - 1 - tmp := dst.row[m] - FOR k := 0 TO (src0.colsb / 4) - 1 - FOR n := 0 TO (dst.colsb / 4) - 1 + + Compute dot-product of BF16 (16-bit) floating-point pairs in tiles "src0" and "src1", accumulating the intermediate single-precision (32-bit) floating-point elements with elements in "dst", and store the 32-bit result back to tile "dst". The shape of tile is specified in the struct of __tile1024i. The register of the tile is allocated by compiler. + + FOR m := 0 TO dst.rows - 1 + tmp := dst.row[m] + FOR k := 0 TO (src0.colsb / 4) - 1 + FOR n := 0 TO (dst.colsb / 4) - 1 tmp.fp32[n] += FP32(src0.row[m].bf16[2*k+0]) * FP32(src1.row[k].bf16[2*n+0]) tmp.fp32[n] += FP32(src0.row[m].bf16[2*k+1]) * FP32(src1.row[k].bf16[2*n+1]) - ENDFOR - ENDFOR - write_row_and_zero(dst, m, tmp, dst.colsb) - ENDFOR - zero_upper_rows(dst, dst.rows) - zero_tileconfig_start() + ENDFOR + ENDFOR + write_row_and_zero(dst, m, tmp, dst.colsb) +ENDFOR +zero_upper_rows(dst, dst.rows) +zero_tileconfig_start() - - - - AMX-BF16 -
immintrin.h
- Application-Targeted -
- - - - - - - - Perform matrix multiplication of two tiles containing complex elements and - accumulate the results into a packed single precision tile. Each dword element in input - tiles "a" and "b" is interpreted as a complex number with FP16 real part and FP16 - imaginary part. Calculates the imaginary part of the result. For each possible - combination of (row of "a", column of "b"), it performs a set of multiplication and - accumulations on all corresponding complex numbers (one from "a" and one from "b"). The - imaginary part of the "a" element is multiplied with the real part of the corresponding - "b" element, and the real part of the "a" element is multiplied with the imaginary part - of the corresponding "b" elements. The two accumulated results are added, and then - accumulated into the corresponding row and column of "dst". - FOR m := 0 TO dst.rows - 1 - tmp := dst.row[m] - FOR k := 0 TO (a.colsb / 4) - 1 - FOR n := 0 TO (dst.colsb / 4) - 1 + + + + AMX-BF16 +
immintrin.h
+ Application-Targeted +
+ + + + + + + Perform matrix multiplication of two tiles containing complex elements and accumulate the results into a packed single precision tile. Each dword element in input tiles "a" and "b" is interpreted as a complex number with FP16 real part and FP16 imaginary part. Calculates the imaginary part of the result. For each possible combination of (row of "a", column of "b"), it performs a set of multiplication and accumulations on all corresponding complex numbers (one from "a" and one from "b"). The imaginary part of the "a" element is multiplied with the real part of the corresponding "b" element, and the real part of the "a" element is multiplied with the imaginary part of the corresponding "b" elements. The two accumulated results are added, and then accumulated into the corresponding row and column of "dst". + FOR m := 0 TO dst.rows - 1 + tmp := dst.row[m] + FOR k := 0 TO (a.colsb / 4) - 1 + FOR n := 0 TO (dst.colsb / 4) - 1 tmp.fp32[n] += FP32(a.row[m].fp16[2*k+0]) * FP32(b.row[k].fp16[2*n+1]) tmp.fp32[n] += FP32(a.row[m].fp16[2*k+1]) * FP32(b.row[k].fp16[2*n+0]) - ENDFOR - ENDFOR - write_row_and_zero(dst, m, tmp, dst.colsb) - ENDFOR - zero_upper_rows(dst, dst.rows) - zero_tileconfig_start() - - - AMX-COMPLEX -
immintrin.h
- Application-Targeted + ENDFOR + ENDFOR + write_row_and_zero(dst, m, tmp, dst.colsb) +ENDFOR +zero_upper_rows(dst, dst.rows) +zero_tileconfig_start() + + + AMX-COMPLEX +
immintrin.h
+ Application-Targeted
- - - - - Perform matrix multiplication of two tiles containing complex elements and - accumulate the results into a packed single precision tile. Each dword element in input - tiles "a" and "b" is interpreted as a complex number with FP16 real part and FP16 - imaginary part. Calculates the real part of the result. For each possible combination of - (row of "a", column of "b"), it performs a set of multiplication and accumulations on - all corresponding complex numbers (one from "a" and one from "b"). The real part of the - "a" element is multiplied with the real part of the corresponding "b" element, and the - negated imaginary part of the "a" element is multiplied with the imaginary part of the - corresponding "b" elements. The two accumulated results are added, and then accumulated - into the corresponding row and column of "dst". - FOR m := 0 TO dst.rows - 1 - tmp := dst.row[m] - FOR k := 0 TO (a.colsb / 4) - 1 - FOR n := 0 TO (dst.colsb / 4) - 1 + + + + + Perform matrix multiplication of two tiles containing complex elements and accumulate the results into a packed single precision tile. Each dword element in input tiles "a" and "b" is interpreted as a complex number with FP16 real part and FP16 imaginary part. Calculates the real part of the result. For each possible combination of (row of "a", column of "b"), it performs a set of multiplication and accumulations on all corresponding complex numbers (one from "a" and one from "b"). The real part of the "a" element is multiplied with the real part of the corresponding "b" element, and the negated imaginary part of the "a" element is multiplied with the imaginary part of the corresponding "b" elements. The two accumulated results are added, and then accumulated into the corresponding row and column of "dst". + FOR m := 0 TO dst.rows - 1 + tmp := dst.row[m] + FOR k := 0 TO (a.colsb / 4) - 1 + FOR n := 0 TO (dst.colsb / 4) - 1 tmp.fp32[n] += FP32(a.row[m].fp16[2*k+0]) * FP32(b.row[k].fp16[2*n+0]) tmp.fp32[n] += FP32(-a.row[m].fp16[2*k+1]) * FP32(b.row[k].fp16[2*n+1]) - ENDFOR - ENDFOR - write_row_and_zero(dst, m, tmp, dst.colsb) - ENDFOR - zero_upper_rows(dst, dst.rows) - zero_tileconfig_start() + ENDFOR + ENDFOR + write_row_and_zero(dst, m, tmp, dst.colsb) +ENDFOR +zero_upper_rows(dst, dst.rows) +zero_tileconfig_start() - - AMX-COMPLEX -
immintrin.h
- Application-Targeted + + AMX-COMPLEX +
immintrin.h
+ Application-Targeted
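
Taken together, the real-part and imaginary-part pseudocode in these AMX-COMPLEX entries is one complex multiply-accumulate per pair of f16 values. A scalar model of a single accumulator cell, with the f16 inputs pre-widened to f32 for brevity (names illustrative):

/// One TCMMRLFP16PS / TCMMIMFP16PS accumulation step for a single dst cell.
fn complex_mac(acc_re: &mut f32, acc_im: &mut f32,
               a_re: f32, a_im: f32, b_re: f32, b_im: f32) {
    *acc_re += a_re * b_re - a_im * b_im; // real part (TCMMRLFP16PS)
    *acc_im += a_re * b_im + a_im * b_re; // imaginary part (TCMMIMFP16PS)
}
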
- - Perform matrix multiplication of two tiles containing complex elements and - accumulate the results into a packed single precision tile. Each dword element in input - tiles "src0" and "src1" is interpreted as a complex number with FP16 real part and FP16 - imaginary part. This function calculates the imaginary part of the result. - - FOR m := 0 TO dst.rows - 1 - tmp := dst.row[m] - FOR k := 0 TO (src0.colsb / 4) - 1 - FOR n := 0 TO (dst.colsb / 4) - 1 + + Perform matrix multiplication of two tiles containing complex elements and accumulate the results into a packed single precision tile. Each dword element in input tiles "src0" and "src1" is interpreted as a complex number with FP16 real part and FP16 imaginary part. This function calculates the imaginary part of the result. + + FOR m := 0 TO dst.rows - 1 + tmp := dst.row[m] + FOR k := 0 TO (src0.colsb / 4) - 1 + FOR n := 0 TO (dst.colsb / 4) - 1 tmp.fp32[n] += FP32(src0.row[m].fp16[2*k+0]) * FP32(src1.row[k].fp16[2*n+1]) tmp.fp32[n] += FP32(src0.row[m].fp16[2*k+1]) * FP32(src1.row[k].fp16[2*n+0]) - ENDFOR - ENDFOR - write_row_and_zero(dst, m, tmp, dst.colsb) - ENDFOR - zero_upper_rows(dst, dst.rows) - zero_tileconfig_start() + ENDFOR + ENDFOR + write_row_and_zero(dst, m, tmp, dst.colsb) +ENDFOR +zero_upper_rows(dst, dst.rows) +zero_tileconfig_start() - - - - AMX-COMPLEX -
immintrin.h
- Application-Targeted + + + + AMX-COMPLEX +
immintrin.h
+ Application-Targeted
- - Perform matrix multiplication of two tiles containing complex elements and - accumulate the results into a packed single precision tile. Each dword element in input - tiles src0 and src1 is interpreted as a complex number with FP16 real part and FP16 - imaginary part. This function calculates the real part of the result. - - FOR m := 0 TO dst.rows - 1 - tmp := dst.row[m] - FOR k := 0 TO (src0.colsb / 4) - 1 - FOR n := 0 TO (dst.colsb / 4) - 1 + + Perform matrix multiplication of two tiles containing complex elements and accumulate the results into a packed single precision tile. Each dword element in input tiles src0 and src1 is interpreted as a complex number with FP16 real part and FP16 imaginary part. This function calculates the real part of the result. + + FOR m := 0 TO dst.rows - 1 + tmp := dst.row[m] + FOR k := 0 TO (src0.colsb / 4) - 1 + FOR n := 0 TO (dst.colsb / 4) - 1 tmp.fp32[n] += FP32(src0.row[m].fp16[2*k+0]) * FP32(src1.row[k].fp16[2*n+0]) tmp.fp32[n] += FP32(-src0.row[m].fp16[2*k+1]) * FP32(src1.row[k].fp16[2*n+1]) - ENDFOR - ENDFOR - write_row_and_zero(dst, m, tmp, dst.colsb) - ENDFOR - zero_upper_rows(dst, dst.rows) - zero_tileconfig_start() + ENDFOR + ENDFOR + write_row_and_zero(dst, m, tmp, dst.colsb) +ENDFOR +zero_upper_rows(dst, dst.rows) +zero_tileconfig_start() - - - - AMX-COMPLEX -
immintrin.h
- Application-Targeted -
- - - - - - - Compute dot-product of FP16 (16-bit) floating-point pairs in tiles "a" and "b", - accumulating the intermediate single-precision (32-bit) floating-point elements with - elements in "dst", and store the 32-bit result back to tile "dst". - FOR m := 0 TO dst.rows - 1 - tmp := dst.row[m] - FOR k := 0 TO (a.colsb / 4) - 1 - FOR n := 0 TO (dst.colsb / 4) - 1 + + + + AMX-COMPLEX +
immintrin.h
+ Application-Targeted +
+ + + + + + + Compute dot-product of FP16 (16-bit) floating-point pairs in tiles "a" and "b", accumulating the intermediate single-precision (32-bit) floating-point elements with elements in "dst", and store the 32-bit result back to tile "dst". + FOR m := 0 TO dst.rows - 1 + tmp := dst.row[m] + FOR k := 0 TO (a.colsb / 4) - 1 + FOR n := 0 TO (dst.colsb / 4) - 1 tmp.fp32[n] += FP32(a.row[m].fp16[2*k+0]) * FP32(b.row[k].fp16[2*n+0]) tmp.fp32[n] += FP32(a.row[m].fp16[2*k+1]) * FP32(b.row[k].fp16[2*n+1]) - ENDFOR - ENDFOR - write_row_and_zero(dst, m, tmp, dst.colsb) - ENDFOR - zero_upper_rows(dst, dst.rows) - zero_tileconfig_start() - - - AMX-FP16 -
immintrin.h
- Application-Targeted + ENDFOR + ENDFOR + write_row_and_zero(dst, m, tmp, dst.colsb) +ENDFOR +zero_upper_rows(dst, dst.rows) +zero_tileconfig_start() + + + AMX-FP16 +
immintrin.h
+ Application-Targeted
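
TDPFP16PS follows the same dot-product shape as the BF16 variant, but over IEEE binary16 inputs. The FP32(...) widening step in the pseudocode can be modeled in scalar Rust as below; the conversion from f16 to f32 is exact, so no rounding is involved.

/// Widen IEEE binary16 bits to f32 (scalar model of the FP32(...) step).
fn f16_bits_to_f32(bits: u16) -> f32 {
    let sign = ((bits & 0x8000) as u32) << 16;
    let exp = ((bits >> 10) & 0x1f) as u32;
    let frac = (bits & 0x3ff) as u32;
    let magnitude = match exp {
        0 => (frac as f32) * (2.0_f32).powi(-24), // subnormal or zero
        0x1f if frac == 0 => f32::INFINITY,
        0x1f => f32::NAN, // NaN payload bits dropped for simplicity
        // Rebias the exponent (15 -> 127) and widen the fraction (10 -> 23 bits).
        _ => return f32::from_bits(sign | ((exp + 112) << 23) | (frac << 13)),
    };
    if sign != 0 { -magnitude } else { magnitude }
}
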
- - Compute dot-product of FP16 (16-bit) floating-point pairs in tiles "src0" and - "src1", accumulating the intermediate single-precision (32-bit) floating-point elements - with elements in "dst", and store the 32-bit result back to tile "dst". The shape of - tile is specified in the struct of __tile1024i. The register of the tile is allocated by - compiler. - - FOR m := 0 TO dst.rows - 1 - tmp := dst.row[m] - FOR k := 0 TO (src0.colsb / 4) - 1 - FOR n := 0 TO (dst.colsb / 4) - 1 + + Compute dot-product of FP16 (16-bit) floating-point pairs in tiles "src0" and "src1", accumulating the intermediate single-precision (32-bit) floating-point elements with elements in "dst", and store the 32-bit result back to tile "dst". The shape of tile is specified in the struct of __tile1024i. The register of the tile is allocated by compiler. + + FOR m := 0 TO dst.rows - 1 + tmp := dst.row[m] + FOR k := 0 TO (src0.colsb / 4) - 1 + FOR n := 0 TO (dst.colsb / 4) - 1 tmp.fp32[n] += FP32(src0.row[m].fp16[2*k+0]) * FP32(src1.row[k].fp16[2*n+0]) tmp.fp32[n] += FP32(src0.row[m].fp16[2*k+1]) * FP32(src1.row[k].fp16[2*n+1]) - ENDFOR - ENDFOR - write_row_and_zero(dst, m, tmp, dst.colsb) - ENDFOR - zero_upper_rows(dst, dst.rows) - zero_tileconfig_start() + ENDFOR + ENDFOR + write_row_and_zero(dst, m, tmp, dst.colsb) +ENDFOR +zero_upper_rows(dst, dst.rows) +zero_tileconfig_start() - - - - AMX-FP16 -
immintrin.h
- Application-Targeted -
- - - - - - - - Compute dot-product of bytes in tiles with a source/destination accumulator. - Multiply groups of 4 adjacent pairs of signed 8-bit integers in "a" with corresponding - unsigned 8-bit integers in "b", producing 4 intermediate 32-bit results. Sum these 4 - results with the corresponding 32-bit integer in "dst", and store the 32-bit result back - to tile "dst". - DEFINE DPBD(c, x, y) { - tmp1 := SignExtend32(x.byte[0]) * ZeroExtend32(y.byte[0]) - tmp2 := SignExtend32(x.byte[1]) * ZeroExtend32(y.byte[1]) - tmp3 := SignExtend32(x.byte[2]) * ZeroExtend32(y.byte[2]) - tmp4 := SignExtend32(x.byte[3]) * ZeroExtend32(y.byte[3]) - - RETURN c + tmp1 + tmp2 + tmp3 + tmp4 - } - FOR m := 0 TO dst.rows - 1 - tmp := dst.row[m] - FOR k := 0 TO (a.colsb / 4) - 1 - FOR n := 0 TO (dst.colsb / 4) - 1 + + + + AMX-FP16 +
immintrin.h
+ Application-Targeted +
+ + + + + + + Compute dot-product of bytes in tiles with a source/destination accumulator. Multiply groups of 4 adjacent pairs of signed 8-bit integers in "a" with corresponding unsigned 8-bit integers in "b", producing 4 intermediate 32-bit results. Sum these 4 results with the corresponding 32-bit integer in "dst", and store the 32-bit result back to tile "dst". + DEFINE DPBD(c, x, y) { + tmp1 := SignExtend32(x.byte[0]) * ZeroExtend32(y.byte[0]) + tmp2 := SignExtend32(x.byte[1]) * ZeroExtend32(y.byte[1]) + tmp3 := SignExtend32(x.byte[2]) * ZeroExtend32(y.byte[2]) + tmp4 := SignExtend32(x.byte[3]) * ZeroExtend32(y.byte[3]) + + RETURN c + tmp1 + tmp2 + tmp3 + tmp4 +} +FOR m := 0 TO dst.rows - 1 + tmp := dst.row[m] + FOR k := 0 TO (a.colsb / 4) - 1 + FOR n := 0 TO (dst.colsb / 4) - 1 tmp.dword[n] := DPBD(tmp.dword[n], a.row[m].dword[k], b.row[k].dword[n]) - ENDFOR - ENDFOR - write_row_and_zero(dst, m, tmp, dst.colsb) - ENDFOR - zero_upper_rows(dst, dst.rows) - zero_tileconfig_start() - - - AMX-INT8 -
immintrin.h
- Application-Targeted + ENDFOR + ENDFOR + write_row_and_zero(dst, m, tmp, dst.colsb) +ENDFOR +zero_upper_rows(dst, dst.rows) +zero_tileconfig_start() + + + AMX-INT8 +
immintrin.h
+ Application-Targeted
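
All four AMX-INT8 variants share the DPBD inner step and differ only in which operand is sign-extended. The signed-by-signed case from the pseudocode above, as a scalar sketch; the other flavors swap in u8 on one or both sides.

/// Scalar model of one DPBD step (TDPBSSD flavor): four i8 x i8 products
/// widened to i32 and accumulated with wrapping arithmetic.
fn dpbssd_step(acc: i32, a_dword: [i8; 4], b_dword: [i8; 4]) -> i32 {
    let mut sum = acc;
    for i in 0..4 {
        sum = sum.wrapping_add(i32::from(a_dword[i]) * i32::from(b_dword[i]));
    }
    sum
}
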
- - - - - Compute dot-product of bytes in tiles with a source/destination accumulator. - Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding - signed 8-bit integers in "b", producing 4 intermediate 32-bit results. Sum these 4 - results with the corresponding 32-bit integer in "dst", and store the 32-bit result back - to tile "dst". - DEFINE DPBD(c, x, y) { - tmp1 := ZeroExtend32(x.byte[0]) * SignExtend32(y.byte[0]) - tmp2 := ZeroExtend32(x.byte[1]) * SignExtend32(y.byte[1]) - tmp3 := ZeroExtend32(x.byte[2]) * SignExtend32(y.byte[2]) - tmp4 := ZeroExtend32(x.byte[3]) * SignExtend32(y.byte[3]) - - RETURN c + tmp1 + tmp2 + tmp3 + tmp4 - } - FOR m := 0 TO dst.rows - 1 - tmp := dst.row[m] - FOR k := 0 TO (a.colsb / 4) - 1 - FOR n := 0 TO (dst.colsb / 4) - 1 + + + + + Compute dot-product of bytes in tiles with a source/destination accumulator. Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate 32-bit results. Sum these 4 results with the corresponding 32-bit integer in "dst", and store the 32-bit result back to tile "dst". + DEFINE DPBD(c, x, y) { + tmp1 := ZeroExtend32(x.byte[0]) * SignExtend32(y.byte[0]) + tmp2 := ZeroExtend32(x.byte[1]) * SignExtend32(y.byte[1]) + tmp3 := ZeroExtend32(x.byte[2]) * SignExtend32(y.byte[2]) + tmp4 := ZeroExtend32(x.byte[3]) * SignExtend32(y.byte[3]) + + RETURN c + tmp1 + tmp2 + tmp3 + tmp4 +} +FOR m := 0 TO dst.rows - 1 + tmp := dst.row[m] + FOR k := 0 TO (a.colsb / 4) - 1 + FOR n := 0 TO (dst.colsb / 4) - 1 tmp.dword[n] := DPBD(tmp.dword[n], a.row[m].dword[k], b.row[k].dword[n]) - ENDFOR - ENDFOR - write_row_and_zero(dst, m, tmp, dst.colsb) - ENDFOR - zero_upper_rows(dst, dst.rows) - zero_tileconfig_start() - - - AMX-INT8 -
immintrin.h
- Application-Targeted + ENDFOR + ENDFOR + write_row_and_zero(dst, m, tmp, dst.colsb) +ENDFOR +zero_upper_rows(dst, dst.rows) +zero_tileconfig_start() +
+ + AMX-INT8 +
immintrin.h
+ Application-Targeted
- - - - - Compute dot-product of bytes in tiles with a source/destination accumulator. - Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding - unsigned 8-bit integers in "b", producing 4 intermediate 32-bit results. Sum these 4 - results with the corresponding 32-bit integer in "dst", and store the 32-bit result back - to tile "dst". - DEFINE DPBD(c, x, y) { - tmp1 := ZeroExtend32(x.byte[0]) * ZeroExtend32(y.byte[0]) - tmp2 := ZeroExtend32(x.byte[1]) * ZeroExtend32(y.byte[1]) - tmp3 := ZeroExtend32(x.byte[2]) * ZeroExtend32(y.byte[2]) - tmp4 := ZeroExtend32(x.byte[3]) * ZeroExtend32(y.byte[3]) - - RETURN c + tmp1 + tmp2 + tmp3 + tmp4 - } - FOR m := 0 TO dst.rows - 1 - tmp := dst.row[m] - FOR k := 0 TO (a.colsb / 4) - 1 - FOR n := 0 TO (dst.colsb / 4) - 1 + + + + + Compute dot-product of bytes in tiles with a source/destination accumulator. Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding unsigned 8-bit integers in "b", producing 4 intermediate 32-bit results. Sum these 4 results with the corresponding 32-bit integer in "dst", and store the 32-bit result back to tile "dst". + DEFINE DPBD(c, x, y) { + tmp1 := ZeroExtend32(x.byte[0]) * ZeroExtend32(y.byte[0]) + tmp2 := ZeroExtend32(x.byte[1]) * ZeroExtend32(y.byte[1]) + tmp3 := ZeroExtend32(x.byte[2]) * ZeroExtend32(y.byte[2]) + tmp4 := ZeroExtend32(x.byte[3]) * ZeroExtend32(y.byte[3]) + + RETURN c + tmp1 + tmp2 + tmp3 + tmp4 +} +FOR m := 0 TO dst.rows - 1 + tmp := dst.row[m] + FOR k := 0 TO (a.colsb / 4) - 1 + FOR n := 0 TO (dst.colsb / 4) - 1 tmp.dword[n] := DPBD(tmp.dword[n], a.row[m].dword[k], b.row[k].dword[n]) - ENDFOR - ENDFOR - write_row_and_zero(dst, m, tmp, dst.colsb) - ENDFOR - zero_upper_rows(dst, dst.rows) - zero_tileconfig_start() - - - AMX-INT8 -
immintrin.h
- Application-Targeted + ENDFOR + ENDFOR + write_row_and_zero(dst, m, tmp, dst.colsb) +ENDFOR +zero_upper_rows(dst, dst.rows) +zero_tileconfig_start() +
+ + AMX-INT8 +
immintrin.h
+ Application-Targeted
- - - - - Compute dot-product of bytes in tiles with a source/destination accumulator. - Multiply groups of 4 adjacent pairs of signed 8-bit integers in "a" with corresponding - signed 8-bit integers in "b", producing 4 intermediate 32-bit results. Sum these 4 - results with the corresponding 32-bit integer in "dst", and store the 32-bit result back - to tile "dst". - DEFINE DPBD(c, x, y) { - tmp1 := SignExtend32(x.byte[0]) * SignExtend32(y.byte[0]) - tmp2 := SignExtend32(x.byte[1]) * SignExtend32(y.byte[1]) - tmp3 := SignExtend32(x.byte[2]) * SignExtend32(y.byte[2]) - tmp4 := SignExtend32(x.byte[3]) * SignExtend32(y.byte[3]) - - RETURN c + tmp1 + tmp2 + tmp3 + tmp4 - } - FOR m := 0 TO dst.rows - 1 - tmp := dst.row[m] - FOR k := 0 TO (a.colsb / 4) - 1 - FOR n := 0 TO (dst.colsb / 4) - 1 + + + + + Compute dot-product of bytes in tiles with a source/destination accumulator. Multiply groups of 4 adjacent pairs of signed 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate 32-bit results. Sum these 4 results with the corresponding 32-bit integer in "dst", and store the 32-bit result back to tile "dst". + DEFINE DPBD(c, x, y) { + tmp1 := SignExtend32(x.byte[0]) * SignExtend32(y.byte[0]) + tmp2 := SignExtend32(x.byte[1]) * SignExtend32(y.byte[1]) + tmp3 := SignExtend32(x.byte[2]) * SignExtend32(y.byte[2]) + tmp4 := SignExtend32(x.byte[3]) * SignExtend32(y.byte[3]) + + RETURN c + tmp1 + tmp2 + tmp3 + tmp4 +} +FOR m := 0 TO dst.rows - 1 + tmp := dst.row[m] + FOR k := 0 TO (a.colsb / 4) - 1 + FOR n := 0 TO (dst.colsb / 4) - 1 tmp.dword[n] := DPBD(tmp.dword[n], a.row[m].dword[k], b.row[k].dword[n]) - ENDFOR - ENDFOR - write_row_and_zero(dst, m, tmp, dst.colsb) - ENDFOR - zero_upper_rows(dst, dst.rows) - zero_tileconfig_start() - - - AMX-INT8 -
immintrin.h
- Application-Targeted + ENDFOR + ENDFOR + write_row_and_zero(dst, m, tmp, dst.colsb) +ENDFOR +zero_upper_rows(dst, dst.rows) +zero_tileconfig_start() +
+ + AMX-INT8 +
immintrin.h
+ Application-Targeted
- - Compute dot-product of bytes in tiles with a source/destination accumulator. - Multiply groups of 4 adjacent pairs of signed 8-bit integers in "src0" with - corresponding signed 8-bit integers in "src1", producing 4 intermediate 32-bit results. - Sum these 4 results with the corresponding 32-bit integer in "dst", and store the 32-bit - result back to tile "dst". The shape of tile is specified in the struct of __tile1024i. - The register of the tile is allocated by compiler. - - DEFINE DPBD(c, x, y) { - tmp1 := SignExtend32(x.byte[0]) * SignExtend32(y.byte[0]) - tmp2 := SignExtend32(x.byte[1]) * SignExtend32(y.byte[1]) - tmp3 := SignExtend32(x.byte[2]) * SignExtend32(y.byte[2]) - tmp4 := SignExtend32(x.byte[3]) * SignExtend32(y.byte[3]) - RETURN c + tmp1 + tmp2 + tmp3 + tmp4 - } - FOR m := 0 TO dst.rows - 1 - tmp := dst.row[m] - FOR k := 0 TO (src0.colsb / 4) - 1 - FOR n := 0 TO (dst.colsb / 4) - 1 + + Compute dot-product of bytes in tiles with a source/destination accumulator. Multiply groups of 4 adjacent pairs of signed 8-bit integers in "src0" with corresponding signed 8-bit integers in "src1", producing 4 intermediate 32-bit results. Sum these 4 results with the corresponding 32-bit integer in "dst", and store the 32-bit result back to tile "dst". The shape of tile is specified in the struct of __tile1024i. The register of the tile is allocated by compiler. + + DEFINE DPBD(c, x, y) { + tmp1 := SignExtend32(x.byte[0]) * SignExtend32(y.byte[0]) + tmp2 := SignExtend32(x.byte[1]) * SignExtend32(y.byte[1]) + tmp3 := SignExtend32(x.byte[2]) * SignExtend32(y.byte[2]) + tmp4 := SignExtend32(x.byte[3]) * SignExtend32(y.byte[3]) + RETURN c + tmp1 + tmp2 + tmp3 + tmp4 +} +FOR m := 0 TO dst.rows - 1 + tmp := dst.row[m] + FOR k := 0 TO (src0.colsb / 4) - 1 + FOR n := 0 TO (dst.colsb / 4) - 1 tmp.dword[n] := DPBD(tmp.dword[n], src0.row[m].dword[k], src1.row[k].dword[n]) - ENDFOR - ENDFOR - write_row_and_zero(dst, m, tmp, dst.colsb) - ENDFOR - zero_upper_rows(dst, dst.rows) - zero_tileconfig_start() + ENDFOR + ENDFOR + write_row_and_zero(dst, m, tmp, dst.colsb) +ENDFOR +zero_upper_rows(dst, dst.rows) +zero_tileconfig_start() - - - - AMX-INT8 -
immintrin.h
- Application-Targeted + + + + AMX-INT8 +
immintrin.h
+ Application-Targeted
- - Compute dot-product of bytes in tiles with a source/destination accumulator.
- Multiply groups of 4 adjacent pairs of signed 8-bit integers in "src0" with
- corresponding unsigned 8-bit integers in "src1", producing 4 intermediate 32-bit
- results. Sum these 4 results with the corresponding 32-bit integer in "dst", and store
- the 32-bit result back to tile "dst". The shape of tile is specified in the struct of
- __tile1024i. The register of the tile is allocated by compiler.
-
- DEFINE DPBD(c, x, y) {
- tmp1 := SignExtend32(x.byte[0]) * ZeroExtend32(y.byte[0])
- tmp2 := SignExtend32(x.byte[1]) * ZeroExtend32(y.byte[1])
- tmp3 := SignExtend32(x.byte[2]) * ZeroExtend32(y.byte[2])
- tmp4 := SignExtend32(x.byte[3]) * ZeroExtend32(y.byte[3])
- RETURN c + tmp1 + tmp2 + tmp3 + tmp4
- }
- FOR m := 0 TO dst.rows - 1
- tmp := dst.row[m]
- FOR k := 0 TO (src0.colsb / 4) - 1
- FOR n := 0 TO (dst.colsb / 4) - 1
tmp.dword[n] := DPBD(tmp.dword[n], src0.row[m].dword[k], src1.row[k].dword[n])
- ENDFOR
- ENDFOR
- write_row_and_zero(dst, m, tmp, dst.colsb)
- ENDFOR
- zero_upper_rows(dst, dst.rows)
- zero_tileconfig_start()
+ + Compute dot-product of bytes in tiles with a source/destination accumulator. Multiply groups of 4 adjacent pairs of signed 8-bit integers in "src0" with corresponding unsigned 8-bit integers in "src1", producing 4 intermediate 32-bit results. Sum these 4 results with the corresponding 32-bit integer in "dst", and store the 32-bit result back to tile "dst". The shape of the tile is specified in the struct __tile1024i. The register of the tile is allocated by the compiler.
+
+ DEFINE DPBD(c, x, y) {
+ tmp1 := SignExtend32(x.byte[0]) * ZeroExtend32(y.byte[0])
+ tmp2 := SignExtend32(x.byte[1]) * ZeroExtend32(y.byte[1])
+ tmp3 := SignExtend32(x.byte[2]) * ZeroExtend32(y.byte[2])
+ tmp4 := SignExtend32(x.byte[3]) * ZeroExtend32(y.byte[3])
+ RETURN c + tmp1 + tmp2 + tmp3 + tmp4
+}
+FOR m := 0 TO dst.rows - 1
+ tmp := dst.row[m]
+ FOR k := 0 TO (src0.colsb / 4) - 1
+ FOR n := 0 TO (dst.colsb / 4) - 1
tmp.dword[n] := DPBD(tmp.dword[n], src0.row[m].dword[k], src1.row[k].dword[n])
+ ENDFOR
+ ENDFOR
+ write_row_and_zero(dst, m, tmp, dst.colsb)
+ENDFOR
+zero_upper_rows(dst, dst.rows)
+zero_tileconfig_start()

- - - - AMX-INT8 -
immintrin.h
- Application-Targeted + + + + AMX-INT8 +
immintrin.h
+ Application-Targeted
- - Compute dot-product of bytes in tiles with a source/destination accumulator.
- Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "src0" with
- corresponding signed 8-bit integers in "src1", producing 4 intermediate 32-bit results.
- Sum these 4 results with the corresponding 32-bit integer in "dst", and store the 32-bit
- result back to tile "dst". The shape of tile is specified in the struct of __tile1024i.
- The register of the tile is allocated by compiler.
-
- DEFINE DPBD(c, x, y) {
- tmp1 := ZeroExtend32(x.byte[0]) * SignExtend32(y.byte[0])
- tmp2 := ZeroExtend32(x.byte[1]) * SignExtend32(y.byte[1])
- tmp3 := ZeroExtend32(x.byte[2]) * SignExtend32(y.byte[2])
- tmp4 := ZeroExtend32(x.byte[3]) * SignExtend32(y.byte[3])
- RETURN c + tmp1 + tmp2 + tmp3 + tmp4
- }
- FOR m := 0 TO dst.rows - 1
- tmp := dst.row[m]
- FOR k := 0 TO (src0.colsb / 4) - 1
- FOR n := 0 TO (dst.colsb / 4) - 1
tmp.dword[n] := DPBD(tmp.dword[n], src0.row[m].dword[k], src1.row[k].dword[n])
- ENDFOR
- ENDFOR
- write_row_and_zero(dst, m, tmp, dst.colsb)
- ENDFOR
- zero_upper_rows(dst, dst.rows)
- zero_tileconfig_start()
+ + Compute dot-product of bytes in tiles with a source/destination accumulator. Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "src0" with corresponding signed 8-bit integers in "src1", producing 4 intermediate 32-bit results. Sum these 4 results with the corresponding 32-bit integer in "dst", and store the 32-bit result back to tile "dst". The shape of the tile is specified in the struct __tile1024i. The register of the tile is allocated by the compiler.
+
+ DEFINE DPBD(c, x, y) {
+ tmp1 := ZeroExtend32(x.byte[0]) * SignExtend32(y.byte[0])
+ tmp2 := ZeroExtend32(x.byte[1]) * SignExtend32(y.byte[1])
+ tmp3 := ZeroExtend32(x.byte[2]) * SignExtend32(y.byte[2])
+ tmp4 := ZeroExtend32(x.byte[3]) * SignExtend32(y.byte[3])
+ RETURN c + tmp1 + tmp2 + tmp3 + tmp4
+}
+FOR m := 0 TO dst.rows - 1
+ tmp := dst.row[m]
+ FOR k := 0 TO (src0.colsb / 4) - 1
+ FOR n := 0 TO (dst.colsb / 4) - 1
tmp.dword[n] := DPBD(tmp.dword[n], src0.row[m].dword[k], src1.row[k].dword[n])
+ ENDFOR
+ ENDFOR
+ write_row_and_zero(dst, m, tmp, dst.colsb)
+ENDFOR
+zero_upper_rows(dst, dst.rows)
+zero_tileconfig_start()

- - - - AMX-INT8 -
immintrin.h
- Application-Targeted + + + + AMX-INT8 +
immintrin.h
+ Application-Targeted
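Note: the four byte dot-product variants (ssd/sud/usd/uud) differ only in whether each operand is sign- or zero-extended before the multiply. A hedged Rust sketch making the shared structure explicit (the helper names are invented for illustration):

// SignExtend32/ZeroExtend32 from the pseudocode, selected by a flag.
fn widen(byte: u8, signed: bool) -> i32 {
    if signed { byte as i8 as i32 } else { byte as i32 }
}

// Generic DPBD covering all four variants.
fn dpbd(c: i32, x: [u8; 4], y: [u8; 4], x_signed: bool, y_signed: bool) -> i32 {
    (0..4).fold(c, |acc, i| acc + widen(x[i], x_signed) * widen(y[i], y_signed))
}

fn main() {
    // 0xFF is -1 when sign-extended but 255 when zero-extended.
    assert_eq!(dpbd(0, [0xFF, 0, 0, 0], [1, 0, 0, 0], true, true), -1);
    assert_eq!(dpbd(0, [0xFF, 0, 0, 0], [1, 0, 0, 0], false, true), 255);
}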
- - Compute dot-product of bytes in tiles with a source/destination accumulator.
- Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "src0" with
- corresponding unsigned 8-bit integers in "src1", producing 4 intermediate 32-bit
- results. Sum these 4 results with the corresponding 32-bit integer in "dst", and store
- the 32-bit result back to tile "dst". The shape of tile is specified in the struct of
- __tile1024i. The register of the tile is allocated by compiler.
-
- DEFINE DPBD(c, x, y) {
- tmp1 := ZeroExtend32(x.byte[0]) * ZeroExtend32(y.byte[0])
- tmp2 := ZeroExtend32(x.byte[1]) * ZeroExtend32(y.byte[1])
- tmp3 := ZeroExtend32(x.byte[2]) * ZeroExtend32(y.byte[2])
- tmp4 := ZeroExtend32(x.byte[3]) * ZeroExtend32(y.byte[3])
- RETURN c + tmp1 + tmp2 + tmp3 + tmp4
- }
- FOR m := 0 TO dst.rows - 1
- tmp := dst.row[m]
- FOR k := 0 TO (src0.colsb / 4) - 1
- FOR n := 0 TO (dst.colsb / 4) - 1
tmp.dword[n] := DPBD(tmp.dword[n], src0.row[m].dword[k], src1.row[k].dword[n])
- ENDFOR
- ENDFOR
- write_row_and_zero(dst, m, tmp, dst.colsb)
- ENDFOR
- zero_upper_rows(dst, dst.rows)
- zero_tileconfig_start()
+ + Compute dot-product of bytes in tiles with a source/destination accumulator. Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "src0" with corresponding unsigned 8-bit integers in "src1", producing 4 intermediate 32-bit results. Sum these 4 results with the corresponding 32-bit integer in "dst", and store the 32-bit result back to tile "dst". The shape of the tile is specified in the struct __tile1024i. The register of the tile is allocated by the compiler.
+
+ DEFINE DPBD(c, x, y) {
+ tmp1 := ZeroExtend32(x.byte[0]) * ZeroExtend32(y.byte[0])
+ tmp2 := ZeroExtend32(x.byte[1]) * ZeroExtend32(y.byte[1])
+ tmp3 := ZeroExtend32(x.byte[2]) * ZeroExtend32(y.byte[2])
+ tmp4 := ZeroExtend32(x.byte[3]) * ZeroExtend32(y.byte[3])
+ RETURN c + tmp1 + tmp2 + tmp3 + tmp4
+}
+FOR m := 0 TO dst.rows - 1
+ tmp := dst.row[m]
+ FOR k := 0 TO (src0.colsb / 4) - 1
+ FOR n := 0 TO (dst.colsb / 4) - 1
tmp.dword[n] := DPBD(tmp.dword[n], src0.row[m].dword[k], src1.row[k].dword[n])
+ ENDFOR
+ ENDFOR
+ write_row_and_zero(dst, m, tmp, dst.colsb)
+ENDFOR
+zero_upper_rows(dst, dst.rows)
+zero_tileconfig_start()

- - - - AMX-INT8 -
immintrin.h
- Application-Targeted -
- - - - - - Load tile configuration from a 64-byte memory location specified by "mem_addr". - The tile configuration format is specified below, and includes the tile type pallette, - the number of bytes per row, and the number of rows. If the specified pallette_id is - zero, that signifies the init state for both the tile config and the tile data, and the - tiles are zeroed. Any invalid configurations will result in #GP fault. - - // format of memory payload. each field is a byte. - // 0: palette - // 1: start_row - // 2-15: reserved, must be zero - // 16-17: tile0.colsb - // 18-19: tile1.colsb - // 20-21: tile2.colsb - // ... - // 30-31: tile7.colsb - // 32-47: reserved, must be zero - // 48: tile0.rows - // 49: tile1.rows - // 50: tile2.rows - // ... - // 55: tile7.rows - // 56-63: reserved, must be zero - - - AMX-TILE -
immintrin.h
- Application-Targeted + + + + AMX-INT8 +
immintrin.h
+ Application-Targeted +
+ + + + + Load tile configuration from a 64-byte memory location specified by "mem_addr". The tile configuration format is specified below, and includes the tile type palette, the number of bytes per row, and the number of rows. If the specified palette_id is zero, that signifies the init state for both the tile config and the tile data, and the tiles are zeroed. Any invalid configurations will result in a #GP fault.
+
+// format of memory payload. each field is a byte.
+// 0: palette
+// 1: start_row
+// 2-15: reserved, must be zero
+// 16-17: tile0.colsb
+// 18-19: tile1.colsb
+// 20-21: tile2.colsb
+// ...
+// 30-31: tile7.colsb
+// 32-47: reserved, must be zero
+// 48: tile0.rows
+// 49: tile1.rows
+// 50: tile2.rows
+// ...
+// 55: tile7.rows
+// 56-63: reserved, must be zero
+
+ AMX-TILE +
immintrin.h
+ Application-Targeted
- - - Stores the current tile configuration to a 64-byte memory location specified by - "mem_addr". The tile configuration format is specified below, and includes the tile type - pallette, the number of bytes per row, and the number of rows. If tiles are not - configured, all zeroes will be stored to memory. - - // format of memory payload. each field is a byte. - // 0: palette - // 1: start_row - // 2-15: reserved, must be zero - // 16-17: tile0.colsb - // 18-19: tile1.colsb - // 20-21: tile2.colsb - // ... - // 30-31: tile7.colsb - // 32-47: reserved, must be zero - // 48: tile0.rows - // 49: tile1.rows - // 50: tile2.rows - // ... - // 55: tile7.rows - // 56-63: reserved, must be zero - - - AMX-TILE -
immintrin.h
- Application-Targeted
+ + + Stores the current tile configuration to a 64-byte memory location specified by "mem_addr". The tile configuration format is specified below, and includes the tile type palette, the number of bytes per row, and the number of rows. If tiles are not configured, all zeroes will be stored to memory.
+
+// format of memory payload. each field is a byte.
+// 0: palette
+// 1: start_row
+// 2-15: reserved, must be zero
+// 16-17: tile0.colsb
+// 18-19: tile1.colsb
+// 20-21: tile2.colsb
+// ...
+// 30-31: tile7.colsb
+// 32-47: reserved, must be zero
+// 48: tile0.rows
+// 49: tile1.rows
+// 50: tile2.rows
+// ...
+// 55: tile7.rows
+// 56-63: reserved, must be zero
+
+ AMX-TILE +
immintrin.h
+ Application-Targeted
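Note: the 64-byte payload documented for _tile_loadconfig/_tile_storeconfig maps directly onto a fixed-layout struct. A sketch of that layout under the byte offsets given above (the field names are mine):

// 64-byte AMX tile configuration: palette at byte 0, start_row at byte 1,
// per-tile bytes-per-row at bytes 16..32, per-tile row counts at 48..56.
#[repr(C)]
struct TileConfig {
    palette: u8,
    start_row: u8,
    reserved_2_15: [u8; 14],
    colsb: [u16; 8],
    reserved_32_47: [u8; 16],
    rows: [u8; 8],
    reserved_56_63: [u8; 8],
}

fn main() {
    // The layout must come out to exactly 64 bytes.
    assert_eq!(core::mem::size_of::<TileConfig>(), 64);
}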
- - - - - Load tile rows from memory specifieid by "base" address and "stride" into - destination tile "dst" using the tile configuration previously configured via - "_tile_loadconfig". - start := tileconfig.startRow - IF start == 0 // not restarting, zero incoming state - tilezero(dst) - FI - nbytes := dst.colsb - DO WHILE start < dst.rows - memptr := base + start * stride - write_row_and_zero(dst, start, read_memory(memptr, nbytes), nbytes) - start := start + 1 - OD - zero_upper_rows(dst, dst.rows) - zero_tileconfig_start() - - - AMX-TILE -
immintrin.h
- Application-Targeted
+ + + + + Load tile rows from memory specified by "base" address and "stride" into destination tile "dst" using the tile configuration previously configured via "_tile_loadconfig".
+ start := tileconfig.startRow
+IF start == 0 // not restarting, zero incoming state
+ tilezero(dst)
+FI
+nbytes := dst.colsb
+DO WHILE start < dst.rows
+ memptr := base + start * stride
+ write_row_and_zero(dst, start, read_memory(memptr, nbytes), nbytes)
+ start := start + 1
+OD
+zero_upper_rows(dst, dst.rows)
+zero_tileconfig_start()
+
+ AMX-TILE +
immintrin.h
+ Application-Targeted
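Note: the load loop above is a plain strided row copy: row r comes from base + r*stride. A safe-Rust sketch of the same access pattern (slice-based rather than raw pointers, purely illustrative):

// Copy `rows` rows of `nbytes` bytes each, `stride` bytes apart, mirroring
// the memptr := base + start * stride walk in the pseudocode.
fn load_tile_rows(base: &[u8], stride: usize, rows: usize, nbytes: usize) -> Vec<Vec<u8>> {
    (0..rows).map(|r| base[r * stride..r * stride + nbytes].to_vec()).collect()
}

fn main() {
    let mem: Vec<u8> = (0u8..32).collect();
    // Two rows of 4 bytes, 16 bytes apart.
    let tile = load_tile_rows(&mem, 16, 2, 4);
    assert_eq!(tile[0], [0, 1, 2, 3]);
    assert_eq!(tile[1], [16, 17, 18, 19]);
}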
- - - - - Load tile rows from memory specifieid by "base" address and "stride" into - destination tile "dst" using the tile configuration previously configured via - "_tile_loadconfig". This intrinsic provides a hint to the implementation that the data - will likely not be reused in the near future and the data caching can be optimized - accordingly. - start := tileconfig.startRow - IF start == 0 // not restarting, zero incoming state - tilezero(dst) - FI - nbytes := dst.colsb - DO WHILE start < dst.rows - memptr := base + start * stride - write_row_and_zero(dst, start, read_memory(memptr, nbytes), nbytes) - start := start + 1 - OD - zero_upper_rows(dst, dst.rows) - zero_tileconfig_start() - - - AMX-TILE -
immintrin.h
- Application-Targeted
+ + + + + Load tile rows from memory specified by "base" address and "stride" into destination tile "dst" using the tile configuration previously configured via "_tile_loadconfig". This intrinsic provides a hint to the implementation that the data will likely not be reused in the near future and the data caching can be optimized accordingly.
+ start := tileconfig.startRow
+IF start == 0 // not restarting, zero incoming state
+ tilezero(dst)
+FI
+nbytes := dst.colsb
+DO WHILE start < dst.rows
+ memptr := base + start * stride
+ write_row_and_zero(dst, start, read_memory(memptr, nbytes), nbytes)
+ start := start + 1
+OD
+zero_upper_rows(dst, dst.rows)
+zero_tileconfig_start()
+
+ AMX-TILE +
immintrin.h
+ Application-Targeted
- - Release the tile configuration to return to the init state, which releases all - storage it currently holds. - - AMX-TILE -
immintrin.h
- Application-Targeted + + Release the tile configuration to return to the init state, which releases all storage it currently holds. + + AMX-TILE +
immintrin.h
+ Application-Targeted
- - - - - Store the tile specified by "src" to memory specifieid by "base" address and - "stride" using the tile configuration previously configured via "_tile_loadconfig". - start := tileconfig.startRow - DO WHILE start < src.rows - memptr := base + start * stride - write_memory(memptr, src.colsb, src.row[start]) - start := start + 1 - OD - zero_tileconfig_start() - - - AMX-TILE -
immintrin.h
- Application-Targeted
+ + + + + Store the tile specified by "src" to memory specified by "base" address and "stride" using the tile configuration previously configured via "_tile_loadconfig".
+ start := tileconfig.startRow
+DO WHILE start < src.rows
+ memptr := base + start * stride
+ write_memory(memptr, src.colsb, src.row[start])
+ start := start + 1
+OD
+zero_tileconfig_start()
+
+ AMX-TILE +
immintrin.h
+ Application-Targeted
- - - Zero the tile specified by "tdest". - nbytes := palette_table[tileconfig.palette_id].bytes_per_row - FOR i := 0 TO palette_table[tileconfig.palette_id].max_rows-1 - FOR j := 0 TO nbytes-1 - tdest.row[i].byte[j] := 0 - ENDFOR - ENDFOR - - - AMX-TILE -
immintrin.h
- Application-Targeted + + + Zero the tile specified by "tdest". + nbytes := palette_table[tileconfig.palette_id].bytes_per_row +FOR i := 0 TO palette_table[tileconfig.palette_id].max_rows-1 + FOR j := 0 TO nbytes-1 + tdest.row[i].byte[j] := 0 + ENDFOR +ENDFOR + + + AMX-TILE +
immintrin.h
+ Application-Targeted
- - Load tile rows from memory specifieid by "base" address and "stride" into
- destination tile "dst". The shape of tile is specified in the struct of __tile1024i. The
- register of the tile is allocated by compiler.
-
- start := tileconfig.startRow
- IF start == 0 // not restarting, zero incoming state
- tilezero(dst)
- FI
- nbytes := dst.colsb
- DO WHILE start < dst.rows
- memptr := base + start * stride
- write_row_and_zero(dst, start, read_memory(memptr, nbytes), nbytes)
- start := start + 1
- OD
- zero_upper_rows(dst, dst.rows)
- zero_tileconfig_start()
+ + Load tile rows from memory specified by "base" address and "stride" into destination tile "dst". The shape of the tile is specified in the struct __tile1024i. The register of the tile is allocated by the compiler.
+
+ start := tileconfig.startRow
+IF start == 0 // not restarting, zero incoming state
+ tilezero(dst)
+FI
+nbytes := dst.colsb
+DO WHILE start < dst.rows
+ memptr := base + start * stride
+ write_row_and_zero(dst, start, read_memory(memptr, nbytes), nbytes)
+ start := start + 1
+OD
+zero_upper_rows(dst, dst.rows)
+zero_tileconfig_start()

- - - - AMX-TILE -
immintrin.h
- Application-Targeted + + + + AMX-TILE +
immintrin.h
+ Application-Targeted
- - Store the tile specified by "src" to memory specifieid by "base" address and
- "stride". The shape of tile is specified in the struct of __tile1024i. The register of
- the tile is allocated by compiler.
-
- start := tileconfig.startRow
- DO WHILE start < src.rows
- memptr := base + start * stride
- write_memory(memptr, src.colsb, src.row[start])
- start := start + 1
- OD
- zero_tileconfig_start()
+ + Store the tile specified by "src" to memory specified by "base" address and "stride". The shape of the tile is specified in the struct __tile1024i. The register of the tile is allocated by the compiler.
+
+ start := tileconfig.startRow
+DO WHILE start < src.rows
+ memptr := base + start * stride
+ write_memory(memptr, src.colsb, src.row[start])
+ start := start + 1
+OD
+zero_tileconfig_start()

- - - - AMX-TILE -
immintrin.h
- Application-Targeted + + + + AMX-TILE +
immintrin.h
+ Application-Targeted
- - Load tile rows from memory specifieid by "base" address and "stride" into
- destination tile "dst". This intrinsic provides a hint to the implementation that the
- data will likely not be reused in the near future and the data caching can be optimized
- accordingly. The shape of tile is specified in the struct of __tile1024i. The register
- of the tile is allocated by compiler.
-
- start := tileconfig.startRow
- IF start == 0 // not restarting, zero incoming state
- tilezero(dst)
- FI
- nbytes := dst.colsb
- DO WHILE start < dst.rows
- memptr := base + start * stride
- write_row_and_zero(dst, start, read_memory(memptr, nbytes), nbytes)
- start := start + 1
- OD
- zero_upper_rows(dst, dst.rows)
- zero_tileconfig_start()
+ + Load tile rows from memory specified by "base" address and "stride" into destination tile "dst". This intrinsic provides a hint to the implementation that the data will likely not be reused in the near future and the data caching can be optimized accordingly. The shape of the tile is specified in the struct __tile1024i. The register of the tile is allocated by the compiler.
+
+ start := tileconfig.startRow
+IF start == 0 // not restarting, zero incoming state
+ tilezero(dst)
+FI
+nbytes := dst.colsb
+DO WHILE start < dst.rows
+ memptr := base + start * stride
+ write_row_and_zero(dst, start, read_memory(memptr, nbytes), nbytes)
+ start := start + 1
+OD
+zero_upper_rows(dst, dst.rows)
+zero_tileconfig_start()

- - - - AMX-TILE -
immintrin.h
- Application-Targeted + + + + AMX-TILE +
immintrin.h
+ Application-Targeted
- - Zero the tile specified by "dst". The shape of tile is specified in the struct
- of __tile1024i. The register of the tile is allocated by compiler.
-
- nbytes := palette_table[tileconfig.palette_id].bytes_per_row
- FOR i := 0 TO palette_table[tileconfig.palette_id].max_rows-1
- FOR j := 0 TO nbytes-1
- tdest.row[i].byte[j] := 0
- ENDFOR
- ENDFOR
+ + Zero the tile specified by "dst". The shape of the tile is specified in the struct __tile1024i. The register of the tile is allocated by the compiler.
+
+ nbytes := palette_table[tileconfig.palette_id].bytes_per_row
+FOR i := 0 TO palette_table[tileconfig.palette_id].max_rows-1
+ FOR j := 0 TO nbytes-1
+ tdest.row[i].byte[j] := 0
+ ENDFOR
+ENDFOR

- - AMX-TILE -
immintrin.h
- Application-Targeted -
- - - - - - Compute the inverse cosine of packed double-precision (64-bit) floating-point - elements in "a", and store the results in "dst". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := ACOS(a[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Trigonometry + + AMX-TILE +
immintrin.h
+ Application-Targeted +
+ + + + + Compute the inverse cosine of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := ACOS(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Trigonometry
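Note: these AVX trigonometry entries are SVML-style library routines rather than single instructions, so a portable reference is just a per-lane scalar call. A sketch for the 4-lane double-precision acos above (the function name is mine):

// Per-lane reference: apply f64::acos to each 64-bit lane,
// as in the FOR j := 0 to 3 loop of the pseudocode.
fn acos_pd_ref(a: [f64; 4]) -> [f64; 4] {
    a.map(f64::acos)
}

fn main() {
    let r = acos_pd_ref([1.0, 0.0, -1.0, 0.5]);
    assert!((r[0] - 0.0).abs() < 1e-12);
    assert!((r[1] - std::f64::consts::FRAC_PI_2).abs() < 1e-12);
}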
- - - Compute the inverse cosine of packed single-precision (32-bit) floating-point - elements in "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := ACOS(a[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Trigonometry + + + Compute the inverse cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := ACOS(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Trigonometry
- - - Compute the inverse hyperbolic cosine of packed double-precision (64-bit) - floating-point elements in "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := ACOSH(a[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Trigonometry + + + Compute the inverse hyperbolic cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := ACOSH(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Trigonometry
- - - Compute the inverse hyperbolic cosine of packed single-precision (32-bit) - floating-point elements in "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := ACOSH(a[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Trigonometry + + + Compute the inverse hyperbolic cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := ACOSH(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Trigonometry
- - - Compute the inverse sine of packed double-precision (64-bit) floating-point - elements in "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := ASIN(a[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Trigonometry + + + Compute the inverse sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := ASIN(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Trigonometry
- - - Compute the inverse sine of packed single-precision (32-bit) floating-point - elements in "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := ASIN(a[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Trigonometry + + + Compute the inverse sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := ASIN(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Trigonometry
- - - Compute the inverse hyperbolic sine of packed double-precision (64-bit) - floating-point elements in "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := ASINH(a[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Trigonometry + + + Compute the inverse hyperbolic sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := ASINH(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Trigonometry
- - - Compute the inverse hyperbolic sine of packed single-precision (32-bit) - floating-point elements in "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := ASINH(a[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Trigonometry + + + Compute the inverse hyperbolic sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := ASINH(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Trigonometry
- - - Compute the inverse tangent of packed double-precision (64-bit) floating-point - elements in "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := ATAN(a[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Trigonometry + + + Compute the inverse tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := ATAN(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Trigonometry
- - - Compute the inverse tangent of packed single-precision (32-bit) floating-point - elements in "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := ATAN(a[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Trigonometry + + + Compute the inverse tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := ATAN(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Trigonometry
- - - - Compute the inverse tangent of packed double-precision (64-bit) floating-point - elements in "a" divided by packed elements in "b", and store the results in "dst" - expressed in radians. - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := ATAN2(a[i+63:i], b[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Trigonometry + + + + Compute the inverse tangent of packed double-precision (64-bit) floating-point elements in "a" divided by packed elements in "b", and store the results in "dst" expressed in radians. + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := ATAN2(a[i+63:i], b[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Trigonometry
- - - - Compute the inverse tangent of packed single-precision (32-bit) floating-point - elements in "a" divided by packed elements in "b", and store the results in "dst" - expressed in radians. - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := ATAN2(a[i+31:i], b[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Trigonometry + + + + Compute the inverse tangent of packed single-precision (32-bit) floating-point elements in "a" divided by packed elements in "b", and store the results in "dst" expressed in radians. + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := ATAN2(a[i+31:i], b[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Trigonometry
- - - Compute the inverse hyperbolic tangent of packed double-precision (64-bit) - floating-point elements in "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := ATANH(a[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Trigonometry + + + Compute the inverse hyperbolic tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := ATANH(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Trigonometry
- - - Compute the inverse hyperbolic tangent of packed single-precision (32-bit) - floating-point elements in "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := ATANH(a[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Trigonometry + + + Compute the inverse hyperbolic tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := ATANH(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Trigonometry
- - - Compute the cosine of packed double-precision (64-bit) floating-point elements - in "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := COS(a[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Trigonometry + + + Compute the cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := COS(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Trigonometry
- - - Compute the cosine of packed single-precision (32-bit) floating-point elements - in "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := COS(a[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Trigonometry + + + Compute the cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := COS(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Trigonometry
- - - Compute the cosine of packed double-precision (64-bit) floating-point elements - in "a" expressed in degrees, and store the results in "dst". - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := COSD(a[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Trigonometry + + + Compute the cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst". + FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := COSD(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Trigonometry
- - - Compute the cosine of packed single-precision (32-bit) floating-point elements - in "a" expressed in degrees, and store the results in "dst". - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := COSD(a[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Trigonometry + + + Compute the cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst". + FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := COSD(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Trigonometry
- - - Compute the hyperbolic cosine of packed double-precision (64-bit) - floating-point elements in "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := COSH(a[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Trigonometry + + + Compute the hyperbolic cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := COSH(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Trigonometry
- - - Compute the hyperbolic cosine of packed single-precision (32-bit) - floating-point elements in "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := COSH(a[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Trigonometry + + + Compute the hyperbolic cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := COSH(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Trigonometry
- - - - Compute the length of the hypotenous of a right triangle, with the lengths of - the other two sides of the triangle stored as packed double-precision (64-bit) - floating-point elements in "a" and "b", and store the results in "dst". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := SQRT(POW(a[i+63:i], 2.0) + POW(b[i+63:i], 2.0)) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Trigonometry
+ + + + Compute the length of the hypotenuse of a right triangle, with the lengths of the other two sides of the triangle stored as packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst".
+
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := SQRT(POW(a[i+63:i], 2.0) + POW(b[i+63:i], 2.0))
+ENDFOR
+dst[MAX:256] := 0
+
+ AVX +
immintrin.h
+ Trigonometry
- - - - Compute the length of the hypotenous of a right triangle, with the lengths of - the other two sides of the triangle stored as packed single-precision (32-bit) - floating-point elements in "a" and "b", and store the results in "dst". - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := SQRT(POW(a[i+31:i], 2.0) + POW(b[i+31:i], 2.0)) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Trigonometry
+ + + + Compute the length of the hypotenuse of a right triangle, with the lengths of the other two sides of the triangle stored as packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst".
+
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := SQRT(POW(a[i+31:i], 2.0) + POW(b[i+31:i], 2.0))
+ENDFOR
+dst[MAX:256] := 0
+
+ AVX +
immintrin.h
+ Trigonometry
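Note: the hypot pseudocode squares both inputs before the square root, which can overflow for large lanes; Rust's built-in hypot avoids that. A per-lane sketch of the single-precision variant:

// Per-lane reference for packed hypot; f32::hypot is used instead of the
// literal SQRT(POW(a,2) + POW(b,2)) to avoid intermediate overflow.
fn hypot_ps_ref(a: [f32; 8], b: [f32; 8]) -> [f32; 8] {
    let mut dst = [0.0f32; 8];
    for i in 0..8 {
        dst[i] = a[i].hypot(b[i]);
    }
    dst
}

fn main() {
    assert_eq!(hypot_ps_ref([3.0; 8], [4.0; 8])[0], 5.0);
}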
- - - Compute the sine of packed double-precision (64-bit) floating-point elements in - "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := SIN(a[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Trigonometry + + + Compute the sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := SIN(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Trigonometry
- - - Compute the sine of packed single-precision (32-bit) floating-point elements in - "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := SIN(a[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Trigonometry + + + Compute the sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := SIN(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Trigonometry
- - - - Compute the sine and cosine of packed double-precision (64-bit) floating-point - elements in "a" expressed in radians, store the sine in "dst", and store the cosine into - memory at "mem_addr". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := SIN(a[i+63:i]) - MEM[mem_addr+i+63:mem_addr+i] := COS(a[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Trigonometry + + + + Compute the sine and cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, store the sine in "dst", and store the cosine into memory at "mem_addr". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := SIN(a[i+63:i]) + MEM[mem_addr+i+63:mem_addr+i] := COS(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Trigonometry
- - - - Compute the sine and cosine of packed single-precision (32-bit) floating-point - elements in "a" expressed in radians, store the sine in "dst", and store the cosine into - memory at "mem_addr". - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := SIN(a[i+31:i]) - MEM[mem_addr+i+31:mem_addr+i] := COS(a[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Trigonometry + + + + Compute the sine and cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, store the sine in "dst", and store the cosine into memory at "mem_addr". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := SIN(a[i+31:i]) + MEM[mem_addr+i+31:mem_addr+i] := COS(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Trigonometry
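Note: the sincos entries produce two results per lane, sine into the returned register and cosine through the memory operand. A sketch mirroring that split (the out-parameter signature is my own framing):

// Sine goes to the returned array ("dst"); cosine is written through the
// out-parameter, like the MEM[mem_addr+...] stores in the pseudocode.
fn sincos_ps_ref(a: [f32; 8], cos_out: &mut [f32; 8]) -> [f32; 8] {
    let mut dst = [0.0f32; 8];
    for i in 0..8 {
        let (s, c) = a[i].sin_cos();
        dst[i] = s;
        cos_out[i] = c;
    }
    dst
}

fn main() {
    let mut cos = [0.0f32; 8];
    let sin = sincos_ps_ref([0.0; 8], &mut cos);
    assert_eq!(sin[0], 0.0);
    assert_eq!(cos[0], 1.0);
}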
- - - Compute the sine of packed double-precision (64-bit) floating-point elements in - "a" expressed in degrees, and store the results in "dst". - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := SIND(a[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Trigonometry + + + Compute the sine of packed double-precision (64-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst". + FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := SIND(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Trigonometry
- - - Compute the sine of packed single-precision (32-bit) floating-point elements in - "a" expressed in degrees, and store the results in "dst". - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := SIND(a[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Trigonometry + + + Compute the sine of packed single-precision (32-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst". + FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := SIND(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Trigonometry
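Note: the *d variants (sind, cosd, tand) take their arguments in degrees; the reference is a unit conversion followed by the radian function. For instance:

// SIND per lane: degrees to radians, then sine.
fn sind_pd_ref(a: [f64; 4]) -> [f64; 4] {
    a.map(|x| x.to_radians().sin())
}

fn main() {
    let r = sind_pd_ref([0.0, 90.0, 180.0, 270.0]);
    assert!((r[1] - 1.0).abs() < 1e-12);
    assert!((r[3] + 1.0).abs() < 1e-12);
}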
- - - Compute the hyperbolic sine of packed double-precision (64-bit) floating-point - elements in "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := SINH(a[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Trigonometry + + + Compute the hyperbolic sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := SINH(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Trigonometry
- - - Compute the hyperbolic sine of packed single-precision (32-bit) floating-point - elements in "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := SINH(a[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Trigonometry + + + Compute the hyperbolic sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := SINH(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Trigonometry
- - - Compute the tangent of packed double-precision (64-bit) floating-point elements - in "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := TAN(a[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Trigonometry + + + Compute the tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := TAN(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Trigonometry
- - - Compute the tangent of packed single-precision (32-bit) floating-point elements - in "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := TAN(a[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Trigonometry + + + Compute the tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := TAN(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Trigonometry
- - - Compute the tangent of packed double-precision (64-bit) floating-point elements - in "a" expressed in degrees, and store the results in "dst". - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := TAND(a[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Trigonometry + + + Compute the tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst". + FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := TAND(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Trigonometry
- - - Compute the tangent of packed single-precision (32-bit) floating-point elements - in "a" expressed in degrees, and store the results in "dst". - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := TAND(a[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Trigonometry + + + Compute the tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst". + FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := TAND(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Trigonometry
- - - Compute the hyperbolic tangent of packed double-precision (64-bit) - floating-point elements in "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := TANH(a[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Trigonometry + + + Compute the hyperbolic tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := TANH(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Trigonometry
- - - Compute the hyperbolic tangent of packed single-precision (32-bit) - floating-point elements in "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := TANH(a[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Trigonometry + + + Compute the hyperbolic tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := TANH(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Trigonometry
- - - Compute the cube root of packed double-precision (64-bit) floating-point - elements in "a", and store the results in "dst". - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := CubeRoot(a[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Elementary Math Functions + + + Compute the cube root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := CubeRoot(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Elementary Math Functions
- - - Compute the cube root of packed single-precision (32-bit) floating-point - elements in "a", and store the results in "dst". - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := CubeRoot(a[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Elementary Math Functions + + + Compute the cube root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := CubeRoot(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Elementary Math Functions
- - - Compute the exponential value of "e" raised to the power of packed complex - numbers in "a", and store the complex results in "dst". Each complex number is composed - of two adjacent single-precision (32-bit) floating-point elements, which defines the - complex number "complex = vec.fp32[0] + i * vec.fp32[1]". - - DEFINE CEXP(a[31:0], b[31:0]) { - result[31:0] := POW(FP32(e), a[31:0]) * COS(b[31:0]) - result[63:32] := POW(FP32(e), a[31:0]) * SIN(b[31:0]) - RETURN result - } - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := CEXP(a[i+31:i], a[i+63:i+32]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Elementary Math Functions + + + Compute the exponential value of "e" raised to the power of packed complex numbers in "a", and store the complex results in "dst". Each complex number is composed of two adjacent single-precision (32-bit) floating-point elements, which defines the complex number "complex = vec.fp32[0] + i * vec.fp32[1]". + +DEFINE CEXP(a[31:0], b[31:0]) { + result[31:0] := POW(FP32(e), a[31:0]) * COS(b[31:0]) + result[63:32] := POW(FP32(e), a[31:0]) * SIN(b[31:0]) + RETURN result +} +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := CEXP(a[i+31:i], a[i+63:i+32]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Elementary Math Functions
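Note: CEXP above is Euler's formula applied to a complex number stored as two adjacent f32 lanes. A scalar Rust sketch of exactly that definition:

// CEXP(re, im) = e^re * (cos(im) + i*sin(im)), returned as the
// (real, imaginary) pair that occupies two adjacent lanes.
fn cexp(re: f32, im: f32) -> (f32, f32) {
    let scale = re.exp();
    (scale * im.cos(), scale * im.sin())
}

fn main() {
    // e^(i*pi) = -1: real part -1, imaginary part ~0.
    let (re, im) = cexp(0.0, std::f32::consts::PI);
    assert!((re + 1.0).abs() < 1e-6);
    assert!(im.abs() < 1e-6);
}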
- - - Compute the natural logarithm of packed complex numbers in "a", and store the - complex results in "dst". Each complex number is composed of two adjacent - single-precision (32-bit) floating-point elements, which defines the complex number - "complex = vec.fp32[0] + i * vec.fp32[1]". - - DEFINE CLOG(a[31:0], b[31:0]) { - result[31:0] := LOG(SQRT(POW(a, 2.0) + POW(b, 2.0))) - result[63:32] := ATAN2(b, a) - RETURN result - } - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := CLOG(a[i+31:i], a[i+63:i+32]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Elementary Math Functions + + + Compute the natural logarithm of packed complex numbers in "a", and store the complex results in "dst". Each complex number is composed of two adjacent single-precision (32-bit) floating-point elements, which defines the complex number "complex = vec.fp32[0] + i * vec.fp32[1]". + +DEFINE CLOG(a[31:0], b[31:0]) { + result[31:0] := LOG(SQRT(POW(a, 2.0) + POW(b, 2.0))) + result[63:32] := ATAN2(b, a) + RETURN result +} +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := CLOG(a[i+31:i], a[i+63:i+32]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Elementary Math Functions
- - - Compute the square root of packed complex snumbers in "a", and store the - complex results in "dst". Each complex number is composed of two adjacent - single-precision (32-bit) floating-point elements, which defines the complex number - "complex = vec.fp32[0] + i * vec.fp32[1]". - - DEFINE CSQRT(a[31:0], b[31:0]) { - sign[31:0] := (b < 0.0) ? -FP32(1.0) : FP32(1.0) - result[31:0] := SQRT((a + SQRT(POW(a, 2.0) + POW(b, 2.0))) / 2.0) - result[63:32] := sign * SQRT((-a + SQRT(POW(a, 2.0) + POW(b, 2.0))) / 2.0) - RETURN result - } - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := CSQRT(a[i+31:i], a[i+63:i+32]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Elementary Math Functions
+ + + Compute the square root of packed complex numbers in "a", and store the complex results in "dst". Each complex number is composed of two adjacent single-precision (32-bit) floating-point elements, which defines the complex number "complex = vec.fp32[0] + i * vec.fp32[1]".
+
+DEFINE CSQRT(a[31:0], b[31:0]) {
+ sign[31:0] := (b < 0.0) ? -FP32(1.0) : FP32(1.0)
+ result[31:0] := SQRT((a + SQRT(POW(a, 2.0) + POW(b, 2.0))) / 2.0)
+ result[63:32] := sign * SQRT((-a + SQRT(POW(a, 2.0) + POW(b, 2.0))) / 2.0)
+ RETURN result
+}
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := CSQRT(a[i+31:i], a[i+63:i+32])
+ENDFOR
+dst[MAX:256] := 0
+
+ AVX +
immintrin.h
+ Elementary Math Functions
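Note: CSQRT computes the principal square root; the `sign` term keeps the imaginary part of the result on the same side as the input's imaginary part. A direct scalar transcription of the pseudocode:

// Principal complex square root: r = |a+bi| feeds both halves, and the
// sign factor matches the result to the input's imaginary half-plane.
fn csqrt(a: f32, b: f32) -> (f32, f32) {
    let r = a.hypot(b);
    let sign = if b < 0.0 { -1.0f32 } else { 1.0 };
    (((r + a) / 2.0).sqrt(), sign * ((r - a) / 2.0).sqrt())
}

fn main() {
    // sqrt(-4 + 0i) = 0 + 2i
    let (re, im) = csqrt(-4.0, 0.0);
    assert!(re.abs() < 1e-6);
    assert!((im - 2.0).abs() < 1e-6);
}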
- - - Compute the exponential value of "e" raised to the power of packed - double-precision (64-bit) floating-point elements in "a", and store the results in - "dst". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := POW(e, a[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Elementary Math Functions + + + Compute the exponential value of "e" raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := POW(e, a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Elementary Math Functions
- - - Compute the exponential value of "e" raised to the power of packed - single-precision (32-bit) floating-point elements in "a", and store the results in - "dst". - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := POW(FP32(e), a[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Elementary Math Functions + + + Compute the exponential value of "e" raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := POW(FP32(e), a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Elementary Math Functions
- - - Compute the exponential value of 10 raised to the power of packed - double-precision (64-bit) floating-point elements in "a", and store the results in - "dst". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := POW(10.0, a[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Elementary Math Functions + + + Compute the exponential value of 10 raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := POW(10.0, a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Elementary Math Functions
- - - Compute the exponential value of 10 raised to the power of packed - single-precision (32-bit) floating-point elements in "a", and store the results in - "dst". - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := POW(FP32(10.0), a[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Elementary Math Functions + + + Compute the exponential value of 10 raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := POW(FP32(10.0), a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Elementary Math Functions
- - - Compute the exponential value of 2 raised to the power of packed - double-precision (64-bit) floating-point elements in "a", and store the results in - "dst". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := POW(2.0, a[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Elementary Math Functions + + + Compute the exponential value of 2 raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := POW(2.0, a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Elementary Math Functions
- - - Compute the exponential value of 2 raised to the power of packed - single-precision (32-bit) floating-point elements in "a", and store the results in - "dst". - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := POW(FP32(2.0), a[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Elementary Math Functions + + + Compute the exponential value of 2 raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := POW(FP32(2.0), a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Elementary Math Functions
- - - Compute the exponential value of "e" raised to the power of packed - double-precision (64-bit) floating-point elements in "a", subtract one from each - element, and store the results in "dst". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := POW(e, a[i+63:i]) - 1.0 - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Elementary Math Functions + + + Compute the exponential value of "e" raised to the power of packed double-precision (64-bit) floating-point elements in "a", subtract one from each element, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := POW(e, a[i+63:i]) - 1.0 +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Elementary Math Functions
- - - Compute the exponential value of "e" raised to the power of packed - single-precision (32-bit) floating-point elements in "a", subtract one from each - element, and store the results in "dst". - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := POW(FP32(e), a[i+31:i]) - 1.0 - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Elementary Math Functions + + + Compute the exponential value of "e" raised to the power of packed single-precision (32-bit) floating-point elements in "a", subtract one from each element, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := POW(FP32(e), a[i+31:i]) - 1.0 +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Elementary Math Functions
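Note: computing e^x - 1 literally loses precision for tiny x; Rust's exp_m1 (like C's expm1) evaluates it accurately. A per-lane sketch of the double-precision variant:

// Per-lane reference for packed expm1; f64::exp_m1 avoids the
// catastrophic cancellation of exp(x) - 1.0 near zero.
fn expm1_pd_ref(a: [f64; 4]) -> [f64; 4] {
    a.map(f64::exp_m1)
}

fn main() {
    let r = expm1_pd_ref([1e-10, 0.0, 1.0, -1.0]);
    assert!((r[0] - 1e-10).abs() < 1e-20);
    assert_eq!(r[1], 0.0);
}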
- - - Compute the inverse cube root of packed double-precision (64-bit) - floating-point elements in "a", and store the results in "dst". - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := InvCubeRoot(a[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Elementary Math Functions + + + Compute the inverse cube root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := InvCubeRoot(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Elementary Math Functions
- - - Compute the inverse cube root of packed single-precision (32-bit) - floating-point elements in "a", and store the results in "dst". - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := InvCubeRoot(a[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Elementary Math Functions + + + Compute the inverse cube root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := InvCubeRoot(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Elementary Math Functions
- - - Compute the inverse square root of packed double-precision (64-bit) - floating-point elements in "a", and store the results in "dst". - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := InvSQRT(a[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Elementary Math Functions + + + Compute the inverse square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := InvSQRT(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Elementary Math Functions
- - - Compute the inverse square root of packed single-precision (32-bit) - floating-point elements in "a", and store the results in "dst". - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := InvSQRT(a[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Elementary Math Functions + + + Compute the inverse square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := InvSQRT(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Elementary Math Functions
- - - Compute the natural logarithm of packed double-precision (64-bit) - floating-point elements in "a", and store the results in "dst". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := LOG(a[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Elementary Math Functions + + + Compute the natural logarithm of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := LOG(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Elementary Math Functions
- - - Compute the natural logarithm of packed single-precision (32-bit) - floating-point elements in "a", and store the results in "dst". - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := LOG(a[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Elementary Math Functions + + + Compute the natural logarithm of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := LOG(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Elementary Math Functions
- - - Compute the base-10 logarithm of packed double-precision (64-bit) - floating-point elements in "a", and store the results in "dst". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := LOG(a[i+63:i]) / LOG(10.0) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Elementary Math Functions + + + Compute the base-10 logarithm of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := LOG(a[i+63:i]) / LOG(10.0) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Elementary Math Functions
- - - Compute the base-10 logarithm of packed single-precision (32-bit) - floating-point elements in "a", and store the results in "dst". - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := LOG(a[i+31:i]) / LOG(10.0) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Elementary Math Functions + + + Compute the base-10 logarithm of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := LOG(a[i+31:i]) / LOG(10.0) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Elementary Math Functions
- - - Compute the natural logarithm of one plus packed double-precision (64-bit) - floating-point elements in "a", and store the results in "dst". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := LOG(1.0 + a[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Elementary Math Functions + + + Compute the natural logarithm of one plus packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := LOG(1.0 + a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Elementary Math Functions
- - - Compute the natural logarithm of one plus packed single-precision (32-bit) - floating-point elements in "a", and store the results in "dst". - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := LOG(1.0 + a[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Elementary Math Functions + + + Compute the natural logarithm of one plus packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := LOG(1.0 + a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Elementary Math Functions
- - - Compute the base-2 logarithm of packed double-precision (64-bit) floating-point - elements in "a", and store the results in "dst". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := LOG(a[i+63:i]) / LOG(2.0) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Elementary Math Functions + + + Compute the base-2 logarithm of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := LOG(a[i+63:i]) / LOG(2.0) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Elementary Math Functions
- - - Compute the base-2 logarithm of packed single-precision (32-bit) floating-point - elements in "a", and store the results in "dst". - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := LOG(a[i+31:i]) / LOG(2.0) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Elementary Math Functions + + + Compute the base-2 logarithm of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := LOG(a[i+31:i]) / LOG(2.0) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Elementary Math Functions
- - - Convert the exponent of each packed double-precision (64-bit) floating-point - element in "a" to a double-precision floating-point number representing the integer - exponent, and store the results in "dst". This intrinsic essentially calculates - "floor(log2(x))" for each element. - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := ConvertExpFP64(a[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Elementary Math Functions + + + Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element. + FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := ConvertExpFP64(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Elementary Math Functions
- - - Convert the exponent of each packed single-precision (32-bit) floating-point - element in "a" to a single-precision floating-point number representing the integer - exponent, and store the results in "dst". This intrinsic essentially calculates - "floor(log2(x))" for each element. - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := ConvertExpFP32(a[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Elementary Math Functions + + + Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element. + FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := ConvertExpFP32(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Elementary Math Functions
- - - - Compute the exponential value of packed double-precision (64-bit) - floating-point elements in "a" raised by packed elements in "b", and store the results - in "dst". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := POW(a[i+63:i], b[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Elementary Math Functions + + + + Compute the exponential value of packed double-precision (64-bit) floating-point elements in "a" raised by packed elements in "b", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := POW(a[i+63:i], b[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Elementary Math Functions
- - - - Compute the exponential value of packed single-precision (32-bit) - floating-point elements in "a" raised by packed elements in "b", and store the results - in "dst". - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := POW(a[i+31:i], b[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Elementary Math Functions + + + + Compute the exponential value of packed single-precision (32-bit) floating-point elements in "a" raised by packed elements in "b", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := POW(a[i+31:i], b[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Elementary Math Functions
- - - Compute the square root of packed double-precision (64-bit) floating-point - elements in "a", and store the results in "dst". Note that this intrinsic is less - efficient than "_mm_sqrt_pd". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := SQRT(a[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Elementary Math Functions + + + Compute the square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". Note that this intrinsic is less efficient than "_mm256_sqrt_pd". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := SQRT(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Elementary Math Functions
- - - Compute the square root of packed single-precision (32-bit) floating-point - elements in "a", and store the results in "dst". Note that this intrinsic is less - efficient than "_mm_sqrt_ps". - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := SQRT(a[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Elementary Math Functions + + + Compute the square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". Note that this intrinsic is less efficient than "_mm256_sqrt_ps". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := SQRT(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Elementary Math Functions
- - - Compute the cumulative distribution function of packed double-precision - (64-bit) floating-point elements in "a" using the normal distribution, and store the - results in "dst". - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := CDFNormal(a[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Probability/Statistics + + + Compute the cumulative distribution function of packed double-precision (64-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst". + FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := CDFNormal(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Probability/Statistics
- - - Compute the cumulative distribution function of packed single-precision - (32-bit) floating-point elements in "a" using the normal distribution, and store the - results in "dst". - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := CDFNormal(a[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Probability/Statistics + + + Compute the cumulative distribution function of packed single-precision (32-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst". + FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := CDFNormal(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Probability/Statistics
- - - Compute the inverse cumulative distribution function of packed double-precision - (64-bit) floating-point elements in "a" using the normal distribution, and store the - results in "dst". - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := InverseCDFNormal(a[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Probability/Statistics + + + Compute the inverse cumulative distribution function of packed double-precision (64-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst". + FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := InverseCDFNormal(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Probability/Statistics
- - - Compute the inverse cumulative distribution function of packed single-precision - (32-bit) floating-point elements in "a" using the normal distribution, and store the - results in "dst". - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := InverseCDFNormal(a[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Probability/Statistics + + + Compute the inverse cumulative distribution function of packed single-precision (32-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst". + FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := InverseCDFNormal(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Probability/Statistics
- - - Compute the error function of packed double-precision (64-bit) floating-point - elements in "a", and store the results in "dst". - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := ERF(a[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Probability/Statistics + + + Compute the error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := ERF(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Probability/Statistics
- - - Compute the error function of packed single-precision (32-bit) floating-point - elements in "a", and store the results in "dst". - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := ERF(a[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Probability/Statistics + + + Compute the error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := ERF(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Probability/Statistics
- - - Compute the complementary error function of packed double-precision (64-bit) - floating-point elements in "a", and store the results in "dst". - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := 1.0 - ERF(a[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Probability/Statistics + + + Compute the complementary error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := 1.0 - ERF(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Probability/Statistics
- - - Compute the complementary error function of packed single-precision (32-bit) - floating-point elements in "a", and store the results in "dst". - FOR j := 0 to 7 - i := j*32 - dst[i+63:i] := 1.0 - ERF(a[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Probability/Statistics + + + Compute the complementary error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := 1.0 - ERF(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Probability/Statistics
- - - Compute the inverse complementary error function of packed double-precision - (64-bit) floating-point elements in "a", and store the results in "dst". - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := 1.0 / (1.0 - ERF(a[i+63:i])) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Probability/Statistics + + + Compute the inverse complementary error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := 1.0 / (1.0 - ERF(a[i+63:i])) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Probability/Statistics
- - - Compute the inverse complementary error function of packed single-precision - (32-bit) floating-point elements in "a", and store the results in "dst". - FOR j := 0 to 7 - i := j*32 - dst[i+63:i] := 1.0 / (1.0 - ERF(a[i+31:i])) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Probability/Statistics + + + Compute the inverse complementary error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := 1.0 / (1.0 - ERF(a[i+31:i])) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Probability/Statistics
- - - Compute the inverse error function of packed double-precision (64-bit) - floating-point elements in "a", and store the results in "dst". - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := 1.0 / ERF(a[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Probability/Statistics + + + Compute the inverse error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := 1.0 / ERF(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Probability/Statistics
- - - Compute the inverse error function of packed single-precision (32-bit) - floating-point elements in "a", and store the results in "dst". - FOR j := 0 to 7 - i := j*32 - dst[i+63:i] := 1.0 / ERF(a[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Probability/Statistics + + + Compute the inverse error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := 1.0 / ERF(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Probability/Statistics
- - - - Divide packed signed 8-bit integers in "a" by packed elements in "b", and store - the truncated results in "dst". - - FOR j := 0 to 31 - i := 8*j - IF b[i+7:i] == 0 - #DE - FI - dst[i+7:i] := Truncate8(a[i+7:i] / b[i+7:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Arithmetic + + + + Divide packed signed 8-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 31 + i := 8*j + IF b[i+7:i] == 0 + #DE + FI + dst[i+7:i] := Truncate8(a[i+7:i] / b[i+7:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Arithmetic
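A scalar sketch of the truncated signed division above. Rust's integer division already truncates toward zero, matching `Truncate8`; `wrapping_div` is used so the one overflowing case (`i8::MIN / -1`) wraps to the 8-bit truncation the pseudocode describes, and the explicit zero check models the `#DE` fault. The helper name is hypothetical:

    fn div_epi8_model(a: [i8; 32], b: [i8; 32]) -> [i8; 32] {
        let mut dst = [0i8; 32];
        for j in 0..32 {
            assert!(b[j] != 0, "#DE: divide error"); // IF b == 0: #DE
            dst[j] = a[j].wrapping_div(b[j]);        // Truncate8(a / b)
        }
        dst
    }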
- - - - Divide packed signed 16-bit integers in "a" by packed elements in "b", and - store the truncated results in "dst". - - FOR j := 0 to 15 - i := 16*j - IF b[i+15:i] == 0 - #DE - FI - dst[i+15:i] := Truncate16(a[i+15:i] / b[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Arithmetic + + + + Divide packed signed 16-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 15 + i := 16*j + IF b[i+15:i] == 0 + #DE + FI + dst[i+15:i] := Truncate16(a[i+15:i] / b[i+15:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Arithmetic
- - - - Divide packed signed 32-bit integers in "a" by packed elements in "b", and - store the truncated results in "dst". - - FOR j := 0 to 7 - i := 32*j - IF b[i+31:i] == 0 - #DE - FI - dst[i+31:i] := Truncate32(a[i+31:i] / b[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Arithmetic + + + + Divide packed signed 32-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 7 + i := 32*j + IF b[i+31:i] == 0 + #DE + FI + dst[i+31:i] := Truncate32(a[i+31:i] / b[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Arithmetic
- - - - Divide packed signed 64-bit integers in "a" by packed elements in "b", and - store the truncated results in "dst". - - FOR j := 0 to 3 - i := 64*j - IF b[i+63:i] == 0 - #DE - FI - dst[i+63:i] := Truncate64(a[i+63:i] / b[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Arithmetic + + + + Divide packed signed 64-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 3 + i := 64*j + IF b[i+63:i] == 0 + #DE + FI + dst[i+63:i] := Truncate64(a[i+63:i] / b[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Arithmetic
- - - - Divide packed unsigned 8-bit integers in "a" by packed elements in "b", and - store the truncated results in "dst". - - FOR j := 0 to 31 - i := 8*j - IF b[i+7:i] == 0 - #DE - FI - dst[i+7:i] := Truncate8(a[i+7:i] / b[i+7:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Arithmetic + + + + Divide packed unsigned 8-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 31 + i := 8*j + IF b[i+7:i] == 0 + #DE + FI + dst[i+7:i] := Truncate8(a[i+7:i] / b[i+7:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Arithmetic
- - - - Divide packed unsigned 16-bit integers in "a" by packed elements in "b", and - store the truncated results in "dst". - - FOR j := 0 to 15 - i := 16*j - IF b[i+15:i] == 0 - #DE - FI - dst[i+15:i] := Truncate16(a[i+15:i] / b[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Arithmetic + + + + Divide packed unsigned 16-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 15 + i := 16*j + IF b[i+15:i] == 0 + #DE + FI + dst[i+15:i] := Truncate16(a[i+15:i] / b[i+15:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Arithmetic
- - - - Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and - store the truncated results in "dst". - - FOR j := 0 to 7 - i := 32*j - IF b[i+31:i] == 0 - #DE - FI - dst[i+31:i] := Truncate32(a[i+31:i] / b[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Arithmetic + + + + Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 7 + i := 32*j + IF b[i+31:i] == 0 + #DE + FI + dst[i+31:i] := Truncate32(a[i+31:i] / b[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Arithmetic
- - - - Divide packed unsigned 64-bit integers in "a" by packed elements in "b", and - store the truncated results in "dst". - - FOR j := 0 to 3 - i := 64*j - IF b[i+63:i] == 0 - #DE - FI - dst[i+63:i] := Truncate64(a[i+63:i] / b[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Arithmetic + + + + Divide packed unsigned 64-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 3 + i := 64*j + IF b[i+63:i] == 0 + #DE + FI + dst[i+63:i] := Truncate64(a[i+63:i] / b[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Arithmetic
- - - - Divide packed 32-bit integers in "a" by packed elements in "b", and store the - truncated results in "dst". - FOR j := 0 to 7 - i := 32*j - dst[i+31:i] := TRUNCATE(a[i+31:i] / b[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Arithmetic + + + + Divide packed 32-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + FOR j := 0 to 7 + i := 32*j + dst[i+31:i] := TRUNCATE(a[i+31:i] / b[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Arithmetic
- - - - - Divide packed 32-bit integers in "a" by packed elements in "b", store the - truncated results in "dst", and store the remainders as packed 32-bit integers into - memory at "mem_addr". - FOR j := 0 to 7 - i := 32*j - dst[i+31:i] := TRUNCATE(a[i+31:i] / b[i+31:i]) - MEM[mem_addr+i+31:mem_addr+i] := REMAINDER(a[i+31:i] / b[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Arithmetic + + + + + Divide packed 32-bit integers in "a" by packed elements in "b", store the truncated results in "dst", and store the remainders as packed 32-bit integers into memory at "mem_addr". + FOR j := 0 to 7 + i := 32*j + dst[i+31:i] := TRUNCATE(a[i+31:i] / b[i+31:i]) + MEM[mem_addr+i+31:mem_addr+i] := REMAINDER(a[i+31:i] / b[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Arithmetic
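The quotient written to "dst" and the remainder written to "mem_addr" above obey a == q*b + r with truncation toward zero, which is exactly Rust's `/` and `%`. A hedged scalar sketch, with the second returned array standing in for the memory store:

    fn idivrem_epi32_model(a: [i32; 8], b: [i32; 8]) -> ([i32; 8], [i32; 8]) {
        let mut q = [0i32; 8];
        let mut r = [0i32; 8];
        for j in 0..8 {
            // (b[j] == 0 panics here; the entry's pseudocode leaves it unspecified.)
            q[j] = a[j].wrapping_div(b[j]); // TRUNCATE(a / b) -> dst
            r[j] = a[j].wrapping_rem(b[j]); // REMAINDER(a / b) -> MEM[mem_addr]
        }
        (q, r)
    }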
- - - - Divide packed 32-bit integers in "a" by packed elements in "b", and store the - remainders as packed 32-bit integers in "dst". - FOR j := 0 to 7 - i := 32*j - dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Arithmetic + + + + Divide packed 32-bit integers in "a" by packed elements in "b", and store the remainders as packed 32-bit integers in "dst". + FOR j := 0 to 7 + i := 32*j + dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Arithmetic
- - - - Divide packed 8-bit integers in "a" by packed elements in "b", and store the - remainders as packed 32-bit integers in "dst". - FOR j := 0 to 31 - i := 8*j - dst[i+7:i] := REMAINDER(a[i+7:i] / b[i+7:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Arithmetic + + + + Divide packed 8-bit integers in "a" by packed elements in "b", and store the remainders as packed 8-bit integers in "dst". + FOR j := 0 to 31 + i := 8*j + dst[i+7:i] := REMAINDER(a[i+7:i] / b[i+7:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Arithmetic
- - - - Divide packed 16-bit integers in "a" by packed elements in "b", and store the - remainders as packed 32-bit integers in "dst". - FOR j := 0 to 15 - i := 16*j - dst[i+15:i] := REMAINDER(a[i+15:i] / b[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Arithmetic + + + + Divide packed 16-bit integers in "a" by packed elements in "b", and store the remainders as packed 16-bit integers in "dst". + FOR j := 0 to 15 + i := 16*j + dst[i+15:i] := REMAINDER(a[i+15:i] / b[i+15:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Arithmetic
- - - - Divide packed 32-bit integers in "a" by packed elements in "b", and store the - remainders as packed 32-bit integers in "dst". - FOR j := 0 to 7 - i := 32*j - dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Arithmetic + + + + Divide packed 32-bit integers in "a" by packed elements in "b", and store the remainders as packed 32-bit integers in "dst". + FOR j := 0 to 7 + i := 32*j + dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Arithmetic
- - - - Divide packed 64-bit integers in "a" by packed elements in "b", and store the - remainders as packed 32-bit integers in "dst". - FOR j := 0 to 3 - i := 64*j - dst[i+63:i] := REMAINDER(a[i+63:i] / b[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Arithmetic + + + + Divide packed 64-bit integers in "a" by packed elements in "b", and store the remainders as packed 64-bit integers in "dst". + FOR j := 0 to 3 + i := 64*j + dst[i+63:i] := REMAINDER(a[i+63:i] / b[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Arithmetic
- - - - Divide packed unsigned 8-bit integers in "a" by packed elements in "b", and - store the remainders as packed unsigned 32-bit integers in "dst". - FOR j := 0 to 31 - i := 8*j - dst[i+7:i] := REMAINDER(a[i+7:i] / b[i+7:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Arithmetic + + + + Divide packed unsigned 8-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 8-bit integers in "dst". + FOR j := 0 to 31 + i := 8*j + dst[i+7:i] := REMAINDER(a[i+7:i] / b[i+7:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Arithmetic
- - - - Divide packed unsigned 16-bit integers in "a" by packed elements in "b", and - store the remainders as packed unsigned 32-bit integers in "dst". - FOR j := 0 to 15 - i := 16*j - dst[i+15:i] := REMAINDER(a[i+15:i] / b[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Arithmetic + + + + Divide packed unsigned 16-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 16-bit integers in "dst". + FOR j := 0 to 15 + i := 16*j + dst[i+15:i] := REMAINDER(a[i+15:i] / b[i+15:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Arithmetic
- - - - Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and - store the remainders as packed unsigned 32-bit integers in "dst". - FOR j := 0 to 7 - i := 32*j - dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Arithmetic + + + + Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 32-bit integers in "dst". + FOR j := 0 to 7 + i := 32*j + dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Arithmetic
- - - - Divide packed unsigned 64-bit integers in "a" by packed elements in "b", and - store the remainders as packed unsigned 32-bit integers in "dst". - FOR j := 0 to 3 - i := 64*j - dst[i+63:i] := REMAINDER(a[i+63:i] / b[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Arithmetic + + + + Divide packed unsigned 64-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 64-bit integers in "dst". + FOR j := 0 to 3 + i := 64*j + dst[i+63:i] := REMAINDER(a[i+63:i] / b[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Arithmetic
- - - - Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and - store the truncated results in "dst". - FOR j := 0 to 7 - i := 32*j - dst[i+31:i] := TRUNCATE(a[i+31:i] / b[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Arithmetic + + + + Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + FOR j := 0 to 7 + i := 32*j + dst[i+31:i] := TRUNCATE(a[i+31:i] / b[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Arithmetic
- - - - - Divide packed unsigned 32-bit integers in "a" by packed elements in "b", store - the truncated results in "dst", and store the remainders as packed unsigned 32-bit - integers into memory at "mem_addr". - FOR j := 0 to 7 - i := 32*j - dst[i+31:i] := TRUNCATE(a[i+31:i] / b[i+31:i]) - MEM[mem_addr+i+31:mem_addr+i] := REMAINDER(a[i+31:i] / b[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Arithmetic + + + + + Divide packed unsigned 32-bit integers in "a" by packed elements in "b", store the truncated results in "dst", and store the remainders as packed unsigned 32-bit integers into memory at "mem_addr". + FOR j := 0 to 7 + i := 32*j + dst[i+31:i] := TRUNCATE(a[i+31:i] / b[i+31:i]) + MEM[mem_addr+i+31:mem_addr+i] := REMAINDER(a[i+31:i] / b[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Arithmetic
- - - - Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and - store the remainders as packed unsigned 32-bit integers in "dst". - FOR j := 0 to 7 - i := 32*j - dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Arithmetic + + + + Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 32-bit integers in "dst". + FOR j := 0 to 7 + i := 32*j + dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Arithmetic
- - - Round the packed double-precision (64-bit) floating-point elements in "a" up to - an integer value, and store the results as packed double-precision floating-point - elements in "dst". This intrinsic may generate the "roundpd"/"vroundpd" instruction. - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := CEIL(a[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Special Math Functions + + + Round the packed double-precision (64-bit) floating-point elements in "a" up to an integer value, and store the results as packed double-precision floating-point elements in "dst". This intrinsic may generate the "roundpd"/"vroundpd" instruction. + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := CEIL(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Special Math Functions
- - - Round the packed single-precision (32-bit) floating-point elements in "a" up to - an integer value, and store the results as packed single-precision floating-point - elements in "dst". This intrinsic may generate the "roundps"/"vroundps" instruction. - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := CEIL(a[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Special Math Functions + + + Round the packed single-precision (32-bit) floating-point elements in "a" up to an integer value, and store the results as packed single-precision floating-point elements in "dst". This intrinsic may generate the "roundps"/"vroundps" instruction. + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := CEIL(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Special Math Functions
- - - Round the packed double-precision (64-bit) floating-point elements in "a" down - to an integer value, and store the results as packed double-precision floating-point - elements in "dst". This intrinsic may generate the "roundpd"/"vroundpd" instruction. - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := FLOOR(a[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Special Math Functions + + + Round the packed double-precision (64-bit) floating-point elements in "a" down to an integer value, and store the results as packed double-precision floating-point elements in "dst". This intrinsic may generate the "roundpd"/"vroundpd" instruction. + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := FLOOR(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Special Math Functions
- - - Round the packed single-precision (32-bit) floating-point elements in "a" down - to an integer value, and store the results as packed single-precision floating-point - elements in "dst". This intrinsic may generate the "roundps"/"vroundps" instruction. - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := FLOOR(a[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Special Math Functions + + + Round the packed single-precision (32-bit) floating-point elements in "a" down to an integer value, and store the results as packed single-precision floating-point elements in "dst". This intrinsic may generate the "roundps"/"vroundps" instruction. + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := FLOOR(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Special Math Functions
- - - Round the packed double-precision (64-bit) floating-point elements in "a" to - the nearest integer value, and store the results as packed double-precision - floating-point elements in "dst". This intrinsic may generate the "roundpd"/"vroundpd" - instruction. - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := ROUND(a[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Special Math Functions + + + Round the packed double-precision (64-bit) floating-point elements in "a" to the nearest integer value, and store the results as packed double-precision floating-point elements in "dst". This intrinsic may generate the "roundpd"/"vroundpd" instruction. + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := ROUND(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Special Math Functions
- - - Round the packed single-precision (32-bit) floating-point elements in "a" to - the nearest integer value, and store the results as packed single-precision - floating-point elements in "dst". This intrinsic may generate the "roundps"/"vroundps" - instruction. - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := ROUND(a[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Special Math Functions + + + Round the packed single-precision (32-bit) floating-point elements in "a" to the nearest integer value, and store the results as packed single-precision floating-point elements in "dst". This intrinsic may generate the "roundps"/"vroundps" instruction. + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := ROUND(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Special Math Functions
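CEIL and FLOOR above map directly onto Rust's `f32::ceil`/`f32::floor`. The ROUND pseudocode does not pin down tie handling, so `f32::round` (ties away from zero) is used below as an assumption rather than a documented guarantee of the intrinsic; the helper name is hypothetical:

    fn round_ps_model(a: [f32; 8]) -> [f32; 8] {
        let mut dst = [0.0f32; 8];
        for j in 0..8 {
            dst[j] = a[j].round(); // ROUND(a), assuming ties away from zero
        }
        dst
    }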
- - - Truncate the packed double-precision (64-bit) floating-point elements in "a", - and store the results as packed double-precision floating-point elements in "dst". This - intrinsic may generate the "roundpd"/"vroundpd" instruction. - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := TRUNCATE(a[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Miscellaneous + + + Truncate the packed double-precision (64-bit) floating-point elements in "a", and store the results as packed double-precision floating-point elements in "dst". This intrinsic may generate the "roundpd"/"vroundpd" instruction. + FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := TRUNCATE(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Miscellaneous
- - - Truncate the packed single-precision (32-bit) floating-point elements in "a", - and store the results as packed single-precision floating-point elements in "dst". This - intrinsic may generate the "roundps"/"vroundps" instruction. - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := TRUNCATE(a[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Miscellaneous -
- - - - - - - Add packed double-precision (64-bit) floating-point elements in "a" and "b", - and store the results in "dst". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := a[i+63:i] + b[i+63:i] - ENDFOR - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Arithmetic + + + Truncate the packed single-precision (32-bit) floating-point elements in "a", and store the results as packed single-precision floating-point elements in "dst". This intrinsic may generate the "roundps"/"vroundps" instruction. + FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := TRUNCATE(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Miscellaneous +
+ + + + + + + Add packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := a[i+63:i] + b[i+63:i] +ENDFOR +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Arithmetic
- - - - Add packed single-precision (32-bit) floating-point elements in "a" and "b", - and store the results in "dst". - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := a[i+31:i] + b[i+31:i] - ENDFOR - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Arithmetic + + + + Add packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := a[i+31:i] + b[i+31:i] +ENDFOR +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Arithmetic
- - - - Alternatively add and subtract packed double-precision (64-bit) floating-point - elements in "a" to/from packed elements in "b", and store the results in "dst". - - FOR j := 0 to 3 - i := j*64 - IF ((j & 1) == 0) - dst[i+63:i] := a[i+63:i] - b[i+63:i] - ELSE - dst[i+63:i] := a[i+63:i] + b[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Arithmetic + + + + Alternatively add and subtract packed double-precision (64-bit) floating-point elements in "a" to/from packed elements in "b", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + IF ((j & 1) == 0) + dst[i+63:i] := a[i+63:i] - b[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + b[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Arithmetic
- - - - Alternatively add and subtract packed single-precision (32-bit) floating-point - elements in "a" to/from packed elements in "b", and store the results in "dst". - - FOR j := 0 to 7 - i := j*32 - IF ((j & 1) == 0) - dst[i+31:i] := a[i+31:i] - b[i+31:i] - ELSE - dst[i+31:i] := a[i+31:i] + b[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Arithmetic + + + + Alternatively add and subtract packed single-precision (32-bit) floating-point elements in "a" to/from packed elements in "b", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + IF ((j & 1) == 0) + dst[i+31:i] := a[i+31:i] - b[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + b[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Arithmetic
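The `IF ((j & 1) == 0)` test in the two addsub entries above subtracts in even lanes and adds in odd lanes. A scalar sketch of the single-precision case, with a hypothetical helper name:

    fn addsub_ps_model(a: [f32; 8], b: [f32; 8]) -> [f32; 8] {
        let mut dst = [0.0f32; 8];
        for j in 0..8 {
            // Even lane: subtract; odd lane: add.
            dst[j] = if j & 1 == 0 { a[j] - b[j] } else { a[j] + b[j] };
        }
        dst
    }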
- - - - Divide packed double-precision (64-bit) floating-point elements in "a" by - packed elements in "b", and store the results in "dst". - - FOR j := 0 to 3 - i := 64*j - dst[i+63:i] := a[i+63:i] / b[i+63:i] - ENDFOR - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Arithmetic + + + + Divide packed double-precision (64-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst". + +FOR j := 0 to 3 + i := 64*j + dst[i+63:i] := a[i+63:i] / b[i+63:i] +ENDFOR +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Arithmetic
- - - - Divide packed single-precision (32-bit) floating-point elements in "a" by - packed elements in "b", and store the results in "dst". - - FOR j := 0 to 7 - i := 32*j - dst[i+31:i] := a[i+31:i] / b[i+31:i] - ENDFOR - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Arithmetic + + + + Divide packed single-precision (32-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst". + +FOR j := 0 to 7 + i := 32*j + dst[i+31:i] := a[i+31:i] / b[i+31:i] +ENDFOR +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Arithmetic
- - - - - Conditionally multiply the packed single-precision (32-bit) floating-point - elements in "a" and "b" using the high 4 bits in "imm8", sum the four products, and - conditionally store the sum in "dst" using the low 4 bits of "imm8". - - DEFINE DP(a[127:0], b[127:0], imm8[7:0]) { - FOR j := 0 to 3 - i := j*32 - IF imm8[(4+j)%8] + + + + + Conditionally multiply the packed single-precision (32-bit) floating-point elements in "a" and "b" using the high 4 bits in "imm8", sum the four products, and conditionally store the sum in "dst" using the low 4 bits of "imm8". + +DEFINE DP(a[127:0], b[127:0], imm8[7:0]) { + FOR j := 0 to 3 + i := j*32 + IF imm8[(4+j)%8] temp[i+31:i] := a[i+31:i] * b[i+31:i] - ELSE + ELSE temp[i+31:i] := FP32(0.0) - FI - ENDFOR - - sum[31:0] := (temp[127:96] + temp[95:64]) + (temp[63:32] + temp[31:0]) - - FOR j := 0 to 3 - i := j*32 - IF imm8[j%8] + FI + ENDFOR + + sum[31:0] := (temp[127:96] + temp[95:64]) + (temp[63:32] + temp[31:0]) + + FOR j := 0 to 3 + i := j*32 + IF imm8[j%8] tmpdst[i+31:i] := sum[31:0] - ELSE + ELSE tmpdst[i+31:i] := FP32(0.0) - FI - ENDFOR - RETURN tmpdst[127:0] - } - dst[127:0] := DP(a[127:0], b[127:0], imm8[7:0]) - dst[255:128] := DP(a[255:128], b[255:128], imm8[7:0]) - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Arithmetic + FI + ENDFOR + RETURN tmpdst[127:0] +} +dst[127:0] := DP(a[127:0], b[127:0], imm8[7:0]) +dst[255:128] := DP(a[255:128], b[255:128], imm8[7:0]) +dst[MAX:256] := 0 +
+ + AVX +
immintrin.h
+ Arithmetic
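A scalar sketch of the `DP()` helper above for one 128-bit lane: the high four bits of "imm8" gate which products enter the sum, and the low four bits gate which output lanes receive it. The pseudocode sums in a fixed tree order, so the linear accumulation below can differ from the hardware result in the last ulp; the function name is hypothetical:

    fn dp128_model(a: [f32; 4], b: [f32; 4], imm8: u8) -> [f32; 4] {
        let mut sum = 0.0f32;
        for j in 0..4 {
            if imm8 & (1u8 << (4 + j)) != 0 {
                sum += a[j] * b[j]; // imm8[4+j] gates the product
            }
        }
        let mut dst = [0.0f32; 4];
        for j in 0..4 {
            if imm8 & (1u8 << j) != 0 {
                dst[j] = sum; // imm8[j] gates the broadcast of the sum
            }
        }
        dst
    }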
- - - - Horizontally add adjacent pairs of double-precision (64-bit) floating-point - elements in "a" and "b", and pack the results in "dst". - - dst[63:0] := a[127:64] + a[63:0] - dst[127:64] := b[127:64] + b[63:0] - dst[191:128] := a[255:192] + a[191:128] - dst[255:192] := b[255:192] + b[191:128] - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Arithmetic + + + + Horizontally add adjacent pairs of double-precision (64-bit) floating-point elements in "a" and "b", and pack the results in "dst". + +dst[63:0] := a[127:64] + a[63:0] +dst[127:64] := b[127:64] + b[63:0] +dst[191:128] := a[255:192] + a[191:128] +dst[255:192] := b[255:192] + b[191:128] +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Arithmetic
- - - - Horizontally add adjacent pairs of single-precision (32-bit) floating-point - elements in "a" and "b", and pack the results in "dst". - - dst[31:0] := a[63:32] + a[31:0] - dst[63:32] := a[127:96] + a[95:64] - dst[95:64] := b[63:32] + b[31:0] - dst[127:96] := b[127:96] + b[95:64] - dst[159:128] := a[191:160] + a[159:128] - dst[191:160] := a[255:224] + a[223:192] - dst[223:192] := b[191:160] + b[159:128] - dst[255:224] := b[255:224] + b[223:192] - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Arithmetic + + + + Horizontally add adjacent pairs of single-precision (32-bit) floating-point elements in "a" and "b", and pack the results in "dst". + +dst[31:0] := a[63:32] + a[31:0] +dst[63:32] := a[127:96] + a[95:64] +dst[95:64] := b[63:32] + b[31:0] +dst[127:96] := b[127:96] + b[95:64] +dst[159:128] := a[191:160] + a[159:128] +dst[191:160] := a[255:224] + a[223:192] +dst[223:192] := b[191:160] + b[159:128] +dst[255:224] := b[255:224] + b[223:192] +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Arithmetic
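The bit ranges in the horizontal-add entry above amount to: within each 128-bit lane, adjacent pairs of "a" land in the low half of that lane of "dst" and adjacent pairs of "b" in the high half. Spelled out as a scalar sketch with a hypothetical helper name:

    fn hadd_ps_model(a: [f32; 8], b: [f32; 8]) -> [f32; 8] {
        [
            a[1] + a[0], a[3] + a[2], b[1] + b[0], b[3] + b[2], // low 128-bit lane
            a[5] + a[4], a[7] + a[6], b[5] + b[4], b[7] + b[6], // high 128-bit lane
        ]
    }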
- - - - Horizontally subtract adjacent pairs of double-precision (64-bit) - floating-point elements in "a" and "b", and pack the results in "dst". - - dst[63:0] := a[63:0] - a[127:64] - dst[127:64] := b[63:0] - b[127:64] - dst[191:128] := a[191:128] - a[255:192] - dst[255:192] := b[191:128] - b[255:192] - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Arithmetic + + + + Horizontally subtract adjacent pairs of double-precision (64-bit) floating-point elements in "a" and "b", and pack the results in "dst". + +dst[63:0] := a[63:0] - a[127:64] +dst[127:64] := b[63:0] - b[127:64] +dst[191:128] := a[191:128] - a[255:192] +dst[255:192] := b[191:128] - b[255:192] +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Arithmetic
- - - - Horizontally subtract adjacent pairs of single-precision (32-bit) - floating-point elements in "a" and "b", and pack the results in "dst". - - dst[31:0] := a[31:0] - a[63:32] - dst[63:32] := a[95:64] - a[127:96] - dst[95:64] := b[31:0] - b[63:32] - dst[127:96] := b[95:64] - b[127:96] - dst[159:128] := a[159:128] - a[191:160] - dst[191:160] := a[223:192] - a[255:224] - dst[223:192] := b[159:128] - b[191:160] - dst[255:224] := b[223:192] - b[255:224] - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Arithmetic + + + + Horizontally subtract adjacent pairs of single-precision (32-bit) floating-point elements in "a" and "b", and pack the results in "dst". + +dst[31:0] := a[31:0] - a[63:32] +dst[63:32] := a[95:64] - a[127:96] +dst[95:64] := b[31:0] - b[63:32] +dst[127:96] := b[95:64] - b[127:96] +dst[159:128] := a[159:128] - a[191:160] +dst[191:160] := a[223:192] - a[255:224] +dst[223:192] := b[159:128] - b[191:160] +dst[255:224] := b[223:192] - b[255:224] +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Arithmetic
- - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", and store the results in "dst". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := a[i+63:i] * b[i+63:i] - ENDFOR - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Arithmetic + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := a[i+63:i] * b[i+63:i] +ENDFOR +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Arithmetic
- - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", and store the results in "dst". - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := a[i+31:i] * b[i+31:i] - ENDFOR - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Arithmetic + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := a[i+31:i] * b[i+31:i] +ENDFOR +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Arithmetic
- - - - Subtract packed double-precision (64-bit) floating-point elements in "b" from - packed double-precision (64-bit) floating-point elements in "a", and store the results - in "dst". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := a[i+63:i] - b[i+63:i] - ENDFOR - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Arithmetic + + + + Subtract packed double-precision (64-bit) floating-point elements in "b" from packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := a[i+63:i] - b[i+63:i] +ENDFOR +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Arithmetic
- - - - Subtract packed single-precision (32-bit) floating-point elements in "b" from - packed single-precision (32-bit) floating-point elements in "a", and store the results - in "dst". - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := a[i+31:i] - b[i+31:i] - ENDFOR - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Arithmetic + + + + Subtract packed single-precision (32-bit) floating-point elements in "b" from packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := a[i+31:i] - b[i+31:i] +ENDFOR +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Arithmetic
- - - - Compute the bitwise AND of packed double-precision (64-bit) floating-point - elements in "a" and "b", and store the results in "dst". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := (a[i+63:i] AND b[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Logical + + + + Compute the bitwise AND of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := (a[i+63:i] AND b[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Logical
- - - - Compute the bitwise AND of packed single-precision (32-bit) floating-point - elements in "a" and "b", and store the results in "dst". - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := (a[i+31:i] AND b[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Logical + + + + Compute the bitwise AND of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := (a[i+31:i] AND b[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Logical
- - - - Compute the bitwise NOT of packed double-precision (64-bit) floating-point - elements in "a" and then AND with "b", and store the results in "dst". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Logical + + + + Compute the bitwise NOT of packed double-precision (64-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Logical
- - - - Compute the bitwise NOT of packed single-precision (32-bit) floating-point - elements in "a" and then AND with "b", and store the results in "dst". - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Logical + + + + Compute the bitwise NOT of packed single-precision (32-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Logical
- - - - Compute the bitwise OR of packed double-precision (64-bit) floating-point - elements in "a" and "b", and store the results in "dst". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := a[i+63:i] OR b[i+63:i] - ENDFOR - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Logical + + + + Compute the bitwise OR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := a[i+63:i] OR b[i+63:i] +ENDFOR +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Logical
- - - - Compute the bitwise OR of packed single-precision (32-bit) floating-point - elements in "a" and "b", and store the results in "dst". - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := a[i+31:i] OR b[i+31:i] - ENDFOR - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Logical + + + + Compute the bitwise OR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := a[i+31:i] OR b[i+31:i] +ENDFOR +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Logical
- - - - Compute the bitwise XOR of packed double-precision (64-bit) floating-point - elements in "a" and "b", and store the results in "dst". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := a[i+63:i] XOR b[i+63:i] - ENDFOR - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Logical + + + + Compute the bitwise XOR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := a[i+63:i] XOR b[i+63:i] +ENDFOR +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Logical
- - - - Compute the bitwise XOR of packed single-precision (32-bit) floating-point - elements in "a" and "b", and store the results in "dst". - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := a[i+31:i] XOR b[i+31:i] - ENDFOR - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Logical + + + + Compute the bitwise XOR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := a[i+31:i] XOR b[i+31:i] +ENDFOR +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Logical
- - - - Compute the bitwise AND of 256 bits (representing integer data) in "a" and "b", - and set "ZF" to 1 if the result is zero, otherwise set "ZF" to 0. Compute the bitwise - NOT of "a" and then AND with "b", and set "CF" to 1 if the result is zero, otherwise set - "CF" to 0. Return the "ZF" value. - - IF ((a[255:0] AND b[255:0]) == 0) - ZF := 1 - ELSE - ZF := 0 - FI - IF (((NOT a[255:0]) AND b[255:0]) == 0) - CF := 1 - ELSE - CF := 0 - FI - RETURN ZF - - - AVX -
immintrin.h
- Logical + + + + Compute the bitwise AND of 256 bits (representing integer data) in "a" and "b", and set "ZF" to 1 if the result is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", and set "CF" to 1 if the result is zero, otherwise set "CF" to 0. Return the "ZF" value. + +IF ((a[255:0] AND b[255:0]) == 0) + ZF := 1 +ELSE + ZF := 0 +FI +IF (((NOT a[255:0]) AND b[255:0]) == 0) + CF := 1 +ELSE + CF := 0 +FI +RETURN ZF + + + AVX +
immintrin.h
+ Logical
- - - - Compute the bitwise AND of 256 bits (representing integer data) in "a" and "b", - and set "ZF" to 1 if the result is zero, otherwise set "ZF" to 0. Compute the bitwise - NOT of "a" and then AND with "b", and set "CF" to 1 if the result is zero, otherwise set - "CF" to 0. Return the "CF" value. - - IF ((a[255:0] AND b[255:0]) == 0) - ZF := 1 - ELSE - ZF := 0 - FI - IF (((NOT a[255:0]) AND b[255:0]) == 0) - CF := 1 - ELSE - CF := 0 - FI - RETURN CF - - - AVX -
immintrin.h
- Logical + + + + Compute the bitwise AND of 256 bits (representing integer data) in "a" and "b", and set "ZF" to 1 if the result is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", and set "CF" to 1 if the result is zero, otherwise set "CF" to 0. Return the "CF" value. + +IF ((a[255:0] AND b[255:0]) == 0) + ZF := 1 +ELSE + ZF := 0 +FI +IF (((NOT a[255:0]) AND b[255:0]) == 0) + CF := 1 +ELSE + CF := 0 +FI +RETURN CF + + + AVX +
immintrin.h
+ Logical
- - - - Compute the bitwise AND of 256 bits (representing integer data) in "a" and "b", - and set "ZF" to 1 if the result is zero, otherwise set "ZF" to 0. Compute the bitwise - NOT of "a" and then AND with "b", and set "CF" to 1 if the result is zero, otherwise set - "CF" to 0. Return 1 if both the "ZF" and "CF" values are zero, otherwise return 0. - - IF ((a[255:0] AND b[255:0]) == 0) - ZF := 1 - ELSE - ZF := 0 - FI - IF (((NOT a[255:0]) AND b[255:0]) == 0) - CF := 1 - ELSE - CF := 0 - FI - IF (ZF == 0 && CF == 0) - dst := 1 - ELSE - dst := 0 - FI - - - AVX -
immintrin.h
- Logical + + + + Compute the bitwise AND of 256 bits (representing integer data) in "a" and "b", and set "ZF" to 1 if the result is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", and set "CF" to 1 if the result is zero, otherwise set "CF" to 0. Return 1 if both the "ZF" and "CF" values are zero, otherwise return 0. + +IF ((a[255:0] AND b[255:0]) == 0) + ZF := 1 +ELSE + ZF := 0 +FI +IF (((NOT a[255:0]) AND b[255:0]) == 0) + CF := 1 +ELSE + CF := 0 +FI +IF (ZF == 0 && CF == 0) + dst := 1 +ELSE + dst := 0 +FI + + + AVX +
immintrin.h
+ Logical
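The three 256-bit integer test entries above share one ZF/CF computation and differ only in what they return. A scalar sketch with the 256-bit vector modeled as `[u64; 4]` (hypothetical helper, illustrative only):

    fn test_si256_flags(a: [u64; 4], b: [u64; 4]) -> (bool, bool) {
        let zf = (0..4).all(|k| a[k] & b[k] == 0);  // ZF: (a AND b) == 0
        let cf = (0..4).all(|k| !a[k] & b[k] == 0); // CF: ((NOT a) AND b) == 0
        (zf, cf) // testz returns ZF, testc returns CF, testnzc returns !ZF && !CF as 0/1
    }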
- - - - Compute the bitwise AND of 256 bits (representing double-precision (64-bit) - floating-point elements) in "a" and "b", producing an intermediate 256-bit value, and - set "ZF" to 1 if the sign bit of each 64-bit element in the intermediate value is zero, - otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", producing - an intermediate value, and set "CF" to 1 if the sign bit of each 64-bit element in the - intermediate value is zero, otherwise set "CF" to 0. Return the "ZF" value. - - tmp[255:0] := a[255:0] AND b[255:0] - IF (tmp[63] == 0 && tmp[127] == 0 && tmp[191] == 0 && tmp[255] - == 0) - ZF := 1 - ELSE - ZF := 0 - FI - tmp[255:0] := (NOT a[255:0]) AND b[255:0] - IF (tmp[63] == 0 && tmp[127] == 0 && tmp[191] == 0 && tmp[255] - == 0) - CF := 1 - ELSE - CF := 0 - FI - dst := ZF - - - AVX -
immintrin.h
- Logical + + + + Compute the bitwise AND of 256 bits (representing double-precision (64-bit) floating-point elements) in "a" and "b", producing an intermediate 256-bit value, and set "ZF" to 1 if the sign bit of each 64-bit element in the intermediate value is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", producing an intermediate value, and set "CF" to 1 if the sign bit of each 64-bit element in the intermediate value is zero, otherwise set "CF" to 0. Return the "ZF" value. + +tmp[255:0] := a[255:0] AND b[255:0] +IF (tmp[63] == 0 && tmp[127] == 0 && tmp[191] == 0 && tmp[255] == 0) + ZF := 1 +ELSE + ZF := 0 +FI +tmp[255:0] := (NOT a[255:0]) AND b[255:0] +IF (tmp[63] == 0 && tmp[127] == 0 && tmp[191] == 0 && tmp[255] == 0) + CF := 1 +ELSE + CF := 0 +FI +dst := ZF + + + AVX +
immintrin.h
+ Logical
- - - - Compute the bitwise AND of 256 bits (representing double-precision (64-bit) - floating-point elements) in "a" and "b", producing an intermediate 256-bit value, and - set "ZF" to 1 if the sign bit of each 64-bit element in the intermediate value is zero, - otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", producing - an intermediate value, and set "CF" to 1 if the sign bit of each 64-bit element in the - intermediate value is zero, otherwise set "CF" to 0. Return the "CF" value. - - tmp[255:0] := a[255:0] AND b[255:0] - IF (tmp[63] == 0 && tmp[127] == 0 && tmp[191] == 0 && tmp[255] - == 0) - ZF := 1 - ELSE - ZF := 0 - FI - tmp[255:0] := (NOT a[255:0]) AND b[255:0] - IF (tmp[63] == 0 && tmp[127] == 0 && tmp[191] == 0 && tmp[255] - == 0) - CF := 1 - ELSE - CF := 0 - FI - dst := CF - - - AVX -
immintrin.h
- Logical + + + + Compute the bitwise AND of 256 bits (representing double-precision (64-bit) floating-point elements) in "a" and "b", producing an intermediate 256-bit value, and set "ZF" to 1 if the sign bit of each 64-bit element in the intermediate value is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", producing an intermediate value, and set "CF" to 1 if the sign bit of each 64-bit element in the intermediate value is zero, otherwise set "CF" to 0. Return the "CF" value. + +tmp[255:0] := a[255:0] AND b[255:0] +IF (tmp[63] == 0 && tmp[127] == 0 && tmp[191] == 0 && tmp[255] == 0) + ZF := 1 +ELSE + ZF := 0 +FI +tmp[255:0] := (NOT a[255:0]) AND b[255:0] +IF (tmp[63] == 0 && tmp[127] == 0 && tmp[191] == 0 && tmp[255] == 0) + CF := 1 +ELSE + CF := 0 +FI +dst := CF + + + AVX +
immintrin.h
+ Logical
- - - - Compute the bitwise AND of 256 bits (representing double-precision (64-bit) - floating-point elements) in "a" and "b", producing an intermediate 256-bit value, and - set "ZF" to 1 if the sign bit of each 64-bit element in the intermediate value is zero, - otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", producing - an intermediate value, and set "CF" to 1 if the sign bit of each 64-bit element in the - intermediate value is zero, otherwise set "CF" to 0. Return 1 if both the "ZF" and "CF" - values are zero, otherwise return 0. - - tmp[255:0] := a[255:0] AND b[255:0] - IF (tmp[63] == 0 && tmp[127] == 0 && tmp[191] == 0 && tmp[255] - == 0) - ZF := 1 - ELSE - ZF := 0 - FI - tmp[255:0] := (NOT a[255:0]) AND b[255:0] - IF (tmp[63] == 0 && tmp[127] == 0 && tmp[191] == 0 && tmp[255] - == 0) - CF := 1 - ELSE - CF := 0 - FI - IF (ZF == 0 && CF == 0) - dst := 1 - ELSE - dst := 0 - FI - - - AVX -
immintrin.h
- Logical + + + + Compute the bitwise AND of 256 bits (representing double-precision (64-bit) floating-point elements) in "a" and "b", producing an intermediate 256-bit value, and set "ZF" to 1 if the sign bit of each 64-bit element in the intermediate value is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", producing an intermediate value, and set "CF" to 1 if the sign bit of each 64-bit element in the intermediate value is zero, otherwise set "CF" to 0. Return 1 if both the "ZF" and "CF" values are zero, otherwise return 0. + +tmp[255:0] := a[255:0] AND b[255:0] +IF (tmp[63] == 0 && tmp[127] == 0 && tmp[191] == 0 && tmp[255] == 0) + ZF := 1 +ELSE + ZF := 0 +FI +tmp[255:0] := (NOT a[255:0]) AND b[255:0] +IF (tmp[63] == 0 && tmp[127] == 0 && tmp[191] == 0 && tmp[255] == 0) + CF := 1 +ELSE + CF := 0 +FI +IF (ZF == 0 && CF == 0) + dst := 1 +ELSE + dst := 0 +FI + + + AVX +
immintrin.h
+ Logical
- - - - Compute the bitwise AND of 128 bits (representing double-precision (64-bit) - floating-point elements) in "a" and "b", producing an intermediate 128-bit value, and - set "ZF" to 1 if the sign bit of each 64-bit element in the intermediate value is zero, - otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", producing - an intermediate value, and set "CF" to 1 if the sign bit of each 64-bit element in the - intermediate value is zero, otherwise set "CF" to 0. Return the "ZF" value. - - tmp[127:0] := a[127:0] AND b[127:0] - IF (tmp[63] == 0 && tmp[127] == 0) - ZF := 1 - ELSE - ZF := 0 - FI - tmp[127:0] := (NOT a[127:0]) AND b[127:0] - IF (tmp[63] == 0 && tmp[127] == 0) - CF := 1 - ELSE - CF := 0 - FI - dst := ZF - - - AVX -
immintrin.h
- Logical + + + + Compute the bitwise AND of 128 bits (representing double-precision (64-bit) floating-point elements) in "a" and "b", producing an intermediate 128-bit value, and set "ZF" to 1 if the sign bit of each 64-bit element in the intermediate value is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", producing an intermediate value, and set "CF" to 1 if the sign bit of each 64-bit element in the intermediate value is zero, otherwise set "CF" to 0. Return the "ZF" value. + +tmp[127:0] := a[127:0] AND b[127:0] +IF (tmp[63] == 0 && tmp[127] == 0) + ZF := 1 +ELSE + ZF := 0 +FI +tmp[127:0] := (NOT a[127:0]) AND b[127:0] +IF (tmp[63] == 0 && tmp[127] == 0) + CF := 1 +ELSE + CF := 0 +FI +dst := ZF + + + AVX +
immintrin.h
+ Logical
- Logical + + + + Compute the bitwise AND of 128 bits (representing double-precision (64-bit) floating-point elements) in "a" and "b", producing an intermediate 128-bit value, and set "ZF" to 1 if the sign bit of each 64-bit element in the intermediate value is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", producing an intermediate value, and set "CF" to 1 if the sign bit of each 64-bit element in the intermediate value is zero, otherwise set "CF" to 0. Return the "CF" value. + +tmp[127:0] := a[127:0] AND b[127:0] +IF (tmp[63] == 0 && tmp[127] == 0) + ZF := 1 +ELSE + ZF := 0 +FI +tmp[127:0] := (NOT a[127:0]) AND b[127:0] +IF (tmp[63] == 0 && tmp[127] == 0) + CF := 1 +ELSE + CF := 0 +FI +dst := CF + + + AVX +
+ immintrin.h
+ Logical
- Logical + + + + Compute the bitwise AND of 128 bits (representing double-precision (64-bit) floating-point elements) in "a" and "b", producing an intermediate 128-bit value, and set "ZF" to 1 if the sign bit of each 64-bit element in the intermediate value is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", producing an intermediate value, and set "CF" to 1 if the sign bit of each 64-bit element in the intermediate value is zero, otherwise set "CF" to 0. Return 1 if both the "ZF" and "CF" values are zero, otherwise return 0. + +tmp[127:0] := a[127:0] AND b[127:0] +IF (tmp[63] == 0 && tmp[127] == 0) + ZF := 1 +ELSE + ZF := 0 +FI +tmp[127:0] := (NOT a[127:0]) AND b[127:0] +IF (tmp[63] == 0 && tmp[127] == 0) + CF := 1 +ELSE + CF := 0 +FI +IF (ZF == 0 && CF == 0) + dst := 1 +ELSE + dst := 0 +FI + + + AVX +
+ immintrin.h
+ Logical
- Logical + + + + Compute the bitwise AND of 256 bits (representing single-precision (32-bit) floating-point elements) in "a" and "b", producing an intermediate 256-bit value, and set "ZF" to 1 if the sign bit of each 32-bit element in the intermediate value is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", producing an intermediate value, and set "CF" to 1 if the sign bit of each 32-bit element in the intermediate value is zero, otherwise set "CF" to 0. Return the "ZF" value. + +tmp[255:0] := a[255:0] AND b[255:0] +IF (tmp[31] == 0 && tmp[63] == 0 && tmp[95] == 0 && tmp[127] == 0 && \ + tmp[159] == 0 && tmp[191] == 0 && tmp[223] == 0 && tmp[255] == 0) + ZF := 1 +ELSE + ZF := 0 +FI +tmp[255:0] := (NOT a[255:0]) AND b[255:0] +IF (tmp[31] == 0 && tmp[63] == 0 && tmp[95] == 0 && tmp[127] == 0 && \ + tmp[159] == 0 && tmp[191] == 0 && tmp[223] == 0 && tmp[255] == 0) + CF := 1 +ELSE + CF := 0 +FI +dst := ZF + + + AVX +
+ immintrin.h
+ Logical
- Logical + + + + Compute the bitwise AND of 256 bits (representing single-precision (32-bit) floating-point elements) in "a" and "b", producing an intermediate 256-bit value, and set "ZF" to 1 if the sign bit of each 32-bit element in the intermediate value is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", producing an intermediate value, and set "CF" to 1 if the sign bit of each 32-bit element in the intermediate value is zero, otherwise set "CF" to 0. Return the "CF" value. + +tmp[255:0] := a[255:0] AND b[255:0] +IF (tmp[31] == 0 && tmp[63] == 0 && tmp[95] == 0 && tmp[127] == 0 && \ + tmp[159] == 0 && tmp[191] == 0 && tmp[223] == 0 && tmp[255] == 0) + ZF := 1 +ELSE + ZF := 0 +FI +tmp[255:0] := (NOT a[255:0]) AND b[255:0] +IF (tmp[31] == 0 && tmp[63] == 0 && tmp[95] == 0 && tmp[127] == 0 && \ + tmp[159] == 0 && tmp[191] == 0 && tmp[223] == 0 && tmp[255] == 0) + CF := 1 +ELSE + CF := 0 +FI +dst := CF + + + AVX +
+ immintrin.h
+ Logical
- Logical + + + + Compute the bitwise AND of 256 bits (representing single-precision (32-bit) floating-point elements) in "a" and "b", producing an intermediate 256-bit value, and set "ZF" to 1 if the sign bit of each 32-bit element in the intermediate value is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", producing an intermediate value, and set "CF" to 1 if the sign bit of each 32-bit element in the intermediate value is zero, otherwise set "CF" to 0. Return 1 if both the "ZF" and "CF" values are zero, otherwise return 0. + +tmp[255:0] := a[255:0] AND b[255:0] +IF (tmp[31] == 0 && tmp[63] == 0 && tmp[95] == 0 && tmp[127] == 0 && \ + tmp[159] == 0 && tmp[191] == 0 && tmp[223] == 0 && tmp[255] == 0) + ZF := 1 +ELSE + ZF := 0 +FI +tmp[255:0] := (NOT a[255:0]) AND b[255:0] +IF (tmp[31] == 0 && tmp[63] == 0 && tmp[95] == 0 && tmp[127] == 0 && \ + tmp[159] == 0 && tmp[191] == 0 && tmp[223] == 0 && tmp[255] == 0) + CF := 1 +ELSE + CF := 0 +FI +IF (ZF == 0 && CF == 0) + dst := 1 +ELSE + dst := 0 +FI + + + AVX +
+ immintrin.h
+ Logical
- Logical + + + + Compute the bitwise AND of 128 bits (representing single-precision (32-bit) floating-point elements) in "a" and "b", producing an intermediate 128-bit value, and set "ZF" to 1 if the sign bit of each 32-bit element in the intermediate value is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", producing an intermediate value, and set "CF" to 1 if the sign bit of each 32-bit element in the intermediate value is zero, otherwise set "CF" to 0. Return the "ZF" value. + +tmp[127:0] := a[127:0] AND b[127:0] +IF (tmp[31] == 0 && tmp[63] == 0 && tmp[95] == 0 && tmp[127] == 0) + ZF := 1 +ELSE + ZF := 0 +FI +tmp[127:0] := (NOT a[127:0]) AND b[127:0] +IF (tmp[31] == 0 && tmp[63] == 0 && tmp[95] == 0 && tmp[127] == 0) + CF := 1 +ELSE + CF := 0 +FI +dst := ZF + + + AVX +
+ immintrin.h
+ Logical
- Logical + + + + Compute the bitwise AND of 128 bits (representing single-precision (32-bit) floating-point elements) in "a" and "b", producing an intermediate 128-bit value, and set "ZF" to 1 if the sign bit of each 32-bit element in the intermediate value is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", producing an intermediate value, and set "CF" to 1 if the sign bit of each 32-bit element in the intermediate value is zero, otherwise set "CF" to 0. Return the "CF" value. + +tmp[127:0] := a[127:0] AND b[127:0] +IF (tmp[31] == 0 && tmp[63] == 0 && tmp[95] == 0 && tmp[127] == 0) + ZF := 1 +ELSE + ZF := 0 +FI +tmp[127:0] := (NOT a[127:0]) AND b[127:0] +IF (tmp[31] == 0 && tmp[63] == 0 && tmp[95] == 0 && tmp[127] == 0) + CF := 1 +ELSE + CF := 0 +FI +dst := CF + + + AVX +
+ immintrin.h
+ Logical
- Logical + + + + Compute the bitwise AND of 128 bits (representing single-precision (32-bit) floating-point elements) in "a" and "b", producing an intermediate 128-bit value, and set "ZF" to 1 if the sign bit of each 32-bit element in the intermediate value is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", producing an intermediate value, and set "CF" to 1 if the sign bit of each 32-bit element in the intermediate value is zero, otherwise set "CF" to 0. Return 1 if both the "ZF" and "CF" values are zero, otherwise return 0. + +tmp[127:0] := a[127:0] AND b[127:0] +IF (tmp[31] == 0 && tmp[63] == 0 && tmp[95] == 0 && tmp[127] == 0) + ZF := 1 +ELSE + ZF := 0 +FI +tmp[127:0] := (NOT a[127:0]) AND b[127:0] +IF (tmp[31] == 0 && tmp[63] == 0 && tmp[95] == 0 && tmp[127] == 0) + CF := 1 +ELSE + CF := 0 +FI +IF (ZF == 0 && CF == 0) + dst := 1 +ELSE + dst := 0 +FI + + + AVX +
+ immintrin.h
+ Logical
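
The single-precision variants above only change the lane width (eight sign bits at 256 bits, four at 128), which gives a cheap "all lanes non-negative" check. A hedged sketch, helper name illustrative:

use std::arch::x86_64::*;

// Illustrative helper: ZF == 1 iff no lane of v has its sign bit set.
#[target_feature(enable = "avx")]
unsafe fn all_non_negative(v: __m128) -> bool {
    _mm_testz_ps(v, v) == 1
}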
- Swizzle + + + + + Blend packed double-precision (64-bit) floating-point elements from "a" and "b" using control mask "imm8", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + IF imm8[j] + dst[i+63:i] := b[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX +
+ immintrin.h
+ Swizzle
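
The imm8 control above maps to a const-generic parameter in Rust's stable _mm256_blend_pd; an illustrative sketch:

use std::arch::x86_64::*;

#[target_feature(enable = "avx")]
unsafe fn blend_even_from_b(a: __m256d, b: __m256d) -> __m256d {
    // imm8[j] = 1 takes lane j from b, 0 from a; 0b0101 takes
    // lanes 0 and 2 from b and lanes 1 and 3 from a.
    _mm256_blend_pd::<0b0101>(a, b)
}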
- Swizzle + + + + + Blend packed single-precision (32-bit) floating-point elements from "a" and "b" using control mask "imm8", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + IF imm8[j] + dst[i+31:i] := b[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX +
+ immintrin.h
+ Swizzle
- Swizzle + + + + + Blend packed double-precision (64-bit) floating-point elements from "a" and "b" using "mask", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + IF mask[i+63] + dst[i+63:i] := b[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX +
+ immintrin.h
+ Swizzle
- Swizzle + + + + + Blend packed single-precision (32-bit) floating-point elements from "a" and "b" using "mask", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + IF mask[i+31] + dst[i+31:i] := b[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX +
+ immintrin.h
+ Swizzle
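
Unlike the immediate blends, the blendv forms read the mask at run time from the sign bits of a third vector, so they compose naturally with the cmp intrinsics later in this file. A sketch, helper name illustrative:

use std::arch::x86_64::*;

#[target_feature(enable = "avx")]
unsafe fn pick_smaller(a: __m256d, b: __m256d) -> __m256d {
    // Lanes where b < a (ordered) get an all-ones mask lane, whose
    // sign bit makes blendv take b; NaN lanes fall back to a.
    let mask = _mm256_cmp_pd::<_CMP_LT_OQ>(b, a);
    _mm256_blendv_pd(a, b, mask)
}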
- Swizzle + + + + + Shuffle double-precision (64-bit) floating-point elements within 128-bit lanes using the control in "imm8", and store the results in "dst". + +dst[63:0] := (imm8[0] == 0) ? a[63:0] : a[127:64] +dst[127:64] := (imm8[1] == 0) ? b[63:0] : b[127:64] +dst[191:128] := (imm8[2] == 0) ? a[191:128] : a[255:192] +dst[255:192] := (imm8[3] == 0) ? b[191:128] : b[255:192] +dst[MAX:256] := 0 + + + AVX +
+ immintrin.h
+ Swizzle
- Swizzle + + + + + Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst". + +DEFINE SELECT4(src, control) { + CASE(control[1:0]) OF + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} +dst[31:0] := SELECT4(a[127:0], imm8[1:0]) +dst[63:32] := SELECT4(a[127:0], imm8[3:2]) +dst[95:64] := SELECT4(b[127:0], imm8[5:4]) +dst[127:96] := SELECT4(b[127:0], imm8[7:6]) +dst[159:128] := SELECT4(a[255:128], imm8[1:0]) +dst[191:160] := SELECT4(a[255:128], imm8[3:2]) +dst[223:192] := SELECT4(b[255:128], imm8[5:4]) +dst[255:224] := SELECT4(b[255:128], imm8[7:6]) +dst[MAX:256] := 0 + + + AVX +
+ immintrin.h
+ Swizzle
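
A sketch of the double-precision shuffle above, where each imm8 bit selects within one 128-bit lane (helper name illustrative):

use std::arch::x86_64::*;

#[target_feature(enable = "avx")]
unsafe fn shuffle_low_elems(a: __m256d, b: __m256d) -> __m256d {
    // imm8 = 0 keeps the low element of each source per lane,
    // producing { a0, b0, a2, b2 }.
    _mm256_shuffle_pd::<0b0000>(a, b)
}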
- Swizzle + + + + Extract 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "a", selected with "imm8", and store the result in "dst". + +CASE imm8[0] OF +0: dst[127:0] := a[127:0] +1: dst[127:0] := a[255:128] +ESAC +dst[MAX:128] := 0 + + + AVX +
+ immintrin.h
+ Swizzle
- Swizzle + + + + Extract 128 bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "a", selected with "imm8", and store the result in "dst". + +CASE imm8[0] OF +0: dst[127:0] := a[127:0] +1: dst[127:0] := a[255:128] +ESAC +dst[MAX:128] := 0 + + + AVX +
+ immintrin.h
+ Swizzle
- Swizzle + + + + Extract 128 bits (composed of integer data) from "a", selected with "imm8", and store the result in "dst". + +CASE imm8[0] OF +0: dst[127:0] := a[127:0] +1: dst[127:0] := a[255:128] +ESAC +dst[MAX:128] := 0 + + + AVX +
+ immintrin.h
+ Swizzle
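
All three extractf128 forms take the half-selector as imm8[0]; splitting a 256-bit register looks like this in Rust (illustrative sketch):

use std::arch::x86_64::*;

#[target_feature(enable = "avx")]
unsafe fn split(v: __m256) -> (__m128, __m128) {
    // imm8[0] = 0 selects bits 127:0, imm8[0] = 1 selects bits 255:128.
    (_mm256_extractf128_ps::<0>(v), _mm256_extractf128_ps::<1>(v))
}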
- Swizzle + + + + Extract a 32-bit integer from "a", selected with "index", and store the result in "dst". + +dst[31:0] := (a[255:0] >> (index[2:0] * 32))[31:0] + + AVX +
+ immintrin.h
+ Swizzle
- Swizzle + + + + Extract a 64-bit integer from "a", selected with "index", and store the result in "dst". + +dst[63:0] := (a[255:0] >> (index[1:0] * 64))[63:0] + + AVX +
+ immintrin.h
+ Swizzle
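
The scalar extracts are just a shift-and-truncate of the register, as the pseudocode shows; for example (illustrative helper):

use std::arch::x86_64::*;

#[target_feature(enable = "avx")]
unsafe fn lane2(v: __m256i) -> i32 {
    // index[2:0] = 2 shifts by 64 bits and keeps the low 32.
    _mm256_extract_epi32::<2>(v)
}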
- Swizzle + + + + Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "b", and store the results in "dst". + +DEFINE SELECT4(src, control) { + CASE(control[1:0]) OF + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} +dst[31:0] := SELECT4(a[127:0], b[1:0]) +dst[63:32] := SELECT4(a[127:0], b[33:32]) +dst[95:64] := SELECT4(a[127:0], b[65:64]) +dst[127:96] := SELECT4(a[127:0], b[97:96]) +dst[159:128] := SELECT4(a[255:128], b[129:128]) +dst[191:160] := SELECT4(a[255:128], b[161:160]) +dst[223:192] := SELECT4(a[255:128], b[193:192]) +dst[255:224] := SELECT4(a[255:128], b[225:224]) +dst[MAX:256] := 0 + + + AVX +
+ immintrin.h
+ Swizzle
- Swizzle + + + + Shuffle single-precision (32-bit) floating-point elements in "a" using the control in "b", and store the results in "dst". + +DEFINE SELECT4(src, control) { + CASE(control[1:0]) OF + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} +dst[31:0] := SELECT4(a[127:0], b[1:0]) +dst[63:32] := SELECT4(a[127:0], b[33:32]) +dst[95:64] := SELECT4(a[127:0], b[65:64]) +dst[127:96] := SELECT4(a[127:0], b[97:96]) +dst[MAX:128] := 0 + + + AVX +
+ immintrin.h
+ Swizzle
- Swizzle + + + + Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst". + +DEFINE SELECT4(src, control) { + CASE(control[1:0]) OF + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} +dst[31:0] := SELECT4(a[127:0], imm8[1:0]) +dst[63:32] := SELECT4(a[127:0], imm8[3:2]) +dst[95:64] := SELECT4(a[127:0], imm8[5:4]) +dst[127:96] := SELECT4(a[127:0], imm8[7:6]) +dst[159:128] := SELECT4(a[255:128], imm8[1:0]) +dst[191:160] := SELECT4(a[255:128], imm8[3:2]) +dst[223:192] := SELECT4(a[255:128], imm8[5:4]) +dst[255:224] := SELECT4(a[255:128], imm8[7:6]) +dst[MAX:256] := 0 + + + AVX +
+ immintrin.h
+ Swizzle
- Swizzle + + + + Shuffle single-precision (32-bit) floating-point elements in "a" using the control in "imm8", and store the results in "dst". + +DEFINE SELECT4(src, control) { + CASE(control[1:0]) OF + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} +dst[31:0] := SELECT4(a[127:0], imm8[1:0]) +dst[63:32] := SELECT4(a[127:0], imm8[3:2]) +dst[95:64] := SELECT4(a[127:0], imm8[5:4]) +dst[127:96] := SELECT4(a[127:0], imm8[7:6]) +dst[MAX:128] := 0 + + + AVX +
+ immintrin.h
+ Swizzle
- Swizzle + + + + Shuffle double-precision (64-bit) floating-point elements in "a" within 128-bit lanes using the control in "b", and store the results in "dst". + +IF (b[1] == 0) dst[63:0] := a[63:0]; FI +IF (b[1] == 1) dst[63:0] := a[127:64]; FI +IF (b[65] == 0) dst[127:64] := a[63:0]; FI +IF (b[65] == 1) dst[127:64] := a[127:64]; FI +IF (b[129] == 0) dst[191:128] := a[191:128]; FI +IF (b[129] == 1) dst[191:128] := a[255:192]; FI +IF (b[193] == 0) dst[255:192] := a[191:128]; FI +IF (b[193] == 1) dst[255:192] := a[255:192]; FI +dst[MAX:256] := 0 + + + AVX +
+ immintrin.h
+ Swizzle
- Swizzle + + + + Shuffle double-precision (64-bit) floating-point elements in "a" using the control in "b", and store the results in "dst". + +IF (b[1] == 0) dst[63:0] := a[63:0]; FI +IF (b[1] == 1) dst[63:0] := a[127:64]; FI +IF (b[65] == 0) dst[127:64] := a[63:0]; FI +IF (b[65] == 1) dst[127:64] := a[127:64]; FI +dst[MAX:128] := 0 + + + AVX +
+ immintrin.h
+ Swizzle
- Swizzle + + + + Shuffle double-precision (64-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst". + +IF (imm8[0] == 0) dst[63:0] := a[63:0]; FI +IF (imm8[0] == 1) dst[63:0] := a[127:64]; FI +IF (imm8[1] == 0) dst[127:64] := a[63:0]; FI +IF (imm8[1] == 1) dst[127:64] := a[127:64]; FI +IF (imm8[2] == 0) dst[191:128] := a[191:128]; FI +IF (imm8[2] == 1) dst[191:128] := a[255:192]; FI +IF (imm8[3] == 0) dst[255:192] := a[191:128]; FI +IF (imm8[3] == 1) dst[255:192] := a[255:192]; FI +dst[MAX:256] := 0 + + + AVX +
+ immintrin.h
+ Swizzle
- Swizzle + + + + Shuffle double-precision (64-bit) floating-point elements in "a" using the control in "imm8", and store the results in "dst". + +IF (imm8[0] == 0) dst[63:0] := a[63:0]; FI +IF (imm8[0] == 1) dst[63:0] := a[127:64]; FI +IF (imm8[1] == 0) dst[127:64] := a[63:0]; FI +IF (imm8[1] == 1) dst[127:64] := a[127:64]; FI +dst[MAX:128] := 0 + + + AVX +
+ immintrin.h
+ Swizzle
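
The permute/permutevar pairs above differ only in whether the SELECT4 control comes from an immediate or from a vector at run time; both apply the same control to each 128-bit lane. Two illustrative sketches (helper names hypothetical):

use std::arch::x86_64::*;

#[target_feature(enable = "avx")]
unsafe fn bcast_lane0_per_half(v: __m256) -> __m256 {
    // Immediate control 0b00_00_00_00 repeats element 0 of each half.
    _mm256_permute_ps::<0b00_00_00_00>(v)
}

#[target_feature(enable = "avx")]
unsafe fn reverse_per_half(v: __m256) -> __m256 {
    // Runtime control: select 3,2,1,0 within each 128-bit lane.
    _mm256_permutevar_ps(v, _mm256_setr_epi32(3, 2, 1, 0, 3, 2, 1, 0))
}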
- Swizzle + + + + + Shuffle 128-bits (composed of 4 packed single-precision (32-bit) floating-point elements) selected by "imm8" from "a" and "b", and store the results in "dst". + +DEFINE SELECT4(src1, src2, control) { + CASE(control[1:0]) OF + 0: tmp[127:0] := src1[127:0] + 1: tmp[127:0] := src1[255:128] + 2: tmp[127:0] := src2[127:0] + 3: tmp[127:0] := src2[255:128] + ESAC + IF control[3] + tmp[127:0] := 0 + FI + RETURN tmp[127:0] +} +dst[127:0] := SELECT4(a[255:0], b[255:0], imm8[3:0]) +dst[255:128] := SELECT4(a[255:0], b[255:0], imm8[7:4]) +dst[MAX:256] := 0 + + + AVX +
+ immintrin.h
+ Swizzle
- Swizzle + + + + + Shuffle 128-bits (composed of 2 packed double-precision (64-bit) floating-point elements) selected by "imm8" from "a" and "b", and store the results in "dst". + +DEFINE SELECT4(src1, src2, control) { + CASE(control[1:0]) OF + 0: tmp[127:0] := src1[127:0] + 1: tmp[127:0] := src1[255:128] + 2: tmp[127:0] := src2[127:0] + 3: tmp[127:0] := src2[255:128] + ESAC + IF control[3] + tmp[127:0] := 0 + FI + RETURN tmp[127:0] +} +dst[127:0] := SELECT4(a[255:0], b[255:0], imm8[3:0]) +dst[255:128] := SELECT4(a[255:0], b[255:0], imm8[7:4]) +dst[MAX:256] := 0 + + + AVX +
+ immintrin.h
+ Swizzle
- Swizzle + + + + + Shuffle 128-bits (composed of integer data) selected by "imm8" from "a" and "b", and store the results in "dst". + +DEFINE SELECT4(src1, src2, control) { + CASE(control[1:0]) OF + 0: tmp[127:0] := src1[127:0] + 1: tmp[127:0] := src1[255:128] + 2: tmp[127:0] := src2[127:0] + 3: tmp[127:0] := src2[255:128] + ESAC + IF control[3] + tmp[127:0] := 0 + FI + RETURN tmp[127:0] +} +dst[127:0] := SELECT4(a[255:0], b[255:0], imm8[3:0]) +dst[255:128] := SELECT4(a[255:0], b[255:0], imm8[7:4]) +dst[MAX:256] := 0 + + + AVX +
+ immintrin.h
+ Swizzle
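
permute2f128 selects whole 128-bit halves from either source (with a zeroing bit in each nibble); swapping the halves of one register is the classic use. An illustrative sketch:

use std::arch::x86_64::*;

#[target_feature(enable = "avx")]
unsafe fn swap_halves(v: __m256) -> __m256 {
    // Low nibble 1 = src1 high half; high nibble 0 = src1 low half.
    _mm256_permute2f128_ps::<0x01>(v, v)
}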
- Swizzle + + + + + Copy "a" to "dst", then insert 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "b" into "dst" at the location specified by "imm8". + +dst[255:0] := a[255:0] +CASE (imm8[0]) OF +0: dst[127:0] := b[127:0] +1: dst[255:128] := b[127:0] +ESAC +dst[MAX:256] := 0 + + + AVX +
+ immintrin.h
+ Swizzle
- Swizzle + + + + + Copy "a" to "dst", then insert 128 bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "b" into "dst" at the location specified by "imm8". + +dst[255:0] := a[255:0] +CASE imm8[0] OF +0: dst[127:0] := b[127:0] +1: dst[255:128] := b[127:0] +ESAC +dst[MAX:256] := 0 + + + AVX +
+ immintrin.h
+ Swizzle
- Swizzle + + + + + Copy "a" to "dst", then insert 128 bits from "b" into "dst" at the location specified by "imm8". + +dst[255:0] := a[255:0] +CASE (imm8[0]) OF +0: dst[127:0] := b[127:0] +1: dst[255:128] := b[127:0] +ESAC +dst[MAX:256] := 0 + + + AVX +
+ immintrin.h
+ Swizzle
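
insertf128 is the usual way to build a 256-bit value from two 128-bit halves; a hedged sketch, assuming the cast intrinsic leaves the upper half undefined until the insert overwrites it:

use std::arch::x86_64::*;

#[target_feature(enable = "avx")]
unsafe fn join(lo: __m128, hi: __m128) -> __m256 {
    // The cast's upper 128 bits are undefined; imm8[0] = 1 then
    // writes `hi` into bits 255:128.
    _mm256_insertf128_ps::<1>(_mm256_castps128_ps256(lo), hi)
}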
- Swizzle + + + + + Copy "a" to "dst", and insert the 8-bit integer "i" into "dst" at the location specified by "index". + +dst[255:0] := a[255:0] +sel := index[4:0]*8 +dst[sel+7:sel] := i[7:0] + + AVX +
+ immintrin.h
+ Swizzle
- Swizzle + + + + + Copy "a" to "dst", and insert the 16-bit integer "i" into "dst" at the location specified by "index". + +dst[255:0] := a[255:0] +sel := index[3:0]*16 +dst[sel+15:sel] := i[15:0] + + AVX +
+ immintrin.h
+ Swizzle
- Swizzle + + + + + Copy "a" to "dst", and insert the 32-bit integer "i" into "dst" at the location specified by "index". + +dst[255:0] := a[255:0] +sel := index[2:0]*32 +dst[sel+31:sel] := i[31:0] + + AVX +
+ immintrin.h
+ Swizzle
- Swizzle + + + + + Copy "a" to "dst", and insert the 64-bit integer "i" into "dst" at the location specified by "index". + +dst[255:0] := a[255:0] +sel := index[1:0]*64 +dst[sel+63:sel] := i[63:0] + + AVX +
+ immintrin.h
+ Swizzle
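
The scalar inserts copy the whole register and patch one lane in place, per the sel computation above; for example (illustrative helper):

use std::arch::x86_64::*;

#[target_feature(enable = "avx")]
unsafe fn patch_lane5(v: __m256i) -> __m256i {
    // sel = 5 * 32, so only bits 191:160 change.
    _mm256_insert_epi32::<5>(v, -1)
}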
- Swizzle + + + + Unpack and interleave double-precision (64-bit) floating-point elements from the high half of each 128-bit lane in "a" and "b", and store the results in "dst". + +DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) { + dst[63:0] := src1[127:64] + dst[127:64] := src2[127:64] + RETURN dst[127:0] +} +dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0]) +dst[255:128] := INTERLEAVE_HIGH_QWORDS(a[255:128], b[255:128]) +dst[MAX:256] := 0 + + + AVX +
+ immintrin.h
+ Swizzle
- Swizzle + + + + Unpack and interleave single-precision (32-bit) floating-point elements from the high half of each 128-bit lane in "a" and "b", and store the results in "dst". + +DEFINE INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]) { + dst[31:0] := src1[95:64] + dst[63:32] := src2[95:64] + dst[95:64] := src1[127:96] + dst[127:96] := src2[127:96] + RETURN dst[127:0] +} +dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0]) +dst[255:128] := INTERLEAVE_HIGH_DWORDS(a[255:128], b[255:128]) +dst[MAX:256] := 0 + + + AVX +
+ immintrin.h
+ Swizzle
- Swizzle + + + + Unpack and interleave double-precision (64-bit) floating-point elements from the low half of each 128-bit lane in "a" and "b", and store the results in "dst". + +DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) { + dst[63:0] := src1[63:0] + dst[127:64] := src2[63:0] + RETURN dst[127:0] +} +dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0]) +dst[255:128] := INTERLEAVE_QWORDS(a[255:128], b[255:128]) +dst[MAX:256] := 0 + + + AVX +
+ immintrin.h
+ Swizzle
- Swizzle + + + + Unpack and interleave single-precision (32-bit) floating-point elements from the low half of each 128-bit lane in "a" and "b", and store the results in "dst". + +DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) { + dst[31:0] := src1[31:0] + dst[63:32] := src2[31:0] + dst[95:64] := src1[63:32] + dst[127:96] := src2[63:32] + RETURN dst[127:0] +} +dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0]) +dst[255:128] := INTERLEAVE_DWORDS(a[255:128], b[255:128]) +dst[MAX:256] := 0 + + + AVX +
+ immintrin.h
+ Swizzle
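
Note that all four unpack forms interleave within each 128-bit lane rather than across the full register, which is easy to miss when porting SSE code; a sketch (helper name illustrative):

use std::arch::x86_64::*;

#[target_feature(enable = "avx")]
unsafe fn interleave(a: __m256, b: __m256) -> (__m256, __m256) {
    // Low and high interleaves, each performed per 128-bit lane.
    (_mm256_unpacklo_ps(a, b), _mm256_unpackhi_ps(a, b))
}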
- Special Math Functions + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst". [max_float_note] + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := MAX(a[i+63:i], b[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + + AVX +
+ immintrin.h
+ Special Math Functions
- Special Math Functions + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst". [max_float_note] + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := MAX(a[i+31:i], b[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + + AVX +
+ immintrin.h
+ Special Math Functions
- Special Math Functions + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst". [min_float_note] + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := MIN(a[i+63:i], b[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + + AVX +
+ immintrin.h
+ Special Math Functions
- Special Math Functions + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst". [min_float_note] + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := MIN(a[i+31:i], b[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + + AVX +
+ immintrin.h
+ Special Math Functions
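
Per the [max_float_note]/[min_float_note] referenced above, an unordered (NaN) comparison returns the second operand, so operand order matters when chaining these. An illustrative clamp:

use std::arch::x86_64::*;

#[target_feature(enable = "avx")]
unsafe fn clamp(v: __m256d, lo: __m256d, hi: __m256d) -> __m256d {
    // max then min; with v as the first operand, NaN lanes in v
    // resolve to the corresponding bound rather than propagating.
    _mm256_min_pd(_mm256_max_pd(v, lo), hi)
}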
- Special Math Functions + + + + Round the packed double-precision (64-bit) floating-point elements in "a" using the "rounding" parameter, and store the results as packed double-precision floating-point elements in "dst". + [round_note] + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := ROUND(a[i+63:i], rounding) +ENDFOR +dst[MAX:256] := 0 + + + AVX +
+ immintrin.h
+ Special Math Functions
- Special Math Functions + + + + Round the packed single-precision (32-bit) floating-point elements in "a" using the "rounding" parameter, and store the results as packed single-precision floating-point elements in "dst". + [round_note] + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := ROUND(a[i+31:i], rounding) +ENDFOR +dst[MAX:256] := 0 + + + AVX +
+ immintrin.h
+ Special Math Functions
- Special Math Functions + + + Round the packed single-precision (32-bit) floating-point elements in "a" down to an integer value, and store the results as packed single-precision floating-point elements in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := FLOOR(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + + AVX +
+ immintrin.h
+ Special Math Functions
- Special Math Functions + + + Round the packed single-precision (32-bit) floating-point elements in "a" up to an integer value, and store the results as packed single-precision floating-point elements in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := CEIL(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + + AVX +
+ immintrin.h
+ Special Math Functions
- Special Math Functions + + + Round the packed double-precision (64-bit) floating-point elements in "a" down to an integer value, and store the results as packed double-precision floating-point elements in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := FLOOR(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + + AVX +
+ immintrin.h
+ Special Math Functions
- Special Math Functions + + + Round the packed double-precision (64-bit) floating-point elements in "a" up to an integer value, and store the results as packed double-precision floating-point elements in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := CEIL(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + + AVX +
+ immintrin.h
+ Special Math Functions
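
The generic round form takes the rounding control as an immediate (see [round_note]); floor and ceil are fixed-mode shorthands. A sketch with the usual suppress-exceptions flag (helper name illustrative):

use std::arch::x86_64::*;

#[target_feature(enable = "avx")]
unsafe fn round_all(v: __m256d) -> (__m256d, __m256d, __m256d) {
    // vroundpd with round-to-nearest and exceptions suppressed.
    let near =
        _mm256_round_pd::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(v);
    (near, _mm256_floor_pd(v), _mm256_ceil_pd(v))
}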
- Compare + + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in "dst". + CASE (imm8[4:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := ( a[i+63:i] OP b[i+63:i] ) ? 0xFFFFFFFFFFFFFFFF : 0 +ENDFOR +dst[MAX:128] := 0 + + + AVX +
+ immintrin.h
+ Compare
- Compare + + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in "dst". + CASE (imm8[4:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := ( a[i+63:i] OP b[i+63:i] ) ? 0xFFFFFFFFFFFFFFFF : 0 +ENDFOR +dst[MAX:256] := 0 + + + AVX +
+ immintrin.h
+ Compare
- Compare + + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in "dst". + CASE (imm8[4:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ( a[i+31:i] OP b[i+31:i] ) ? 0xFFFFFFFF : 0 +ENDFOR +dst[MAX:128] := 0 + + + AVX +
+ immintrin.h
+ Compare
- Compare + + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in "dst". + CASE (imm8[4:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := ( a[i+31:i] OP b[i+31:i] ) ? 0xFFFFFFFF : 0 +ENDFOR +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Compare
- - - - - Compare the lower double-precision (64-bit) floating-point element in "a" and - "b" based on the comparison operand specified by "imm8", store the result in the lower - element of "dst", and copy the upper element from "a" to the upper element of "dst". - CASE (imm8[4:0]) OF - 0: OP := _CMP_EQ_OQ - 1: OP := _CMP_LT_OS - 2: OP := _CMP_LE_OS - 3: OP := _CMP_UNORD_Q - 4: OP := _CMP_NEQ_UQ - 5: OP := _CMP_NLT_US - 6: OP := _CMP_NLE_US - 7: OP := _CMP_ORD_Q - 8: OP := _CMP_EQ_UQ - 9: OP := _CMP_NGE_US - 10: OP := _CMP_NGT_US - 11: OP := _CMP_FALSE_OQ - 12: OP := _CMP_NEQ_OQ - 13: OP := _CMP_GE_OS - 14: OP := _CMP_GT_OS - 15: OP := _CMP_TRUE_UQ - 16: OP := _CMP_EQ_OS - 17: OP := _CMP_LT_OQ - 18: OP := _CMP_LE_OQ - 19: OP := _CMP_UNORD_S - 20: OP := _CMP_NEQ_US - 21: OP := _CMP_NLT_UQ - 22: OP := _CMP_NLE_UQ - 23: OP := _CMP_ORD_S - 24: OP := _CMP_EQ_US - 25: OP := _CMP_NGE_UQ - 26: OP := _CMP_NGT_UQ - 27: OP := _CMP_FALSE_OS - 28: OP := _CMP_NEQ_OS - 29: OP := _CMP_GE_OQ - 30: OP := _CMP_GT_OQ - 31: OP := _CMP_TRUE_US - ESAC - dst[63:0] := ( a[63:0] OP b[63:0] ) ? 0xFFFFFFFFFFFFFFFF : 0 - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX -
immintrin.h
- Compare + + + + + Compare the lower double-precision (64-bit) floating-point element in "a" and "b" based on the comparison operand specified by "imm8", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + CASE (imm8[4:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC +dst[63:0] := ( a[63:0] OP b[63:0] ) ? 0xFFFFFFFFFFFFFFFF : 0 +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX +
immintrin.h
+ Compare
- - - - - Compare the lower single-precision (32-bit) floating-point element in "a" and - "b" based on the comparison operand specified by "imm8", store the result in the lower - element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of - "dst". - CASE (imm8[4:0]) OF - 0: OP := _CMP_EQ_OQ - 1: OP := _CMP_LT_OS - 2: OP := _CMP_LE_OS - 3: OP := _CMP_UNORD_Q - 4: OP := _CMP_NEQ_UQ - 5: OP := _CMP_NLT_US - 6: OP := _CMP_NLE_US - 7: OP := _CMP_ORD_Q - 8: OP := _CMP_EQ_UQ - 9: OP := _CMP_NGE_US - 10: OP := _CMP_NGT_US - 11: OP := _CMP_FALSE_OQ - 12: OP := _CMP_NEQ_OQ - 13: OP := _CMP_GE_OS - 14: OP := _CMP_GT_OS - 15: OP := _CMP_TRUE_UQ - 16: OP := _CMP_EQ_OS - 17: OP := _CMP_LT_OQ - 18: OP := _CMP_LE_OQ - 19: OP := _CMP_UNORD_S - 20: OP := _CMP_NEQ_US - 21: OP := _CMP_NLT_UQ - 22: OP := _CMP_NLE_UQ - 23: OP := _CMP_ORD_S - 24: OP := _CMP_EQ_US - 25: OP := _CMP_NGE_UQ - 26: OP := _CMP_NGT_UQ - 27: OP := _CMP_FALSE_OS - 28: OP := _CMP_NEQ_OS - 29: OP := _CMP_GE_OQ - 30: OP := _CMP_GT_OQ - 31: OP := _CMP_TRUE_US - ESAC - dst[31:0] := ( a[31:0] OP b[31:0] ) ? 0xFFFFFFFF : 0 - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX -
immintrin.h
- Compare + + + + + Compare the lower single-precision (32-bit) floating-point element in "a" and "b" based on the comparison operand specified by "imm8", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + CASE (imm8[4:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC +dst[31:0] := ( a[31:0] OP b[31:0] ) ? 0xFFFFFFFF : 0 +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX +
immintrin.h
+ Compare
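The four compare entries above surface in Rust as the `_mm_cmp_ps`/`_mm256_cmp_ps`/`_mm_cmp_sd`/`_mm_cmp_ss` intrinsics of `core::arch::x86_64`, with the 5-bit predicate passed as a const generic. A minimal illustrative sketch (not part of this patch; the helper name `lanes_lt` is made up, and an AVX-capable x86_64 target is assumed):

    use core::arch::x86_64::*;

    // Lane-wise `a < b` with the ordered, non-signaling predicate: each true
    // lane becomes all-ones (0xFFFFFFFF), each false lane becomes all-zeros.
    #[target_feature(enable = "avx")]
    unsafe fn lanes_lt(a: __m256, b: __m256) -> __m256 {
        _mm256_cmp_ps::<_CMP_LT_OQ>(a, b)
    }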
- - - Convert packed signed 32-bit integers in "a" to packed double-precision - (64-bit) floating-point elements, and store the results in "dst". - - FOR j := 0 to 3 - i := j*32 - m := j*64 - dst[m+63:m] := Convert_Int32_To_FP64(a[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Convert + + + Convert packed signed 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + m := j*64 + dst[m+63:m] := Convert_Int32_To_FP64(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Convert
- - - Convert packed signed 32-bit integers in "a" to packed single-precision - (32-bit) floating-point elements, and store the results in "dst". - - FOR j := 0 to 7 - i := 32*j - dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Convert + + + Convert packed signed 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 7 + i := 32*j + dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Convert
- - - Convert packed double-precision (64-bit) floating-point elements in "a" to - packed single-precision (32-bit) floating-point elements, and store the results in - "dst". - - FOR j := 0 to 3 - i := 32*j - k := 64*j - dst[i+31:i] := Convert_FP64_To_FP32(a[k+63:k]) - ENDFOR - dst[MAX:128] := 0 - - - AVX -
immintrin.h
- Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 3 + i := 32*j + k := 64*j + dst[i+31:i] := Convert_FP64_To_FP32(a[k+63:k]) +ENDFOR +dst[MAX:128] := 0 + + + AVX +
immintrin.h
+ Convert
- - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed 32-bit integers, and store the results in "dst". - - FOR j := 0 to 7 - i := 32*j - dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst". + +FOR j := 0 to 7 + i := 32*j + dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Convert
- - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed double-precision (64-bit) floating-point elements, and store the results in - "dst". - - FOR j := 0 to 3 - i := 64*j - k := 32*j - dst[i+63:i] := Convert_FP32_To_FP64(a[k+31:k]) - ENDFOR - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 3 + i := 64*j + k := 32*j + dst[i+63:i] := Convert_FP32_To_FP64(a[k+31:k]) +ENDFOR +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Convert
- - - Convert packed double-precision (64-bit) floating-point elements in "a" to - packed 32-bit integers with truncation, and store the results in "dst". - - FOR j := 0 to 3 - i := 32*j - k := 64*j - dst[i+31:i] := Convert_FP64_To_Int32_Truncate(a[k+63:k]) - ENDFOR - dst[MAX:128] := 0 - - - AVX -
immintrin.h
- Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 3 + i := 32*j + k := 64*j + dst[i+31:i] := Convert_FP64_To_Int32_Truncate(a[k+63:k]) +ENDFOR +dst[MAX:128] := 0 + + + AVX +
immintrin.h
+ Convert
- - - Convert packed double-precision (64-bit) floating-point elements in "a" to - packed 32-bit integers, and store the results in "dst". - - FOR j := 0 to 3 - i := 32*j - k := 64*j - dst[i+31:i] := Convert_FP64_To_Int32(a[k+63:k]) - ENDFOR - dst[MAX:128] := 0 - - - AVX -
immintrin.h
- Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst". + +FOR j := 0 to 3 + i := 32*j + k := 64*j + dst[i+31:i] := Convert_FP64_To_Int32(a[k+63:k]) +ENDFOR +dst[MAX:128] := 0 + + + AVX +
immintrin.h
+ Convert
- - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed 32-bit integers with truncation, and store the results in "dst". - - FOR j := 0 to 7 - i := 32*j - dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 7 + i := 32*j + dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Convert
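These packed conversions map onto the `_mm256_cvt*`/`_mm256_cvtt*` intrinsics on the Rust side; the double-t variants truncate toward zero while the plain ones use the current rounding mode (round-to-nearest-even by default). An illustrative sketch under the same assumptions as above (hypothetical helper name):

    use core::arch::x86_64::*;

    // Narrow four f64 lanes to f32 (128-bit result) and to i32 with truncation.
    #[target_feature(enable = "avx")]
    unsafe fn narrow_f64(a: __m256d) -> (__m128, __m128i) {
        (_mm256_cvtpd_ps(a), _mm256_cvttpd_epi32(a))
    }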
- - - Copy the lower single-precision (32-bit) floating-point element of "a" to - "dst". - - dst[31:0] := a[31:0] - - - AVX -
immintrin.h
- Convert + + + Copy the lower single-precision (32-bit) floating-point element of "a" to "dst". + +dst[31:0] := a[31:0] + + + AVX +
immintrin.h
+ Convert
- - - Copy the lower double-precision (64-bit) floating-point element of "a" to - "dst". - - dst[63:0] := a[63:0] - - - AVX -
immintrin.h
- Convert + + + Copy the lower double-precision (64-bit) floating-point element of "a" to "dst". + +dst[63:0] := a[63:0] + + + AVX +
immintrin.h
+ Convert
- - - Copy the lower 32-bit integer in "a" to "dst". - - dst[31:0] := a[31:0] - - - AVX -
immintrin.h
- Convert + + + Copy the lower 32-bit integer in "a" to "dst". + +dst[31:0] := a[31:0] + + + AVX +
immintrin.h
+ Convert
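The three "copy the lower element" entries are plain lane extractions, `_mm256_cvtss_f32`, `_mm256_cvtsd_f64` and `_mm256_cvtsi256_si32` in Rust, typically compiling to a single move or to nothing at all. Sketch (hypothetical helper, same target assumptions):

    use core::arch::x86_64::*;

    #[target_feature(enable = "avx")]
    unsafe fn lowest_lanes(a: __m256, b: __m256d, c: __m256i) -> (f32, f64, i32) {
        (_mm256_cvtss_f32(a), _mm256_cvtsd_f64(b), _mm256_cvtsi256_si32(c))
    }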
- - - Zero the contents of all XMM or YMM registers. - YMM0[MAX:0] := 0 - YMM1[MAX:0] := 0 - YMM2[MAX:0] := 0 - YMM3[MAX:0] := 0 - YMM4[MAX:0] := 0 - YMM5[MAX:0] := 0 - YMM6[MAX:0] := 0 - YMM7[MAX:0] := 0 - IF _64_BIT_MODE - YMM8[MAX:0] := 0 - YMM9[MAX:0] := 0 - YMM10[MAX:0] := 0 - YMM11[MAX:0] := 0 - YMM12[MAX:0] := 0 - YMM13[MAX:0] := 0 - YMM14[MAX:0] := 0 - YMM15[MAX:0] := 0 - FI - - - AVX -
immintrin.h
- General Support + + + Zero the contents of all XMM or YMM registers. + YMM0[MAX:0] := 0 +YMM1[MAX:0] := 0 +YMM2[MAX:0] := 0 +YMM3[MAX:0] := 0 +YMM4[MAX:0] := 0 +YMM5[MAX:0] := 0 +YMM6[MAX:0] := 0 +YMM7[MAX:0] := 0 +IF _64_BIT_MODE + YMM8[MAX:0] := 0 + YMM9[MAX:0] := 0 + YMM10[MAX:0] := 0 + YMM11[MAX:0] := 0 + YMM12[MAX:0] := 0 + YMM13[MAX:0] := 0 + YMM14[MAX:0] := 0 + YMM15[MAX:0] := 0 +FI + + + AVX +
immintrin.h
+ General Support
- - - Zero the upper 128 bits of all YMM registers; the lower 128-bits of the - registers are unmodified. - YMM0[MAX:128] := 0 - YMM1[MAX:128] := 0 - YMM2[MAX:128] := 0 - YMM3[MAX:128] := 0 - YMM4[MAX:128] := 0 - YMM5[MAX:128] := 0 - YMM6[MAX:128] := 0 - YMM7[MAX:128] := 0 - IF _64_BIT_MODE - YMM8[MAX:128] := 0 - YMM9[MAX:128] := 0 - YMM10[MAX:128] := 0 - YMM11[MAX:128] := 0 - YMM12[MAX:128] := 0 - YMM13[MAX:128] := 0 - YMM14[MAX:128] := 0 - YMM15[MAX:128] := 0 - FI - - - AVX -
immintrin.h
- General Support + + + Zero the upper 128 bits of all YMM registers; the lower 128-bits of the registers are unmodified. + YMM0[MAX:128] := 0 +YMM1[MAX:128] := 0 +YMM2[MAX:128] := 0 +YMM3[MAX:128] := 0 +YMM4[MAX:128] := 0 +YMM5[MAX:128] := 0 +YMM6[MAX:128] := 0 +YMM7[MAX:128] := 0 +IF _64_BIT_MODE + YMM8[MAX:128] := 0 + YMM9[MAX:128] := 0 + YMM10[MAX:128] := 0 + YMM11[MAX:128] := 0 + YMM12[MAX:128] := 0 + YMM13[MAX:128] := 0 + YMM14[MAX:128] := 0 + YMM15[MAX:128] := 0 +FI + + + AVX +
immintrin.h
+ General Support
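`_mm256_zeroall`/`_mm256_zeroupper` exist mostly to avoid AVX-to-SSE transition penalties when VEX-encoded code hands control to legacy SSE code; compilers normally insert VZEROUPPER automatically, so explicit calls are rare. Sketch (hypothetical helper):

    use core::arch::x86_64::*;

    // Clear the upper YMM halves before calling into legacy SSE code.
    #[target_feature(enable = "avx")]
    unsafe fn before_legacy_sse() {
        _mm256_zeroupper();
    }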
- - - Return vector of type __m256 with undefined elements. - AVX -
immintrin.h
- General Support + + + Return vector of type __m256 with undefined elements. + AVX +
immintrin.h
+ General Support
- - - Return vector of type __m256d with undefined elements. - AVX -
immintrin.h
- General Support + + + Return vector of type __m256d with undefined elements. + AVX +
immintrin.h
+ General Support
- - - Return vector of type __m256i with undefined elements. - AVX -
immintrin.h
- General Support + + + Return vector of type __m256i with undefined elements. + AVX +
immintrin.h
+ General Support
- - - Broadcast a single-precision (32-bit) floating-point element from memory to all - elements of "dst". - - tmp[31:0] := MEM[mem_addr+31:mem_addr] - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := tmp[31:0] - ENDFOR - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Load + + + Broadcast a single-precision (32-bit) floating-point element from memory to all elements of "dst". + +tmp[31:0] := MEM[mem_addr+31:mem_addr] +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := tmp[31:0] +ENDFOR +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Load
- Swizzle - - - Broadcast a single-precision (32-bit) floating-point element from memory to all - elements of "dst". - - tmp[31:0] := MEM[mem_addr+31:mem_addr] - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := tmp[31:0] - ENDFOR - dst[MAX:128] := 0 - - - AVX -
immintrin.h
- Load + Swizzle + + + Broadcast a single-precision (32-bit) floating-point element from memory to all elements of "dst". + +tmp[31:0] := MEM[mem_addr+31:mem_addr] +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := tmp[31:0] +ENDFOR +dst[MAX:128] := 0 + + + AVX +
immintrin.h
+ Load
- Swizzle - - - Broadcast a double-precision (64-bit) floating-point element from memory to all - elements of "dst". - - tmp[63:0] := MEM[mem_addr+63:mem_addr] - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := tmp[63:0] - ENDFOR - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Load + Swizzle + + + Broadcast a double-precision (64-bit) floating-point element from memory to all elements of "dst". + +tmp[63:0] := MEM[mem_addr+63:mem_addr] +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := tmp[63:0] +ENDFOR +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Load
- Swizzle - - - Broadcast 128 bits from memory (composed of 4 packed single-precision (32-bit) - floating-point elements) to all elements of "dst". - - tmp[127:0] := MEM[mem_addr+127:mem_addr] - dst[127:0] := tmp[127:0] - dst[255:128] := tmp[127:0] - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Load + Swizzle + + + Broadcast 128 bits from memory (composed of 4 packed single-precision (32-bit) floating-point elements) to all elements of "dst". + +tmp[127:0] := MEM[mem_addr+127:mem_addr] +dst[127:0] := tmp[127:0] +dst[255:128] := tmp[127:0] +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Load
- Swizzle - - - Broadcast 128 bits from memory (composed of 2 packed double-precision (64-bit) - floating-point elements) to all elements of "dst". - - tmp[127:0] := MEM[mem_addr+127:mem_addr] - dst[127:0] := tmp[127:0] - dst[255:128] := tmp[127:0] - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Load + Swizzle + + + Broadcast 128 bits from memory (composed of 2 packed double-precision (64-bit) floating-point elements) to all elements of "dst". + +tmp[127:0] := MEM[mem_addr+127:mem_addr] +dst[127:0] := tmp[127:0] +dst[255:128] := tmp[127:0] +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Load
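On the Rust side the broadcast loads take references rather than raw pointers. Sketch (hypothetical helper, AVX assumed as before):

    use core::arch::x86_64::*;

    // Splat one f32 into all eight lanes, and one 128-bit block into both halves.
    #[target_feature(enable = "avx")]
    unsafe fn splat(x: &f32, quad: &__m128) -> (__m256, __m256) {
        (_mm256_broadcast_ss(x), _mm256_broadcast_ps(quad))
    }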
- - - Load 256-bits (composed of 4 packed double-precision (64-bit) floating-point - elements) from memory into "dst". - "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may - be generated. - - dst[255:0] := MEM[mem_addr+255:mem_addr] - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Load + + + Load 256-bits (composed of 4 packed double-precision (64-bit) floating-point elements) from memory into "dst". + "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated. + +dst[255:0] := MEM[mem_addr+255:mem_addr] +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Load
- - - Load 256-bits (composed of 8 packed single-precision (32-bit) floating-point - elements) from memory into "dst". - "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may - be generated. - - dst[255:0] := MEM[mem_addr+255:mem_addr] - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Load + + + Load 256-bits (composed of 8 packed single-precision (32-bit) floating-point elements) from memory into "dst". + "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated. + +dst[255:0] := MEM[mem_addr+255:mem_addr] +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Load
- - - Load 256-bits (composed of 4 packed double-precision (64-bit) floating-point - elements) from memory into "dst". - "mem_addr" does not need to be aligned on any particular boundary. - - dst[255:0] := MEM[mem_addr+255:mem_addr] - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Load + + + Load 256-bits (composed of 4 packed double-precision (64-bit) floating-point elements) from memory into "dst". + "mem_addr" does not need to be aligned on any particular boundary. + +dst[255:0] := MEM[mem_addr+255:mem_addr] +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Load
- - - Load 256-bits (composed of 8 packed single-precision (32-bit) floating-point - elements) from memory into "dst". - "mem_addr" does not need to be aligned on any particular boundary. - - dst[255:0] := MEM[mem_addr+255:mem_addr] - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Load + + + Load 256-bits (composed of 8 packed single-precision (32-bit) floating-point elements) from memory into "dst". + "mem_addr" does not need to be aligned on any particular boundary. + +dst[255:0] := MEM[mem_addr+255:mem_addr] +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Load
- - - Load 256-bits of integer data from memory into "dst". - "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may - be generated. - - dst[255:0] := MEM[mem_addr+255:mem_addr] - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Load + + + Load 256-bits of integer data from memory into "dst". + "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated. + +dst[255:0] := MEM[mem_addr+255:mem_addr] +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Load
- - - Load 256-bits of integer data from memory into "dst". - "mem_addr" does not need to be aligned on any particular boundary. - - dst[255:0] := MEM[mem_addr+255:mem_addr] - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Load + + + Load 256-bits of integer data from memory into "dst". + "mem_addr" does not need to be aligned on any particular boundary. + +dst[255:0] := MEM[mem_addr+255:mem_addr] +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Load
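The aligned/unaligned split matters in practice: the `load` forms may fault on addresses that are not 32-byte aligned, the `loadu` forms accept anything. Sketch (hypothetical helper; the caller guarantees at least 8 readable f32s, exactly as with the raw intrinsic):

    use core::arch::x86_64::*;

    #[target_feature(enable = "avx")]
    unsafe fn first8(data: &[f32]) -> __m256 {
        debug_assert!(data.len() >= 8);
        // _mm256_load_ps would additionally require a 32-byte aligned pointer.
        _mm256_loadu_ps(data.as_ptr())
    }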
- - - - Load packed double-precision (64-bit) floating-point elements from memory into - "dst" using "mask" (elements are zeroed out when the high bit of the corresponding - element is not set). - - FOR j := 0 to 3 - i := j*64 - IF mask[i+63] - dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Load + + + + Load packed double-precision (64-bit) floating-point elements from memory into "dst" using "mask" (elements are zeroed out when the high bit of the corresponding element is not set). + +FOR j := 0 to 3 + i := j*64 + IF mask[i+63] + dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Load
- - - - Load packed double-precision (64-bit) floating-point elements from memory into - "dst" using "mask" (elements are zeroed out when the high bit of the corresponding - element is not set). - - FOR j := 0 to 1 - i := j*64 - IF mask[i+63] - dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX -
immintrin.h
- Load + + + + Load packed double-precision (64-bit) floating-point elements from memory into "dst" using "mask" (elements are zeroed out when the high bit of the corresponding element is not set). + +FOR j := 0 to 1 + i := j*64 + IF mask[i+63] + dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX +
immintrin.h
+ Load
- - - - Load packed single-precision (32-bit) floating-point elements from memory into - "dst" using "mask" (elements are zeroed out when the high bit of the corresponding - element is not set). - - FOR j := 0 to 7 - i := j*32 - IF mask[i+31] - dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Load + + + + Load packed single-precision (32-bit) floating-point elements from memory into "dst" using "mask" (elements are zeroed out when the high bit of the corresponding element is not set). + +FOR j := 0 to 7 + i := j*32 + IF mask[i+31] + dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Load
- - - - Load packed single-precision (32-bit) floating-point elements from memory into - "dst" using "mask" (elements are zeroed out when the high bit of the corresponding - element is not set). - - FOR j := 0 to 3 - i := j*32 - IF mask[i+31] - dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX -
immintrin.h
- Load + + + + Load packed single-precision (32-bit) floating-point elements from memory into "dst" using "mask" (elements are zeroed out when the high bit of the corresponding element is not set). + +FOR j := 0 to 3 + i := j*32 + IF mask[i+31] + dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX +
immintrin.h
+ Load
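Because masked-off lanes are not read at all, these maskload entries are the standard way to handle a partial tail without reading past the end of a buffer. Sketch (hypothetical helper; `n <= 8` assumed):

    use core::arch::x86_64::*;

    // Load the first n f32s; lanes whose mask element has a clear high bit
    // are zeroed, and their memory is never touched.
    #[target_feature(enable = "avx")]
    unsafe fn load_prefix(p: *const f32, n: usize) -> __m256 {
        let m: [i32; 8] = core::array::from_fn(|i| if i < n { -1 } else { 0 });
        _mm256_maskload_ps(p, _mm256_loadu_si256(m.as_ptr() as *const __m256i))
    }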
- - - Load 256-bits of integer data from unaligned memory into "dst". This intrinsic - may perform better than "_mm256_loadu_si256" when the data crosses a cache line - boundary. - - dst[255:0] := MEM[mem_addr+255:mem_addr] - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Load + + + Load 256-bits of integer data from unaligned memory into "dst". This intrinsic may perform better than "_mm256_loadu_si256" when the data crosses a cache line boundary. + +dst[255:0] := MEM[mem_addr+255:mem_addr] +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Load
- - - - Load two 128-bit values (composed of 4 packed single-precision (32-bit) - floating-point elements) from memory, and combine them into a 256-bit value in "dst". - "hiaddr" and "loaddr" do not need to be aligned on any particular boundary. - - dst[127:0] := MEM[loaddr+127:loaddr] - dst[255:128] := MEM[hiaddr+127:hiaddr] - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Load + + + + Load two 128-bit values (composed of 4 packed single-precision (32-bit) floating-point elements) from memory, and combine them into a 256-bit value in "dst". + "hiaddr" and "loaddr" do not need to be aligned on any particular boundary. + +dst[127:0] := MEM[loaddr+127:loaddr] +dst[255:128] := MEM[hiaddr+127:hiaddr] +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Load
- - - - Load two 128-bit values (composed of 2 packed double-precision (64-bit) - floating-point elements) from memory, and combine them into a 256-bit value in "dst". - "hiaddr" and "loaddr" do not need to be aligned on any particular boundary. - - dst[127:0] := MEM[loaddr+127:loaddr] - dst[255:128] := MEM[hiaddr+127:hiaddr] - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Load + + + + Load two 128-bit values (composed of 2 packed double-precision (64-bit) floating-point elements) from memory, and combine them into a 256-bit value in "dst". + "hiaddr" and "loaddr" do not need to be aligned on any particular boundary. + +dst[127:0] := MEM[loaddr+127:loaddr] +dst[255:128] := MEM[hiaddr+127:hiaddr] +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Load
- - - - Load two 128-bit values (composed of integer data) from memory, and combine - them into a 256-bit value in "dst". - "hiaddr" and "loaddr" do not need to be aligned on any particular boundary. - - dst[127:0] := MEM[loaddr+127:loaddr] - dst[255:128] := MEM[hiaddr+127:hiaddr] - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Load + + + + Load two 128-bit values (composed of integer data) from memory, and combine them into a 256-bit value in "dst". + "hiaddr" and "loaddr" do not need to be aligned on any particular boundary. + +dst[127:0] := MEM[loaddr+127:loaddr] +dst[255:128] := MEM[hiaddr+127:hiaddr] +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Load
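Sketch of the two-halves load (hypothetical helper); note the argument order, high address first:

    use core::arch::x86_64::*;

    // Gather two unrelated 128-bit rows into one 256-bit register.
    #[target_feature(enable = "avx")]
    unsafe fn combine_rows(row_hi: *const f32, row_lo: *const f32) -> __m256 {
        _mm256_loadu2_m128(row_hi, row_lo)
    }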
- - - - Store 256-bits (composed of 4 packed double-precision (64-bit) floating-point - elements) from "a" into memory. - "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may - be generated. - - MEM[mem_addr+255:mem_addr] := a[255:0] - - - AVX -
immintrin.h
- Store + + + + Store 256-bits (composed of 4 packed double-precision (64-bit) floating-point elements) from "a" into memory. + "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated. + +MEM[mem_addr+255:mem_addr] := a[255:0] + + + AVX +
immintrin.h
+ Store
- - - - Store 256-bits (composed of 8 packed single-precision (32-bit) floating-point - elements) from "a" into memory. - "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may - be generated. - - MEM[mem_addr+255:mem_addr] := a[255:0] - - - AVX -
immintrin.h
- Store + + + + Store 256-bits (composed of 8 packed single-precision (32-bit) floating-point elements) from "a" into memory. + "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated. + +MEM[mem_addr+255:mem_addr] := a[255:0] + + + AVX +
immintrin.h
+ Store
- - - - Store 256-bits (composed of 4 packed double-precision (64-bit) floating-point - elements) from "a" into memory. - "mem_addr" does not need to be aligned on any particular boundary. - - MEM[mem_addr+255:mem_addr] := a[255:0] - - - AVX -
immintrin.h
- Store + + + + Store 256-bits (composed of 4 packed double-precision (64-bit) floating-point elements) from "a" into memory. + "mem_addr" does not need to be aligned on any particular boundary. + +MEM[mem_addr+255:mem_addr] := a[255:0] + + + AVX +
immintrin.h
+ Store
- - - - Store 256-bits (composed of 8 packed single-precision (32-bit) floating-point - elements) from "a" into memory. - "mem_addr" does not need to be aligned on any particular boundary. - - MEM[mem_addr+255:mem_addr] := a[255:0] - - - AVX -
immintrin.h
- Store + + + + Store 256-bits (composed of 8 packed single-precision (32-bit) floating-point elements) from "a" into memory. + "mem_addr" does not need to be aligned on any particular boundary. + +MEM[mem_addr+255:mem_addr] := a[255:0] + + + AVX +
immintrin.h
+ Store
- - - - Store 256-bits of integer data from "a" into memory. - "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may - be generated. - - MEM[mem_addr+255:mem_addr] := a[255:0] - - - AVX -
immintrin.h
- Store + + + + Store 256-bits of integer data from "a" into memory. + "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated. + +MEM[mem_addr+255:mem_addr] := a[255:0] + + + AVX +
immintrin.h
+ Store
- - - - Store 256-bits of integer data from "a" into memory. - "mem_addr" does not need to be aligned on any particular boundary. - - MEM[mem_addr+255:mem_addr] := a[255:0] - - - AVX -
immintrin.h
- Store + + + + Store 256-bits of integer data from "a" into memory. + "mem_addr" does not need to be aligned on any particular boundary. + +MEM[mem_addr+255:mem_addr] := a[255:0] + + + AVX +
immintrin.h
+ Store
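Mirroring the loads, the `store` forms require 32-byte alignment and the `storeu` forms do not. Sketch (hypothetical helper):

    use core::arch::x86_64::*;

    // Spill a vector to a plain array via the unaligned store.
    #[target_feature(enable = "avx")]
    unsafe fn to_array(v: __m256) -> [f32; 8] {
        let mut out = [0.0f32; 8];
        _mm256_storeu_ps(out.as_mut_ptr(), v);
        out
    }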
- - - - - Store packed double-precision (64-bit) floating-point elements from "a" into - memory using "mask". - - FOR j := 0 to 3 - i := j*64 - IF mask[i+63] - MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i] - FI - ENDFOR - - - AVX -
immintrin.h
- Store + + + + + Store packed double-precision (64-bit) floating-point elements from "a" into memory using "mask". + +FOR j := 0 to 3 + i := j*64 + IF mask[i+63] + MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i] + FI +ENDFOR + + + AVX +
immintrin.h
+ Store
- - - - - Store packed double-precision (64-bit) floating-point elements from "a" into - memory using "mask". - - FOR j := 0 to 1 - i := j*64 - IF mask[i+63] - MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i] - FI - ENDFOR - - - AVX -
immintrin.h
- Store + + + + + Store packed double-precision (64-bit) floating-point elements from "a" into memory using "mask". + +FOR j := 0 to 1 + i := j*64 + IF mask[i+63] + MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i] + FI +ENDFOR + + + AVX +
immintrin.h
+ Store
- - - - - Store packed single-precision (32-bit) floating-point elements from "a" into - memory using "mask". - - FOR j := 0 to 7 - i := j*32 - IF mask[i+31] - MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i] - FI - ENDFOR - - - AVX -
immintrin.h
- Store + + + + + Store packed single-precision (32-bit) floating-point elements from "a" into memory using "mask". + +FOR j := 0 to 7 + i := j*32 + IF mask[i+31] + MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i] + FI +ENDFOR + + + AVX +
immintrin.h
+ Store
- - - - - Store packed single-precision (32-bit) floating-point elements from "a" into - memory using "mask". - - FOR j := 0 to 3 - i := j*32 - IF mask[i+31] - MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i] - FI - ENDFOR - - - AVX -
immintrin.h
- Store + + + + + Store packed single-precision (32-bit) floating-point elements from "a" into memory using "mask". + +FOR j := 0 to 3 + i := j*32 + IF mask[i+31] + MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i] + FI +ENDFOR + + + AVX +
immintrin.h
+ Store
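The store-side counterpart of the masked tail-load sketched earlier: masked-off lanes leave memory untouched, so a partial row can be written back without clobbering its neighbours (hypothetical helper, `n <= 8`):

    use core::arch::x86_64::*;

    #[target_feature(enable = "avx")]
    unsafe fn store_prefix(p: *mut f32, n: usize, v: __m256) {
        let m: [i32; 8] = core::array::from_fn(|i| if i < n { -1 } else { 0 });
        _mm256_maskstore_ps(p, _mm256_loadu_si256(m.as_ptr() as *const __m256i), v);
    }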
- - - - Store 256-bits of integer data from "a" into memory using a non-temporal memory - hint. - "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may - be generated. - - MEM[mem_addr+255:mem_addr] := a[255:0] - - - AVX -
immintrin.h
- Store + + + + Store 256-bits of integer data from "a" into memory using a non-temporal memory hint. + "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated. + +MEM[mem_addr+255:mem_addr] := a[255:0] + + + AVX +
immintrin.h
+ Store
- - - - Store 256-bits (composed of 4 packed double-precision (64-bit) floating-point - elements) from "a" into memory using a non-temporal memory hint. - "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may - be generated. - - MEM[mem_addr+255:mem_addr] := a[255:0] - - - AVX -
immintrin.h
- Store + + + + Store 256-bits (composed of 4 packed double-precision (64-bit) floating-point elements) from "a" into memory using a non-temporal memory hint. + "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated. + +MEM[mem_addr+255:mem_addr] := a[255:0] + + + AVX +
immintrin.h
+ Store
- - - - Store 256-bits (composed of 8 packed single-precision (32-bit) floating-point - elements) from "a" into memory using a non-temporal memory hint. - "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may - be generated. - - MEM[mem_addr+255:mem_addr] := a[255:0] - - - AVX -
immintrin.h
- Store + + + + Store 256-bits (composed of 8 packed single-precision (32-bit) floating-point elements) from "a" into memory using a non-temporal memory hint. + "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated. + +MEM[mem_addr+255:mem_addr] := a[255:0] + + + AVX +
immintrin.h
+ Store
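The streaming stores bypass the cache for large write-only buffers; the destination must be 32-byte aligned, and ordering against later code is only re-established by a store fence, typically one `_mm_sfence` after the whole loop. Sketch (hypothetical helper):

    use core::arch::x86_64::*;

    #[target_feature(enable = "avx")]
    unsafe fn stream_one(dst: *mut f32, v: __m256) {
        _mm256_stream_ps(dst, v); // dst must be 32-byte aligned
        _mm_sfence();             // usually issued once, after the last store
    }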
- - - - - Store the high and low 128-bit halves (each composed of 4 packed - single-precision (32-bit) floating-point elements) from "a" into memory two different - 128-bit locations. - "hiaddr" and "loaddr" do not need to be aligned on any particular boundary. - - MEM[loaddr+127:loaddr] := a[127:0] - MEM[hiaddr+127:hiaddr] := a[255:128] - - AVX -
immintrin.h
- Store + + + + + Store the high and low 128-bit halves (each composed of 4 packed single-precision (32-bit) floating-point elements) from "a" into memory two different 128-bit locations. + "hiaddr" and "loaddr" do not need to be aligned on any particular boundary. + +MEM[loaddr+127:loaddr] := a[127:0] +MEM[hiaddr+127:hiaddr] := a[255:128] + + AVX +
immintrin.h
+ Store
- - - - - Store the high and low 128-bit halves (each composed of 2 packed - double-precision (64-bit) floating-point elements) from "a" into memory two different - 128-bit locations. - "hiaddr" and "loaddr" do not need to be aligned on any particular boundary. - - MEM[loaddr+127:loaddr] := a[127:0] - MEM[hiaddr+127:hiaddr] := a[255:128] - - AVX -
immintrin.h
- Store + + + + + Store the high and low 128-bit halves (each composed of 2 packed double-precision (64-bit) floating-point elements) from "a" into memory two different 128-bit locations. + "hiaddr" and "loaddr" do not need to be aligned on any particular boundary. + +MEM[loaddr+127:loaddr] := a[127:0] +MEM[hiaddr+127:hiaddr] := a[255:128] + + AVX +
immintrin.h
+ Store
- - - - - Store the high and low 128-bit halves (each composed of integer data) from "a" - into memory two different 128-bit locations. - "hiaddr" and "loaddr" do not need to be aligned on any particular boundary. - - MEM[loaddr+127:loaddr] := a[127:0] - MEM[hiaddr+127:hiaddr] := a[255:128] - - AVX -
immintrin.h
- Store + + + + + Store the high and low 128-bit halves (each composed of integer data) from "a" into memory two different 128-bit locations. + "hiaddr" and "loaddr" do not need to be aligned on any particular boundary. + +MEM[loaddr+127:loaddr] := a[127:0] +MEM[hiaddr+127:hiaddr] := a[255:128] + + AVX +
immintrin.h
+ Store
- - - Duplicate odd-indexed single-precision (32-bit) floating-point elements from - "a", and store the results in "dst". - - dst[31:0] := a[63:32] - dst[63:32] := a[63:32] - dst[95:64] := a[127:96] - dst[127:96] := a[127:96] - dst[159:128] := a[191:160] - dst[191:160] := a[191:160] - dst[223:192] := a[255:224] - dst[255:224] := a[255:224] - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Move + + + Duplicate odd-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst". + +dst[31:0] := a[63:32] +dst[63:32] := a[63:32] +dst[95:64] := a[127:96] +dst[127:96] := a[127:96] +dst[159:128] := a[191:160] +dst[191:160] := a[191:160] +dst[223:192] := a[255:224] +dst[255:224] := a[255:224] +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Move
- - - Duplicate even-indexed single-precision (32-bit) floating-point elements from - "a", and store the results in "dst". - - dst[31:0] := a[31:0] - dst[63:32] := a[31:0] - dst[95:64] := a[95:64] - dst[127:96] := a[95:64] - dst[159:128] := a[159:128] - dst[191:160] := a[159:128] - dst[223:192] := a[223:192] - dst[255:224] := a[223:192] - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Move + + + Duplicate even-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst". + +dst[31:0] := a[31:0] +dst[63:32] := a[31:0] +dst[95:64] := a[95:64] +dst[127:96] := a[95:64] +dst[159:128] := a[159:128] +dst[191:160] := a[159:128] +dst[223:192] := a[223:192] +dst[255:224] := a[223:192] +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Move
- - - Duplicate even-indexed double-precision (64-bit) floating-point elements from - "a", and store the results in "dst". - - dst[63:0] := a[63:0] - dst[127:64] := a[63:0] - dst[191:128] := a[191:128] - dst[255:192] := a[191:128] - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Move + + + Duplicate even-indexed double-precision (64-bit) floating-point elements from "a", and store the results in "dst". + +dst[63:0] := a[63:0] +dst[127:64] := a[63:0] +dst[191:128] := a[191:128] +dst[255:192] := a[191:128] +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Move
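For input lanes [a0 .. a7], movehdup produces [a1 a1 a3 a3 a5 a5 a7 a7], moveldup produces [a0 a0 a2 a2 a4 a4 a6 a6], and movedup is the even-lane form over f64 lanes. Sketch (hypothetical helper):

    use core::arch::x86_64::*;

    #[target_feature(enable = "avx")]
    unsafe fn dups(a: __m256, b: __m256d) -> (__m256, __m256, __m256d) {
        (_mm256_movehdup_ps(a), _mm256_moveldup_ps(a), _mm256_movedup_pd(b))
    }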
- - - Compute the approximate reciprocal of packed single-precision (32-bit) - floating-point elements in "a", and store the results in "dst". The maximum relative - error for this approximation is less than 1.5*2^-12. - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := 1.0 / a[i+31:i] - ENDFOR - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Elementary Math Functions + + + Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 1.5*2^-12. + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := 1.0 / a[i+31:i] +ENDFOR +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Elementary Math Functions
- - - Compute the approximate reciprocal square root of packed single-precision - (32-bit) floating-point elements in "a", and store the results in "dst". The maximum - relative error for this approximation is less than 1.5*2^-12. - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := (1.0 / SQRT(a[i+31:i])) - ENDFOR - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Elementary Math Functions + + + Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 1.5*2^-12. + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := (1.0 / SQRT(a[i+31:i])) +ENDFOR +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Elementary Math Functions
- - - Compute the square root of packed double-precision (64-bit) floating-point - elements in "a", and store the results in "dst". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := SQRT(a[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Elementary Math Functions + + + Compute the square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := SQRT(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Elementary Math Functions
- - - Compute the square root of packed single-precision (32-bit) floating-point - elements in "a", and store the results in "dst". - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := SQRT(a[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Elementary Math Functions + + + Compute the square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := SQRT(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Elementary Math Functions
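The approximate forms trade accuracy (relative error below 1.5*2^-12) for latency; one Newton-Raphson step recovers close to full f32 precision when the exact `sqrt` or a division is too slow. Sketch (hypothetical helper):

    use core::arch::x86_64::*;

    // rsqrt refined once: y' = y * (1.5 - 0.5 * a * y * y)
    #[target_feature(enable = "avx")]
    unsafe fn rsqrt_refined(a: __m256) -> __m256 {
        let y = _mm256_rsqrt_ps(a); // initial approximation of 1/sqrt(a)
        let t = _mm256_mul_ps(_mm256_mul_ps(_mm256_set1_ps(0.5), a),
                              _mm256_mul_ps(y, y));
        _mm256_mul_ps(y, _mm256_sub_ps(_mm256_set1_ps(1.5), t))
    }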
- - - Set each bit of mask "dst" based on the most significant bit of the - corresponding packed double-precision (64-bit) floating-point element in "a". - - FOR j := 0 to 3 - i := j*64 - IF a[i+63] - dst[j] := 1 - ELSE - dst[j] := 0 - FI - ENDFOR - dst[MAX:4] := 0 - - - AVX -
immintrin.h
- Miscellaneous + + + Set each bit of mask "dst" based on the most significant bit of the corresponding packed double-precision (64-bit) floating-point element in "a". + +FOR j := 0 to 3 + i := j*64 + IF a[i+63] + dst[j] := 1 + ELSE + dst[j] := 0 + FI +ENDFOR +dst[MAX:4] := 0 + + + AVX +
immintrin.h
+ Miscellaneous
- - - Set each bit of mask "dst" based on the most significant bit of the - corresponding packed single-precision (32-bit) floating-point element in "a". - - FOR j := 0 to 7 - i := j*32 - IF a[i+31] - dst[j] := 1 - ELSE - dst[j] := 0 - FI - ENDFOR - dst[MAX:8] := 0 - - - AVX -
immintrin.h
- Miscellaneous + + + Set each bit of mask "dst" based on the most significant bit of the corresponding packed single-precision (32-bit) floating-point element in "a". + +FOR j := 0 to 7 + i := j*32 + IF a[i+31] + dst[j] := 1 + ELSE + dst[j] := 0 + FI +ENDFOR +dst[MAX:8] := 0 + + + AVX +
immintrin.h
+ Miscellaneous
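movemask compresses one sign bit per lane into a scalar, which pairs naturally with the vector compares above for branching and counting. Sketch (hypothetical helper):

    use core::arch::x86_64::*;

    // How many lanes of a are strictly below the matching lane of b?
    #[target_feature(enable = "avx")]
    unsafe fn count_lt(a: __m256, b: __m256) -> u32 {
        let m = _mm256_cmp_ps::<_CMP_LT_OQ>(a, b);
        (_mm256_movemask_ps(m) as u32).count_ones()
    }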
- - - Return vector of type __m256d with all elements set to zero. - - dst[MAX:0] := 0 - - - AVX -
immintrin.h
- Set + + + Return vector of type __m256d with all elements set to zero. + +dst[MAX:0] := 0 + + + AVX +
immintrin.h
+ Set
- - - Return vector of type __m256 with all elements set to zero. - - dst[MAX:0] := 0 - - - AVX -
immintrin.h
- Set + + + Return vector of type __m256 with all elements set to zero. + +dst[MAX:0] := 0 + + + AVX +
immintrin.h
+ Set
- - - Return vector of type __m256i with all elements set to zero. - - dst[MAX:0] := 0 - - - AVX -
immintrin.h
- Set + + + Return vector of type __m256i with all elements set to zero. + +dst[MAX:0] := 0 + + + AVX +
immintrin.h
+ Set
- - - - - - Set packed double-precision (64-bit) floating-point elements in "dst" with the - supplied values. - - dst[63:0] := e0 - dst[127:64] := e1 - dst[191:128] := e2 - dst[255:192] := e3 - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Set + + + + + + Set packed double-precision (64-bit) floating-point elements in "dst" with the supplied values. + +dst[63:0] := e0 +dst[127:64] := e1 +dst[191:128] := e2 +dst[255:192] := e3 +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Set
- - - - - - - - - - Set packed single-precision (32-bit) floating-point elements in "dst" with the - supplied values. - - dst[31:0] := e0 - dst[63:32] := e1 - dst[95:64] := e2 - dst[127:96] := e3 - dst[159:128] := e4 - dst[191:160] := e5 - dst[223:192] := e6 - dst[255:224] := e7 - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Set + + + + + + + + + + Set packed single-precision (32-bit) floating-point elements in "dst" with the supplied values. + +dst[31:0] := e0 +dst[63:32] := e1 +dst[95:64] := e2 +dst[127:96] := e3 +dst[159:128] := e4 +dst[191:160] := e5 +dst[223:192] := e6 +dst[255:224] := e7 +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Set
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Set packed 8-bit integers in "dst" with the supplied values. - - dst[7:0] := e0 - dst[15:8] := e1 - dst[23:16] := e2 - dst[31:24] := e3 - dst[39:32] := e4 - dst[47:40] := e5 - dst[55:48] := e6 - dst[63:56] := e7 - dst[71:64] := e8 - dst[79:72] := e9 - dst[87:80] := e10 - dst[95:88] := e11 - dst[103:96] := e12 - dst[111:104] := e13 - dst[119:112] := e14 - dst[127:120] := e15 - dst[135:128] := e16 - dst[143:136] := e17 - dst[151:144] := e18 - dst[159:152] := e19 - dst[167:160] := e20 - dst[175:168] := e21 - dst[183:176] := e22 - dst[191:184] := e23 - dst[199:192] := e24 - dst[207:200] := e25 - dst[215:208] := e26 - dst[223:216] := e27 - dst[231:224] := e28 - dst[239:232] := e29 - dst[247:240] := e30 - dst[255:248] := e31 - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Set + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Set packed 8-bit integers in "dst" with the supplied values. + +dst[7:0] := e0 +dst[15:8] := e1 +dst[23:16] := e2 +dst[31:24] := e3 +dst[39:32] := e4 +dst[47:40] := e5 +dst[55:48] := e6 +dst[63:56] := e7 +dst[71:64] := e8 +dst[79:72] := e9 +dst[87:80] := e10 +dst[95:88] := e11 +dst[103:96] := e12 +dst[111:104] := e13 +dst[119:112] := e14 +dst[127:120] := e15 +dst[135:128] := e16 +dst[143:136] := e17 +dst[151:144] := e18 +dst[159:152] := e19 +dst[167:160] := e20 +dst[175:168] := e21 +dst[183:176] := e22 +dst[191:184] := e23 +dst[199:192] := e24 +dst[207:200] := e25 +dst[215:208] := e26 +dst[223:216] := e27 +dst[231:224] := e28 +dst[239:232] := e29 +dst[247:240] := e30 +dst[255:248] := e31 +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Set
- - - - - - - - - - - - - - - - - - Set packed 16-bit integers in "dst" with the supplied values. - - dst[15:0] := e0 - dst[31:16] := e1 - dst[47:32] := e2 - dst[63:48] := e3 - dst[79:64] := e4 - dst[95:80] := e5 - dst[111:96] := e6 - dst[127:112] := e7 - dst[143:128] := e8 - dst[159:144] := e9 - dst[175:160] := e10 - dst[191:176] := e11 - dst[207:192] := e12 - dst[223:208] := e13 - dst[239:224] := e14 - dst[255:240] := e15 - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Set + + + + + + + + + + + + + + + + + + Set packed 16-bit integers in "dst" with the supplied values. + +dst[15:0] := e0 +dst[31:16] := e1 +dst[47:32] := e2 +dst[63:48] := e3 +dst[79:64] := e4 +dst[95:80] := e5 +dst[111:96] := e6 +dst[127:112] := e7 +dst[143:128] := e8 +dst[159:144] := e9 +dst[175:160] := e10 +dst[191:176] := e11 +dst[207:192] := e12 +dst[223:208] := e13 +dst[239:224] := e14 +dst[255:240] := e15 +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Set
- - - - - - - - - - Set packed 32-bit integers in "dst" with the supplied values. - - dst[31:0] := e0 - dst[63:32] := e1 - dst[95:64] := e2 - dst[127:96] := e3 - dst[159:128] := e4 - dst[191:160] := e5 - dst[223:192] := e6 - dst[255:224] := e7 - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Set + + + + + + + + + + Set packed 32-bit integers in "dst" with the supplied values. + +dst[31:0] := e0 +dst[63:32] := e1 +dst[95:64] := e2 +dst[127:96] := e3 +dst[159:128] := e4 +dst[191:160] := e5 +dst[223:192] := e6 +dst[255:224] := e7 +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Set
- - - - - - Set packed 64-bit integers in "dst" with the supplied values. - - dst[63:0] := e0 - dst[127:64] := e1 - dst[191:128] := e2 - dst[255:192] := e3 - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Set + + + + + + Set packed 64-bit integers in "dst" with the supplied values. + +dst[63:0] := e0 +dst[127:64] := e1 +dst[191:128] := e2 +dst[255:192] := e3 +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Set
- - - - - - Set packed double-precision (64-bit) floating-point elements in "dst" with the - supplied values in reverse order. - - dst[63:0] := e3 - dst[127:64] := e2 - dst[191:128] := e1 - dst[255:192] := e0 - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Set + + + + + + Set packed double-precision (64-bit) floating-point elements in "dst" with the supplied values in reverse order. + +dst[63:0] := e3 +dst[127:64] := e2 +dst[191:128] := e1 +dst[255:192] := e0 +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Set
- - - - - - - - - - Set packed single-precision (32-bit) floating-point elements in "dst" with the - supplied values in reverse order. - - dst[31:0] := e7 - dst[63:32] := e6 - dst[95:64] := e5 - dst[127:96] := e4 - dst[159:128] := e3 - dst[191:160] := e2 - dst[223:192] := e1 - dst[255:224] := e0 - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Set + + + + + + + + + + Set packed single-precision (32-bit) floating-point elements in "dst" with the supplied values in reverse order. + +dst[31:0] := e7 +dst[63:32] := e6 +dst[95:64] := e5 +dst[127:96] := e4 +dst[159:128] := e3 +dst[191:160] := e2 +dst[223:192] := e1 +dst[255:224] := e0 +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Set
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Set packed 8-bit integers in "dst" with the supplied values in reverse order. - - dst[7:0] := e31 - dst[15:8] := e30 - dst[23:16] := e29 - dst[31:24] := e28 - dst[39:32] := e27 - dst[47:40] := e26 - dst[55:48] := e25 - dst[63:56] := e24 - dst[71:64] := e23 - dst[79:72] := e22 - dst[87:80] := e21 - dst[95:88] := e20 - dst[103:96] := e19 - dst[111:104] := e18 - dst[119:112] := e17 - dst[127:120] := e16 - dst[135:128] := e15 - dst[143:136] := e14 - dst[151:144] := e13 - dst[159:152] := e12 - dst[167:160] := e11 - dst[175:168] := e10 - dst[183:176] := e9 - dst[191:184] := e8 - dst[199:192] := e7 - dst[207:200] := e6 - dst[215:208] := e5 - dst[223:216] := e4 - dst[231:224] := e3 - dst[239:232] := e2 - dst[247:240] := e1 - dst[255:248] := e0 - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Set + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Set packed 8-bit integers in "dst" with the supplied values in reverse order. + +dst[7:0] := e31 +dst[15:8] := e30 +dst[23:16] := e29 +dst[31:24] := e28 +dst[39:32] := e27 +dst[47:40] := e26 +dst[55:48] := e25 +dst[63:56] := e24 +dst[71:64] := e23 +dst[79:72] := e22 +dst[87:80] := e21 +dst[95:88] := e20 +dst[103:96] := e19 +dst[111:104] := e18 +dst[119:112] := e17 +dst[127:120] := e16 +dst[135:128] := e15 +dst[143:136] := e14 +dst[151:144] := e13 +dst[159:152] := e12 +dst[167:160] := e11 +dst[175:168] := e10 +dst[183:176] := e9 +dst[191:184] := e8 +dst[199:192] := e7 +dst[207:200] := e6 +dst[215:208] := e5 +dst[223:216] := e4 +dst[231:224] := e3 +dst[239:232] := e2 +dst[247:240] := e1 +dst[255:248] := e0 +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Set
- - - - - - - - - - - - - - - - - - Set packed 16-bit integers in "dst" with the supplied values in reverse order. - - dst[15:0] := e15 - dst[31:16] := e14 - dst[47:32] := e13 - dst[63:48] := e12 - dst[79:64] := e11 - dst[95:80] := e10 - dst[111:96] := e9 - dst[127:112] := e8 - dst[143:128] := e7 - dst[159:144] := e6 - dst[175:160] := e5 - dst[191:176] := e4 - dst[207:192] := e3 - dst[223:208] := e2 - dst[239:224] := e1 - dst[255:240] := e0 - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Set + + + + + + + + + + + + + + + + + + Set packed 16-bit integers in "dst" with the supplied values in reverse order. + +dst[15:0] := e15 +dst[31:16] := e14 +dst[47:32] := e13 +dst[63:48] := e12 +dst[79:64] := e11 +dst[95:80] := e10 +dst[111:96] := e9 +dst[127:112] := e8 +dst[143:128] := e7 +dst[159:144] := e6 +dst[175:160] := e5 +dst[191:176] := e4 +dst[207:192] := e3 +dst[223:208] := e2 +dst[239:224] := e1 +dst[255:240] := e0 +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Set
- - - - - - - - - - Set packed 32-bit integers in "dst" with the supplied values in reverse order. - - dst[31:0] := e7 - dst[63:32] := e6 - dst[95:64] := e5 - dst[127:96] := e4 - dst[159:128] := e3 - dst[191:160] := e2 - dst[223:192] := e1 - dst[255:224] := e0 - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Set + + + + + + + + + + Set packed 32-bit integers in "dst" with the supplied values in reverse order. + +dst[31:0] := e7 +dst[63:32] := e6 +dst[95:64] := e5 +dst[127:96] := e4 +dst[159:128] := e3 +dst[191:160] := e2 +dst[223:192] := e1 +dst[255:224] := e0 +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Set
- - - - - - Set packed 64-bit integers in "dst" with the supplied values in reverse order. - - dst[63:0] := e3 - dst[127:64] := e2 - dst[191:128] := e1 - dst[255:192] := e0 - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Set + + + + + + Set packed 64-bit integers in "dst" with the supplied values in reverse order. + +dst[63:0] := e3 +dst[127:64] := e2 +dst[191:128] := e1 +dst[255:192] := e0 +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Set
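The only difference between the `set` and `setr` families is argument order: `set` lists elements from the highest lane down, `setr` ("reversed") from lane 0 up. Sketch (hypothetical helper; both calls build the same vector):

    use core::arch::x86_64::*;

    #[target_feature(enable = "avx")]
    unsafe fn same_vector() -> (__m256, __m256) {
        let a = _mm256_set_ps(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0);
        let b = _mm256_setr_ps(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0);
        (a, b) // element 0 of both is 1.0
    }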
- - - Broadcast double-precision (64-bit) floating-point value "a" to all elements of - "dst". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := a[63:0] - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Set + + + Broadcast double-precision (64-bit) floating-point value "a" to all elements of "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := a[63:0] +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Set
- - - Broadcast single-precision (32-bit) floating-point value "a" to all elements of - "dst". - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := a[31:0] - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Set + + + Broadcast single-precision (32-bit) floating-point value "a" to all elements of "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := a[31:0] +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Set
- - - Broadcast 8-bit integer "a" to all elements of "dst". This intrinsic may - generate the "vpbroadcastb". - - FOR j := 0 to 31 - i := j*8 - dst[i+7:i] := a[7:0] - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Set + + + Broadcast 8-bit integer "a" to all elements of "dst". This intrinsic may generate the "vpbroadcastb". + +FOR j := 0 to 31 + i := j*8 + dst[i+7:i] := a[7:0] +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Set
- - - Broadcast 16-bit integer "a" to all all elements of "dst". This intrinsic may - generate the "vpbroadcastw". - - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := a[15:0] - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Set + + + Broadcast 16-bit integer "a" to all all elements of "dst". This intrinsic may generate the "vpbroadcastw". + +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := a[15:0] +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Set
- - - Broadcast 32-bit integer "a" to all elements of "dst". This intrinsic may - generate the "vpbroadcastd". - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := a[31:0] - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Set + + + Broadcast 32-bit integer "a" to all elements of "dst". This intrinsic may generate the "vpbroadcastd". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := a[31:0] +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Set
- - - Broadcast 64-bit integer "a" to all elements of "dst". This intrinsic may - generate the "vpbroadcastq". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := a[63:0] - ENDFOR - dst[MAX:256] := 0 - - AVX -
immintrin.h
- Set + + + Broadcast 64-bit integer "a" to all elements of "dst". This intrinsic may generate the "vpbroadcastq". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := a[63:0] +ENDFOR +dst[MAX:256] := 0 + + AVX +
immintrin.h
+ Set
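`set1` splats a scalar that is already in a register, while the broadcast entries earlier splat straight from memory; with optimizations enabled both usually lower to a single vbroadcast instruction. Sketch (hypothetical helper):

    use core::arch::x86_64::*;

    #[target_feature(enable = "avx")]
    unsafe fn splat_pi() -> __m256 {
        _mm256_set1_ps(core::f32::consts::PI)
    }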
- - - - Set packed __m256 vector "dst" with the supplied values. - - dst[127:0] := lo[127:0] - dst[255:128] := hi[127:0] - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Set + + + + Set packed __m256 vector "dst" with the supplied values. + +dst[127:0] := lo[127:0] +dst[255:128] := hi[127:0] +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Set
- - - - Set packed __m256d vector "dst" with the supplied values. - - dst[127:0] := lo[127:0] - dst[255:128] := hi[127:0] - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Set + + + + Set packed __m256d vector "dst" with the supplied values. + +dst[127:0] := lo[127:0] +dst[255:128] := hi[127:0] +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Set
- - - - Set packed __m256i vector "dst" with the supplied values. - - dst[127:0] := lo[127:0] - dst[255:128] := hi[127:0] - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Set + + + + Set packed __m256i vector "dst" with the supplied values. + +dst[127:0] := lo[127:0] +dst[255:128] := hi[127:0] +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Set
- - - - Set packed __m256 vector "dst" with the supplied values. - - dst[127:0] := lo[127:0] - dst[255:128] := hi[127:0] - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Set + + + + Set packed __m256 vector "dst" with the supplied values. + +dst[127:0] := lo[127:0] +dst[255:128] := hi[127:0] +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Set
- - - - Set packed __m256d vector "dst" with the supplied values. - - dst[127:0] := lo[127:0] - dst[255:128] := hi[127:0] - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Set + + + + Set packed __m256d vector "dst" with the supplied values. + +dst[127:0] := lo[127:0] +dst[255:128] := hi[127:0] +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Set
- - - - Set packed __m256i vector "dst" with the supplied values. - - dst[127:0] := lo[127:0] - dst[255:128] := hi[127:0] - dst[MAX:256] := 0 - - - AVX -
immintrin.h
- Set + + + + Set packed __m256i vector "dst" with the supplied values. + +dst[127:0] := lo[127:0] +dst[255:128] := hi[127:0] +dst[MAX:256] := 0 + + + AVX +
immintrin.h
+ Set
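Sketch of widening two 128-bit halves into one 256-bit vector (hypothetical helper); `set_m128` takes (hi, lo) and `setr_m128` takes (lo, hi), mirroring the set/setr convention above:

    use core::arch::x86_64::*;

    #[target_feature(enable = "avx")]
    unsafe fn widen(hi: __m128, lo: __m128) -> __m256 {
        _mm256_set_m128(hi, lo)
    }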
- - - Cast vector of type __m256d to type __m256. - This intrinsic is only used for compilation and does not generate any instructions, thus - it has zero latency. - AVX -
immintrin.h
- Cast + + + Cast vector of type __m256d to type __m256. + This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX +
immintrin.h
+ Cast
- - - Cast vector of type __m256 to type __m256d. - This intrinsic is only used for compilation and does not generate any instructions, thus - it has zero latency. - AVX -
immintrin.h
- Cast + + + Cast vector of type __m256 to type __m256d. + This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX +
immintrin.h
+ Cast
- - - Cast vector of type __m256 to type __m256i. This intrinsic is only used for - compilation and does not generate any instructions, thus it has zero latency. - AVX -
immintrin.h
- Cast + + + Cast vector of type __m256 to type __m256i. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX +
immintrin.h
+ Cast
- - - Cast vector of type __m256d to type __m256i. This intrinsic is only used for - compilation and does not generate any instructions, thus it has zero latency. - AVX -
immintrin.h
- Cast + + + Cast vector of type __m256d to type __m256i. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX +
immintrin.h
+ Cast
- - - Cast vector of type __m256i to type __m256. This intrinsic is only used for - compilation and does not generate any instructions, thus it has zero latency. - AVX -
immintrin.h
- Cast + + + Cast vector of type __m256i to type __m256. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX +
immintrin.h
+ Cast
- - - Cast vector of type __m256i to type __m256d. This intrinsic is only used for - compilation and does not generate any instructions, thus it has zero latency. - AVX -
immintrin.h
- Cast + + + Cast vector of type __m256i to type __m256d. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX +
immintrin.h
+ Cast

Cast vector of type __m256 to type __m128. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.
[AVX, immintrin.h, Cast]

Cast vector of type __m256d to type __m128d. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.
[AVX, immintrin.h, Cast]

Cast vector of type __m256i to type __m128i. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.
[AVX, immintrin.h, Cast]

Cast vector of type __m128 to type __m256; the upper 128 bits of the result are undefined. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.
[AVX, immintrin.h, Cast]

Cast vector of type __m128d to type __m256d; the upper 128 bits of the result are undefined. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.
[AVX, immintrin.h, Cast]

Cast vector of type __m128i to type __m256i; the upper 128 bits of the result are undefined. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.
[AVX, immintrin.h, Cast]
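
The cast entries map to the `_mm256_cast*` family, which stdarch implements as zero-cost transmutes (names inferred, since the name attributes are missing here). A sketch:

    use std::arch::x86_64::*;

    #[target_feature(enable = "avx")]
    unsafe fn bits_of(a: __m256) -> __m256i {
        // No instruction is emitted; only the static type changes.
        _mm256_castps_si256(a)
    }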

Cast vector of type __m128 to type __m256; the upper 128 bits of the result are zeroed. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.
[AVX, immintrin.h, Cast]

Cast vector of type __m128d to type __m256d; the upper 128 bits of the result are zeroed. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.
[AVX, immintrin.h, Cast]

Cast vector of type __m128i to type __m256i; the upper 128 bits of the result are zeroed. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.
[AVX, immintrin.h, Cast]

Extract an 8-bit integer from "a", selected with "index", and store the result in "dst".
    dst[7:0] := (a[255:0] >> (index[4:0] * 8))[7:0]
[AVX2, immintrin.h, Swizzle]

Extract a 16-bit integer from "a", selected with "index", and store the result in "dst".
    dst[15:0] := (a[255:0] >> (index[3:0] * 16))[15:0]
[AVX2, immintrin.h, Swizzle]
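
In stdarch the element index is a const generic; assuming these are `_mm256_extract_epi8`/`_mm256_extract_epi16`, the extracted element comes back widened in an `i32`:

    use std::arch::x86_64::*;

    #[target_feature(enable = "avx2")]
    unsafe fn fourth_byte(a: __m256i) -> i32 {
        // index is a compile-time constant, checked against [0, 31]
        _mm256_extract_epi8::<3>(a)
    }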

Blend packed 16-bit integers from "a" and "b" within 128-bit lanes using control mask "imm8", and store the results in "dst".
    FOR j := 0 to 15
        i := j*16
        IF imm8[j%8]
            dst[i+15:i] := b[i+15:i]
        ELSE
            dst[i+15:i] := a[i+15:i]
        FI
    ENDFOR
    dst[MAX:256] := 0
[AVX2, immintrin.h, Swizzle]

Blend packed 32-bit integers from "a" and "b" using control mask "imm8", and store the results in "dst".
    FOR j := 0 to 3
        i := j*32
        IF imm8[j]
            dst[i+31:i] := b[i+31:i]
        ELSE
            dst[i+31:i] := a[i+31:i]
        FI
    ENDFOR
    dst[MAX:128] := 0
[AVX2, immintrin.h, Swizzle]

Blend packed 32-bit integers from "a" and "b" using control mask "imm8", and store the results in "dst".
    FOR j := 0 to 7
        i := j*32
        IF imm8[j]
            dst[i+31:i] := b[i+31:i]
        ELSE
            dst[i+31:i] := a[i+31:i]
        FI
    ENDFOR
    dst[MAX:256] := 0
[AVX2, immintrin.h, Swizzle]

Blend packed 8-bit integers from "a" and "b" using "mask", and store the results in "dst".
    FOR j := 0 to 31
        i := j*8
        IF mask[i+7]
            dst[i+7:i] := b[i+7:i]
        ELSE
            dst[i+7:i] := a[i+7:i]
        FI
    ENDFOR
    dst[MAX:256] := 0
[AVX2, immintrin.h, Swizzle]
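
For the immediate blends the mask is again a const generic; a sketch using what is presumably `_mm256_blend_epi32` (bit j of IMM8 picks lane j of "b"):

    use std::arch::x86_64::*;

    #[target_feature(enable = "avx2")]
    unsafe fn interleave_dwords(a: __m256i, b: __m256i) -> __m256i {
        // 0b10101010: odd 32-bit lanes from b, even lanes from a
        _mm256_blend_epi32::<0b10101010>(a, b)
    }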

Broadcast the low packed 8-bit integer from "a" to all elements of "dst".
    FOR j := 0 to 15
        i := j*8
        dst[i+7:i] := a[7:0]
    ENDFOR
    dst[MAX:128] := 0
[AVX2, immintrin.h, Swizzle]

Broadcast the low packed 8-bit integer from "a" to all elements of "dst".
    FOR j := 0 to 31
        i := j*8
        dst[i+7:i] := a[7:0]
    ENDFOR
    dst[MAX:256] := 0
[AVX2, immintrin.h, Swizzle]

Broadcast the low packed 32-bit integer from "a" to all elements of "dst".
    FOR j := 0 to 3
        i := j*32
        dst[i+31:i] := a[31:0]
    ENDFOR
    dst[MAX:128] := 0
[AVX2, immintrin.h, Swizzle]

Broadcast the low packed 32-bit integer from "a" to all elements of "dst".
    FOR j := 0 to 7
        i := j*32
        dst[i+31:i] := a[31:0]
    ENDFOR
    dst[MAX:256] := 0
[AVX2, immintrin.h, Swizzle]

Broadcast the low packed 64-bit integer from "a" to all elements of "dst".
    FOR j := 0 to 1
        i := j*64
        dst[i+63:i] := a[63:0]
    ENDFOR
    dst[MAX:128] := 0
[AVX2, immintrin.h, Swizzle]

Broadcast the low packed 64-bit integer from "a" to all elements of "dst".
    FOR j := 0 to 3
        i := j*64
        dst[i+63:i] := a[63:0]
    ENDFOR
    dst[MAX:256] := 0
[AVX2, immintrin.h, Swizzle]

Broadcast the low double-precision (64-bit) floating-point element from "a" to all elements of "dst".
    FOR j := 0 to 1
        i := j*64
        dst[i+63:i] := a[63:0]
    ENDFOR
    dst[MAX:128] := 0
[AVX2, immintrin.h, Swizzle]

Broadcast the low double-precision (64-bit) floating-point element from "a" to all elements of "dst".
    FOR j := 0 to 3
        i := j*64
        dst[i+63:i] := a[63:0]
    ENDFOR
    dst[MAX:256] := 0
[AVX2, immintrin.h, Swizzle]

Broadcast 128 bits of integer data from "a" to all 128-bit lanes in "dst".
    dst[127:0] := a[127:0]
    dst[255:128] := a[127:0]
    dst[MAX:256] := 0
[AVX2, immintrin.h, Swizzle]

Broadcast 128 bits of integer data from "a" to all 128-bit lanes in "dst".
    dst[127:0] := a[127:0]
    dst[255:128] := a[127:0]
    dst[MAX:256] := 0
[AVX2, immintrin.h, Swizzle]

Broadcast the low single-precision (32-bit) floating-point element from "a" to all elements of "dst".
    FOR j := 0 to 3
        i := j*32
        dst[i+31:i] := a[31:0]
    ENDFOR
    dst[MAX:128] := 0
[AVX2, immintrin.h, Swizzle]

Broadcast the low single-precision (32-bit) floating-point element from "a" to all elements of "dst".
    FOR j := 0 to 7
        i := j*32
        dst[i+31:i] := a[31:0]
    ENDFOR
    dst[MAX:256] := 0
[AVX2, immintrin.h, Swizzle]

Broadcast the low packed 16-bit integer from "a" to all elements of "dst".
    FOR j := 0 to 7
        i := j*16
        dst[i+15:i] := a[15:0]
    ENDFOR
    dst[MAX:128] := 0
[AVX2, immintrin.h, Swizzle]

Broadcast the low packed 16-bit integer from "a" to all elements of "dst".
    FOR j := 0 to 15
        i := j*16
        dst[i+15:i] := a[15:0]
    ENDFOR
    dst[MAX:256] := 0
[AVX2, immintrin.h, Swizzle]
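
These map to the `_mm_broadcast*`/`_mm256_broadcast*` family (names inferred); all take a 128-bit source and splat its lowest element:

    use std::arch::x86_64::*;

    #[target_feature(enable = "avx2")]
    unsafe fn splat_low_word(a: __m128i) -> __m256i {
        // a[15:0] is copied to all sixteen 16-bit lanes
        _mm256_broadcastw_epi16(a)
    }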

Extract 128 bits (composed of integer data) from "a", selected with "imm8", and store the result in "dst".
    CASE imm8[0] OF
        0: dst[127:0] := a[127:0]
        1: dst[127:0] := a[255:128]
    ESAC
    dst[MAX:128] := 0
[AVX2, immintrin.h, Swizzle]

Copy "a" to "dst", then insert 128 bits (composed of integer data) from "b" into "dst" at the location specified by "imm8".
    dst[255:0] := a[255:0]
    CASE (imm8[0]) OF
        0: dst[127:0] := b[127:0]
        1: dst[255:128] := b[127:0]
    ESAC
    dst[MAX:256] := 0
[AVX2, immintrin.h, Swizzle]
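
Presumably `_mm256_extracti128_si256` and `_mm256_inserti128_si256`; together they give cheap access to either 128-bit half. A sketch that swaps the halves:

    use std::arch::x86_64::*;

    #[target_feature(enable = "avx2")]
    unsafe fn swap_halves(a: __m256i) -> __m256i {
        let hi = _mm256_extracti128_si256::<1>(a); // a[255:128]
        let lo = _mm256_extracti128_si256::<0>(a); // a[127:0]
        _mm256_inserti128_si256::<1>(_mm256_castsi128_si256(hi), lo)
    }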

Shuffle 128-bits (composed of integer data) selected by "imm8" from "a" and "b", and store the results in "dst".
    DEFINE SELECT4(src1, src2, control) {
        CASE(control[1:0]) OF
            0: tmp[127:0] := src1[127:0]
            1: tmp[127:0] := src1[255:128]
            2: tmp[127:0] := src2[127:0]
            3: tmp[127:0] := src2[255:128]
        ESAC
        IF control[3]
            tmp[127:0] := 0
        FI
        RETURN tmp[127:0]
    }
    dst[127:0] := SELECT4(a[255:0], b[255:0], imm8[3:0])
    dst[255:128] := SELECT4(a[255:0], b[255:0], imm8[7:4])
    dst[MAX:256] := 0
[AVX2, immintrin.h, Swizzle]

Shuffle 64-bit integers in "a" across lanes using the control in "imm8", and store the results in "dst".
    DEFINE SELECT4(src, control) {
        CASE(control[1:0]) OF
            0: tmp[63:0] := src[63:0]
            1: tmp[63:0] := src[127:64]
            2: tmp[63:0] := src[191:128]
            3: tmp[63:0] := src[255:192]
        ESAC
        RETURN tmp[63:0]
    }
    dst[63:0] := SELECT4(a[255:0], imm8[1:0])
    dst[127:64] := SELECT4(a[255:0], imm8[3:2])
    dst[191:128] := SELECT4(a[255:0], imm8[5:4])
    dst[255:192] := SELECT4(a[255:0], imm8[7:6])
    dst[MAX:256] := 0
[AVX2, immintrin.h, Swizzle]

Shuffle double-precision (64-bit) floating-point elements in "a" across lanes using the control in "imm8", and store the results in "dst".
    DEFINE SELECT4(src, control) {
        CASE(control[1:0]) OF
            0: tmp[63:0] := src[63:0]
            1: tmp[63:0] := src[127:64]
            2: tmp[63:0] := src[191:128]
            3: tmp[63:0] := src[255:192]
        ESAC
        RETURN tmp[63:0]
    }
    dst[63:0] := SELECT4(a[255:0], imm8[1:0])
    dst[127:64] := SELECT4(a[255:0], imm8[3:2])
    dst[191:128] := SELECT4(a[255:0], imm8[5:4])
    dst[255:192] := SELECT4(a[255:0], imm8[7:6])
    dst[MAX:256] := 0
[AVX2, immintrin.h, Swizzle]
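
A cross-lane example with what is presumably `_mm256_permute4x64_epi64`, using the SELECT4 encoding above (two control bits per destination lane):

    use std::arch::x86_64::*;

    #[target_feature(enable = "avx2")]
    unsafe fn reverse_qwords(a: __m256i) -> __m256i {
        // control 0b00_01_10_11 selects source lanes 3, 2, 1, 0
        _mm256_permute4x64_epi64::<0b00_01_10_11>(a)
    }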

Shuffle 32-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst".
    FOR j := 0 to 7
        i := j*32
        id := idx[i+2:i]*32
        dst[i+31:i] := a[id+31:id]
    ENDFOR
    dst[MAX:256] := 0
[AVX2, immintrin.h, Swizzle]

Shuffle single-precision (32-bit) floating-point elements in "a" across lanes using the corresponding index in "idx".
    FOR j := 0 to 7
        i := j*32
        id := idx[i+2:i]*32
        dst[i+31:i] := a[id+31:id]
    ENDFOR
    dst[MAX:256] := 0
[AVX2, immintrin.h, Swizzle]

Shuffle 32-bit integers in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst".
    DEFINE SELECT4(src, control) {
        CASE(control[1:0]) OF
            0: tmp[31:0] := src[31:0]
            1: tmp[31:0] := src[63:32]
            2: tmp[31:0] := src[95:64]
            3: tmp[31:0] := src[127:96]
        ESAC
        RETURN tmp[31:0]
    }
    dst[31:0] := SELECT4(a[127:0], imm8[1:0])
    dst[63:32] := SELECT4(a[127:0], imm8[3:2])
    dst[95:64] := SELECT4(a[127:0], imm8[5:4])
    dst[127:96] := SELECT4(a[127:0], imm8[7:6])
    dst[159:128] := SELECT4(a[255:128], imm8[1:0])
    dst[191:160] := SELECT4(a[255:128], imm8[3:2])
    dst[223:192] := SELECT4(a[255:128], imm8[5:4])
    dst[255:224] := SELECT4(a[255:128], imm8[7:6])
    dst[MAX:256] := 0
[AVX2, immintrin.h, Swizzle]

Shuffle 8-bit integers in "a" within 128-bit lanes according to shuffle control mask in the corresponding 8-bit element of "b", and store the results in "dst".
    FOR j := 0 to 15
        i := j*8
        IF b[i+7] == 1
            dst[i+7:i] := 0
        ELSE
            index[3:0] := b[i+3:i]
            dst[i+7:i] := a[index*8+7:index*8]
        FI
        IF b[128+i+7] == 1
            dst[128+i+7:128+i] := 0
        ELSE
            index[3:0] := b[128+i+3:128+i]
            dst[128+i+7:128+i] := a[128+index*8+7:128+index*8]
        FI
    ENDFOR
    dst[MAX:256] := 0
[AVX2, immintrin.h, Swizzle]
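
A runtime-mask example with what is presumably `_mm256_shuffle_epi8`; each control byte indexes within its own 128-bit lane, and a set high bit zeroes the destination byte:

    use std::arch::x86_64::*;

    #[target_feature(enable = "avx2")]
    unsafe fn reverse_bytes_per_lane(a: __m256i) -> __m256i {
        let ctrl = _mm256_setr_epi8(
            15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
            15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
        );
        _mm256_shuffle_epi8(a, ctrl)
    }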

Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of "a" using the control in "imm8". Store the results in the high 64 bits of 128-bit lanes of "dst", with the low 64 bits of 128-bit lanes being copied from "a" to "dst".
    dst[63:0] := a[63:0]
    dst[79:64] := (a >> (imm8[1:0] * 16))[79:64]
    dst[95:80] := (a >> (imm8[3:2] * 16))[79:64]
    dst[111:96] := (a >> (imm8[5:4] * 16))[79:64]
    dst[127:112] := (a >> (imm8[7:6] * 16))[79:64]
    dst[191:128] := a[191:128]
    dst[207:192] := (a >> (imm8[1:0] * 16))[207:192]
    dst[223:208] := (a >> (imm8[3:2] * 16))[207:192]
    dst[239:224] := (a >> (imm8[5:4] * 16))[207:192]
    dst[255:240] := (a >> (imm8[7:6] * 16))[207:192]
    dst[MAX:256] := 0
[AVX2, immintrin.h, Swizzle]

Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of "a" using the control in "imm8". Store the results in the low 64 bits of 128-bit lanes of "dst", with the high 64 bits of 128-bit lanes being copied from "a" to "dst".
    dst[15:0] := (a >> (imm8[1:0] * 16))[15:0]
    dst[31:16] := (a >> (imm8[3:2] * 16))[15:0]
    dst[47:32] := (a >> (imm8[5:4] * 16))[15:0]
    dst[63:48] := (a >> (imm8[7:6] * 16))[15:0]
    dst[127:64] := a[127:64]
    dst[143:128] := (a >> (imm8[1:0] * 16))[143:128]
    dst[159:144] := (a >> (imm8[3:2] * 16))[143:128]
    dst[175:160] := (a >> (imm8[5:4] * 16))[143:128]
    dst[191:176] := (a >> (imm8[7:6] * 16))[143:128]
    dst[255:192] := a[255:192]
    dst[MAX:256] := 0
[AVX2, immintrin.h, Swizzle]

Unpack and interleave 8-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst".
    DEFINE INTERLEAVE_HIGH_BYTES(src1[127:0], src2[127:0]) {
        dst[7:0] := src1[71:64]
        dst[15:8] := src2[71:64]
        dst[23:16] := src1[79:72]
        dst[31:24] := src2[79:72]
        dst[39:32] := src1[87:80]
        dst[47:40] := src2[87:80]
        dst[55:48] := src1[95:88]
        dst[63:56] := src2[95:88]
        dst[71:64] := src1[103:96]
        dst[79:72] := src2[103:96]
        dst[87:80] := src1[111:104]
        dst[95:88] := src2[111:104]
        dst[103:96] := src1[119:112]
        dst[111:104] := src2[119:112]
        dst[119:112] := src1[127:120]
        dst[127:120] := src2[127:120]
        RETURN dst[127:0]
    }
    dst[127:0] := INTERLEAVE_HIGH_BYTES(a[127:0], b[127:0])
    dst[255:128] := INTERLEAVE_HIGH_BYTES(a[255:128], b[255:128])
    dst[MAX:256] := 0
[AVX2, immintrin.h, Swizzle]

Unpack and interleave 16-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst".
    DEFINE INTERLEAVE_HIGH_WORDS(src1[127:0], src2[127:0]) {
        dst[15:0] := src1[79:64]
        dst[31:16] := src2[79:64]
        dst[47:32] := src1[95:80]
        dst[63:48] := src2[95:80]
        dst[79:64] := src1[111:96]
        dst[95:80] := src2[111:96]
        dst[111:96] := src1[127:112]
        dst[127:112] := src2[127:112]
        RETURN dst[127:0]
    }
    dst[127:0] := INTERLEAVE_HIGH_WORDS(a[127:0], b[127:0])
    dst[255:128] := INTERLEAVE_HIGH_WORDS(a[255:128], b[255:128])
    dst[MAX:256] := 0
[AVX2, immintrin.h, Swizzle]

Unpack and interleave 32-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst".
    DEFINE INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]) {
        dst[31:0] := src1[95:64]
        dst[63:32] := src2[95:64]
        dst[95:64] := src1[127:96]
        dst[127:96] := src2[127:96]
        RETURN dst[127:0]
    }
    dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0])
    dst[255:128] := INTERLEAVE_HIGH_DWORDS(a[255:128], b[255:128])
    dst[MAX:256] := 0
[AVX2, immintrin.h, Swizzle]

Unpack and interleave 64-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst".
    DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) {
        dst[63:0] := src1[127:64]
        dst[127:64] := src2[127:64]
        RETURN dst[127:0]
    }
    dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0])
    dst[255:128] := INTERLEAVE_HIGH_QWORDS(a[255:128], b[255:128])
    dst[MAX:256] := 0
[AVX2, immintrin.h, Swizzle]

Unpack and interleave 8-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst".
    DEFINE INTERLEAVE_BYTES(src1[127:0], src2[127:0]) {
        dst[7:0] := src1[7:0]
        dst[15:8] := src2[7:0]
        dst[23:16] := src1[15:8]
        dst[31:24] := src2[15:8]
        dst[39:32] := src1[23:16]
        dst[47:40] := src2[23:16]
        dst[55:48] := src1[31:24]
        dst[63:56] := src2[31:24]
        dst[71:64] := src1[39:32]
        dst[79:72] := src2[39:32]
        dst[87:80] := src1[47:40]
        dst[95:88] := src2[47:40]
        dst[103:96] := src1[55:48]
        dst[111:104] := src2[55:48]
        dst[119:112] := src1[63:56]
        dst[127:120] := src2[63:56]
        RETURN dst[127:0]
    }
    dst[127:0] := INTERLEAVE_BYTES(a[127:0], b[127:0])
    dst[255:128] := INTERLEAVE_BYTES(a[255:128], b[255:128])
    dst[MAX:256] := 0
[AVX2, immintrin.h, Swizzle]

Unpack and interleave 16-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst".
    DEFINE INTERLEAVE_WORDS(src1[127:0], src2[127:0]) {
        dst[15:0] := src1[15:0]
        dst[31:16] := src2[15:0]
        dst[47:32] := src1[31:16]
        dst[63:48] := src2[31:16]
        dst[79:64] := src1[47:32]
        dst[95:80] := src2[47:32]
        dst[111:96] := src1[63:48]
        dst[127:112] := src2[63:48]
        RETURN dst[127:0]
    }
    dst[127:0] := INTERLEAVE_WORDS(a[127:0], b[127:0])
    dst[255:128] := INTERLEAVE_WORDS(a[255:128], b[255:128])
    dst[MAX:256] := 0
[AVX2, immintrin.h, Swizzle]

Unpack and interleave 32-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst".
    DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) {
        dst[31:0] := src1[31:0]
        dst[63:32] := src2[31:0]
        dst[95:64] := src1[63:32]
        dst[127:96] := src2[63:32]
        RETURN dst[127:0]
    }
    dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0])
    dst[255:128] := INTERLEAVE_DWORDS(a[255:128], b[255:128])
    dst[MAX:256] := 0
[AVX2, immintrin.h, Swizzle]

Unpack and interleave 64-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst".
    DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) {
        dst[63:0] := src1[63:0]
        dst[127:64] := src2[63:0]
        RETURN dst[127:0]
    }
    dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0])
    dst[255:128] := INTERLEAVE_QWORDS(a[255:128], b[255:128])
    dst[MAX:256] := 0
[AVX2, immintrin.h, Swizzle]
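
An interleave example, presumably `_mm256_unpacklo_epi8`, which zips the low eight bytes of each 128-bit lane of two vectors:

    use std::arch::x86_64::*;

    #[target_feature(enable = "avx2")]
    unsafe fn zip_low_bytes(a: __m256i, b: __m256i) -> __m256i {
        // per lane: a0, b0, a1, b1, ..., a7, b7
        _mm256_unpacklo_epi8(a, b)
    }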

Compute the absolute value of packed signed 8-bit integers in "a", and store the unsigned results in "dst".
    FOR j := 0 to 31
        i := j*8
        dst[i+7:i] := ABS(a[i+7:i])
    ENDFOR
    dst[MAX:256] := 0
[AVX2, immintrin.h, Special Math Functions]

Compute the absolute value of packed signed 16-bit integers in "a", and store the unsigned results in "dst".
    FOR j := 0 to 15
        i := j*16
        dst[i+15:i] := ABS(a[i+15:i])
    ENDFOR
    dst[MAX:256] := 0
[AVX2, immintrin.h, Special Math Functions]

Compute the absolute value of packed signed 32-bit integers in "a", and store the unsigned results in "dst".
    FOR j := 0 to 7
        i := j*32
        dst[i+31:i] := ABS(a[i+31:i])
    ENDFOR
    dst[MAX:256] := 0
[AVX2, immintrin.h, Special Math Functions]
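
Presumably the `_mm256_abs_epi{8,16,32}` family; because the results are unsigned, the signed minimum keeps its bit pattern:

    use std::arch::x86_64::*;

    #[target_feature(enable = "avx2")]
    unsafe fn magnitudes(a: __m256i) -> __m256i {
        // |x| per 16-bit lane; i16::MIN stays 0x8000, reinterpreted as unsigned
        _mm256_abs_epi16(a)
    }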

Compare packed signed 8-bit integers in "a" and "b", and store packed maximum values in "dst".
    FOR j := 0 to 31
        i := j*8
        dst[i+7:i] := MAX(a[i+7:i], b[i+7:i])
    ENDFOR
    dst[MAX:256] := 0
[AVX2, immintrin.h, Special Math Functions]

Compare packed signed 16-bit integers in "a" and "b", and store packed maximum values in "dst".
    FOR j := 0 to 15
        i := j*16
        dst[i+15:i] := MAX(a[i+15:i], b[i+15:i])
    ENDFOR
    dst[MAX:256] := 0
[AVX2, immintrin.h, Special Math Functions]

Compare packed signed 32-bit integers in "a" and "b", and store packed maximum values in "dst".
    FOR j := 0 to 7
        i := j*32
        dst[i+31:i] := MAX(a[i+31:i], b[i+31:i])
    ENDFOR
    dst[MAX:256] := 0
[AVX2, immintrin.h, Special Math Functions]

Compare packed unsigned 8-bit integers in "a" and "b", and store packed maximum values in "dst".
    FOR j := 0 to 31
        i := j*8
        dst[i+7:i] := MAX(a[i+7:i], b[i+7:i])
    ENDFOR
    dst[MAX:256] := 0
[AVX2, immintrin.h, Special Math Functions]

Compare packed unsigned 16-bit integers in "a" and "b", and store packed maximum values in "dst".
    FOR j := 0 to 15
        i := j*16
        dst[i+15:i] := MAX(a[i+15:i], b[i+15:i])
    ENDFOR
    dst[MAX:256] := 0
[AVX2, immintrin.h, Special Math Functions]

Compare packed unsigned 32-bit integers in "a" and "b", and store packed maximum values in "dst".
    FOR j := 0 to 7
        i := j*32
        dst[i+31:i] := MAX(a[i+31:i], b[i+31:i])
    ENDFOR
    dst[MAX:256] := 0
[AVX2, immintrin.h, Special Math Functions]

Compare packed signed 8-bit integers in "a" and "b", and store packed minimum values in "dst".
    FOR j := 0 to 31
        i := j*8
        dst[i+7:i] := MIN(a[i+7:i], b[i+7:i])
    ENDFOR
    dst[MAX:256] := 0
[AVX2, immintrin.h, Special Math Functions]

Compare packed signed 16-bit integers in "a" and "b", and store packed minimum values in "dst".
    FOR j := 0 to 15
        i := j*16
        dst[i+15:i] := MIN(a[i+15:i], b[i+15:i])
    ENDFOR
    dst[MAX:256] := 0
[AVX2, immintrin.h, Special Math Functions]

Compare packed signed 32-bit integers in "a" and "b", and store packed minimum values in "dst".
    FOR j := 0 to 7
        i := j*32
        dst[i+31:i] := MIN(a[i+31:i], b[i+31:i])
    ENDFOR
    dst[MAX:256] := 0
[AVX2, immintrin.h, Special Math Functions]

Compare packed unsigned 8-bit integers in "a" and "b", and store packed minimum values in "dst".
    FOR j := 0 to 31
        i := j*8
        dst[i+7:i] := MIN(a[i+7:i], b[i+7:i])
    ENDFOR
    dst[MAX:256] := 0
[AVX2, immintrin.h, Special Math Functions]

Compare packed unsigned 16-bit integers in "a" and "b", and store packed minimum values in "dst".
    FOR j := 0 to 15
        i := j*16
        dst[i+15:i] := MIN(a[i+15:i], b[i+15:i])
    ENDFOR
    dst[MAX:256] := 0
[AVX2, immintrin.h, Special Math Functions]

Compare packed unsigned 32-bit integers in "a" and "b", and store packed minimum values in "dst".
    FOR j := 0 to 7
        i := j*32
        dst[i+31:i] := MIN(a[i+31:i], b[i+31:i])
    ENDFOR
    dst[MAX:256] := 0
[AVX2, immintrin.h, Special Math Functions]
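
Presumably the `_mm256_max_*`/`_mm256_min_*` family; combining them gives a branch-free clamp:

    use std::arch::x86_64::*;

    #[target_feature(enable = "avx2")]
    unsafe fn clamp_i16(x: __m256i, lo: __m256i, hi: __m256i) -> __m256i {
        // per-lane max(lo, min(x, hi))
        _mm256_max_epi16(lo, _mm256_min_epi16(x, hi))
    }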

Add packed 8-bit integers in "a" and "b", and store the results in "dst".
    FOR j := 0 to 31
        i := j*8
        dst[i+7:i] := a[i+7:i] + b[i+7:i]
    ENDFOR
    dst[MAX:256] := 0
[AVX2, immintrin.h, Arithmetic]

Add packed 16-bit integers in "a" and "b", and store the results in "dst".
    FOR j := 0 to 15
        i := j*16
        dst[i+15:i] := a[i+15:i] + b[i+15:i]
    ENDFOR
    dst[MAX:256] := 0
[AVX2, immintrin.h, Arithmetic]

Add packed 32-bit integers in "a" and "b", and store the results in "dst".
    FOR j := 0 to 7
        i := j*32
        dst[i+31:i] := a[i+31:i] + b[i+31:i]
    ENDFOR
    dst[MAX:256] := 0
[AVX2, immintrin.h, Arithmetic]

Add packed 64-bit integers in "a" and "b", and store the results in "dst".
    FOR j := 0 to 3
        i := j*64
        dst[i+63:i] := a[i+63:i] + b[i+63:i]
    ENDFOR
    dst[MAX:256] := 0
[AVX2, immintrin.h, Arithmetic]

Add packed 8-bit integers in "a" and "b" using saturation, and store the results in "dst".
    FOR j := 0 to 31
        i := j*8
        dst[i+7:i] := Saturate8( a[i+7:i] + b[i+7:i] )
    ENDFOR
    dst[MAX:256] := 0
[AVX2, immintrin.h, Arithmetic]

Add packed 16-bit integers in "a" and "b" using saturation, and store the results in "dst".
    FOR j := 0 to 15
        i := j*16
        dst[i+15:i] := Saturate16( a[i+15:i] + b[i+15:i] )
    ENDFOR
    dst[MAX:256] := 0
[AVX2, immintrin.h, Arithmetic]

Add packed unsigned 8-bit integers in "a" and "b" using saturation, and store the results in "dst".
    FOR j := 0 to 31
        i := j*8
        dst[i+7:i] := SaturateU8( a[i+7:i] + b[i+7:i] )
    ENDFOR
    dst[MAX:256] := 0
[AVX2, immintrin.h, Arithmetic]

Add packed unsigned 16-bit integers in "a" and "b" using saturation, and store the results in "dst".
    FOR j := 0 to 15
        i := j*16
        dst[i+15:i] := SaturateU16( a[i+15:i] + b[i+15:i] )
    ENDFOR
    dst[MAX:256] := 0
[AVX2, immintrin.h, Arithmetic]
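
Wrapping and saturating addition side by side, presumably `_mm256_add_epi8` and `_mm256_adds_epu8`:

    use std::arch::x86_64::*;

    #[target_feature(enable = "avx2")]
    unsafe fn add_both(a: __m256i, b: __m256i) -> (__m256i, __m256i) {
        let wrapping = _mm256_add_epi8(a, b);     // 0xFF + 1 -> 0x00
        let saturating = _mm256_adds_epu8(a, b);  // 0xFF + 1 -> 0xFF
        (wrapping, saturating)
    }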

Horizontally add adjacent pairs of 16-bit integers in "a" and "b", and pack the signed 16-bit results in "dst".
    dst[15:0] := a[31:16] + a[15:0]
    dst[31:16] := a[63:48] + a[47:32]
    dst[47:32] := a[95:80] + a[79:64]
    dst[63:48] := a[127:112] + a[111:96]
    dst[79:64] := b[31:16] + b[15:0]
    dst[95:80] := b[63:48] + b[47:32]
    dst[111:96] := b[95:80] + b[79:64]
    dst[127:112] := b[127:112] + b[111:96]
    dst[143:128] := a[159:144] + a[143:128]
    dst[159:144] := a[191:176] + a[175:160]
    dst[175:160] := a[223:208] + a[207:192]
    dst[191:176] := a[255:240] + a[239:224]
    dst[207:192] := b[159:144] + b[143:128]
    dst[223:208] := b[191:176] + b[175:160]
    dst[239:224] := b[223:208] + b[207:192]
    dst[255:240] := b[255:240] + b[239:224]
    dst[MAX:256] := 0
[AVX2, immintrin.h, Arithmetic]

Horizontally add adjacent pairs of 32-bit integers in "a" and "b", and pack the signed 32-bit results in "dst".
    dst[31:0] := a[63:32] + a[31:0]
    dst[63:32] := a[127:96] + a[95:64]
    dst[95:64] := b[63:32] + b[31:0]
    dst[127:96] := b[127:96] + b[95:64]
    dst[159:128] := a[191:160] + a[159:128]
    dst[191:160] := a[255:224] + a[223:192]
    dst[223:192] := b[191:160] + b[159:128]
    dst[255:224] := b[255:224] + b[223:192]
    dst[MAX:256] := 0
[AVX2, immintrin.h, Arithmetic]

Horizontally add adjacent pairs of signed 16-bit integers in "a" and "b" using saturation, and pack the signed 16-bit results in "dst".
    dst[15:0] := Saturate16(a[31:16] + a[15:0])
    dst[31:16] := Saturate16(a[63:48] + a[47:32])
    dst[47:32] := Saturate16(a[95:80] + a[79:64])
    dst[63:48] := Saturate16(a[127:112] + a[111:96])
    dst[79:64] := Saturate16(b[31:16] + b[15:0])
    dst[95:80] := Saturate16(b[63:48] + b[47:32])
    dst[111:96] := Saturate16(b[95:80] + b[79:64])
    dst[127:112] := Saturate16(b[127:112] + b[111:96])
    dst[143:128] := Saturate16(a[159:144] + a[143:128])
    dst[159:144] := Saturate16(a[191:176] + a[175:160])
    dst[175:160] := Saturate16(a[223:208] + a[207:192])
    dst[191:176] := Saturate16(a[255:240] + a[239:224])
    dst[207:192] := Saturate16(b[159:144] + b[143:128])
    dst[223:208] := Saturate16(b[191:176] + b[175:160])
    dst[239:224] := Saturate16(b[223:208] + b[207:192])
    dst[255:240] := Saturate16(b[255:240] + b[239:224])
    dst[MAX:256] := 0
[AVX2, immintrin.h, Arithmetic]
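
Presumably `_mm256_hadd_epi16`; note the a/b interleaving above happens per 128-bit lane, not across the full vector:

    use std::arch::x86_64::*;

    #[target_feature(enable = "avx2")]
    unsafe fn pairwise_sums(a: __m256i, b: __m256i) -> __m256i {
        // low lane: a-pair sums then b-pair sums of the low halves; high lane likewise
        _mm256_hadd_epi16(a, b)
    }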

Horizontally subtract adjacent pairs of 16-bit integers in "a" and "b", and pack the signed 16-bit results in "dst".
    dst[15:0] := a[15:0] - a[31:16]
    dst[31:16] := a[47:32] - a[63:48]
    dst[47:32] := a[79:64] - a[95:80]
    dst[63:48] := a[111:96] - a[127:112]
    dst[79:64] := b[15:0] - b[31:16]
    dst[95:80] := b[47:32] - b[63:48]
    dst[111:96] := b[79:64] - b[95:80]
    dst[127:112] := b[111:96] - b[127:112]
    dst[143:128] := a[143:128] - a[159:144]
    dst[159:144] := a[175:160] - a[191:176]
    dst[175:160] := a[207:192] - a[223:208]
    dst[191:176] := a[239:224] - a[255:240]
    dst[207:192] := b[143:128] - b[159:144]
    dst[223:208] := b[175:160] - b[191:176]
    dst[239:224] := b[207:192] - b[223:208]
    dst[255:240] := b[239:224] - b[255:240]
    dst[MAX:256] := 0
[AVX2, immintrin.h, Arithmetic]

Horizontally subtract adjacent pairs of 32-bit integers in "a" and "b", and pack the signed 32-bit results in "dst".
    dst[31:0] := a[31:0] - a[63:32]
    dst[63:32] := a[95:64] - a[127:96]
    dst[95:64] := b[31:0] - b[63:32]
    dst[127:96] := b[95:64] - b[127:96]
    dst[159:128] := a[159:128] - a[191:160]
    dst[191:160] := a[223:192] - a[255:224]
    dst[223:192] := b[159:128] - b[191:160]
    dst[255:224] := b[223:192] - b[255:224]
    dst[MAX:256] := 0
[AVX2, immintrin.h, Arithmetic]

Horizontally subtract adjacent pairs of signed 16-bit integers in "a" and "b" using saturation, and pack the signed 16-bit results in "dst".
    dst[15:0] := Saturate16(a[15:0] - a[31:16])
    dst[31:16] := Saturate16(a[47:32] - a[63:48])
    dst[47:32] := Saturate16(a[79:64] - a[95:80])
    dst[63:48] := Saturate16(a[111:96] - a[127:112])
    dst[79:64] := Saturate16(b[15:0] - b[31:16])
    dst[95:80] := Saturate16(b[47:32] - b[63:48])
    dst[111:96] := Saturate16(b[79:64] - b[95:80])
    dst[127:112] := Saturate16(b[111:96] - b[127:112])
    dst[143:128] := Saturate16(a[143:128] - a[159:144])
    dst[159:144] := Saturate16(a[175:160] - a[191:176])
    dst[175:160] := Saturate16(a[207:192] - a[223:208])
    dst[191:176] := Saturate16(a[239:224] - a[255:240])
    dst[207:192] := Saturate16(b[143:128] - b[159:144])
    dst[223:208] := Saturate16(b[175:160] - b[191:176])
    dst[239:224] := Saturate16(b[207:192] - b[223:208])
    dst[255:240] := Saturate16(b[239:224] - b[255:240])
    dst[MAX:256] := 0
[AVX2, immintrin.h, Arithmetic]

Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, and pack the results in "dst".
    FOR j := 0 to 7
        i := j*32
        dst[i+31:i] := SignExtend32(a[i+31:i+16]*b[i+31:i+16]) + SignExtend32(a[i+15:i]*b[i+15:i])
    ENDFOR
    dst[MAX:256] := 0
[AVX2, immintrin.h, Arithmetic]

Vertically multiply each unsigned 8-bit integer from "a" with the corresponding signed 8-bit integer from "b", producing intermediate signed 16-bit integers. Horizontally add adjacent pairs of intermediate signed 16-bit integers, and pack the saturated results in "dst".
    FOR j := 0 to 15
        i := j*16
        dst[i+15:i] := Saturate16( a[i+15:i+8]*b[i+15:i+8] + a[i+7:i]*b[i+7:i] )
    ENDFOR
    dst[MAX:256] := 0
[AVX2, immintrin.h, Arithmetic]
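
Presumably `_mm256_madd_epi16`, the usual building block for integer dot products:

    use std::arch::x86_64::*;

    #[target_feature(enable = "avx2")]
    unsafe fn dot_step(a: __m256i, b: __m256i, acc: __m256i) -> __m256i {
        // eight i32 partial sums: a[2k]*b[2k] + a[2k+1]*b[2k+1]
        _mm256_add_epi32(acc, _mm256_madd_epi16(a, b))
    }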
- - - - Multiply the low signed 32-bit integers from each packed 64-bit element in "a" - and "b", and store the signed 64-bit results in "dst". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := SignExtend64(a[i+31:i]) * SignExtend64(b[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - - AVX2 -
immintrin.h
- Arithmetic
+
+
+
+ Multiply the low signed 32-bit integers from each packed 64-bit element in "a" and "b", and store the signed 64-bit results in "dst".
+
+ FOR j := 0 to 3
+     i := j*64
+     dst[i+63:i] := SignExtend64(a[i+31:i]) * SignExtend64(b[i+31:i])
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Arithmetic
-
-
-
- Multiply the low unsigned 32-bit integers from each packed 64-bit element in
- "a" and "b", and store the unsigned 64-bit results in "dst".
-
- FOR j := 0 to 3
-     i := j*64
-     dst[i+63:i] := a[i+31:i] * b[i+31:i]
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Arithmetic
+
+
+
+ Multiply the low unsigned 32-bit integers from each packed 64-bit element in "a" and "b", and store the unsigned 64-bit results in "dst".
+
+ FOR j := 0 to 3
+     i := j*64
+     dst[i+63:i] := a[i+31:i] * b[i+31:i]
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Arithmetic
-
-
-
- Multiply the packed signed 16-bit integers in "a" and "b", producing
- intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in
- "dst".
-
- FOR j := 0 to 15
-     i := j*16
-     tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])
-     dst[i+15:i] := tmp[31:16]
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Arithmetic
+
+
+
+ Multiply the packed signed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst".
+
+ FOR j := 0 to 15
+     i := j*16
+     tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])
+     dst[i+15:i] := tmp[31:16]
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Arithmetic
-
-
-
- Multiply the packed unsigned 16-bit integers in "a" and "b", producing
- intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in
- "dst".
-
- FOR j := 0 to 15
-     i := j*16
-     tmp[31:0] := a[i+15:i] * b[i+15:i]
-     dst[i+15:i] := tmp[31:16]
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Arithmetic
+
+
+
+ Multiply the packed unsigned 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst".
+
+ FOR j := 0 to 15
+     i := j*16
+     tmp[31:0] := a[i+15:i] * b[i+15:i]
+     dst[i+15:i] := tmp[31:16]
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Arithmetic
-
-
-
- Multiply packed signed 16-bit integers in "a" and "b", producing intermediate
- signed 32-bit integers. Truncate each intermediate integer to the 18 most significant
- bits, round by adding 1, and store bits [16:1] to "dst".
-
- FOR j := 0 to 15
-     i := j*16
-     tmp[31:0] := ((SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])) >> 14) + 1
-     dst[i+15:i] := tmp[16:1]
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Arithmetic
+
+
+
+ Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits [16:1] to "dst".
+
+ FOR j := 0 to 15
+     i := j*16
+     tmp[31:0] := ((SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])) >> 14) + 1
+     dst[i+15:i] := tmp[16:1]
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Arithmetic
-
-
-
- Multiply the packed signed 16-bit integers in "a" and "b", producing
- intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in
- "dst".
-
- FOR j := 0 to 15
-     i := j*16
-     tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])
-     dst[i+15:i] := tmp[15:0]
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
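For readers cross-checking the "truncate to 18 bits, add 1, keep bits [16:1]" rounding in the mulhrs pseudocode above, a scalar Rust model may help; this is an illustrative sketch, not part of this patch, and the name `mulhrs16` is invented:

    // Scalar model of the rounding step above (assumed equivalent, for cross-checking).
    fn mulhrs16(a: i16, b: i16) -> i16 {
        let tmp = ((a as i32 * b as i32) >> 14) + 1; // 18 significant bits, +1 rounds
        (tmp >> 1) as i16 // keep bits [16:1]
    }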
- Arithmetic
+
+
+
+ Multiply the packed signed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in "dst".
+
+ FOR j := 0 to 15
+     i := j*16
+     tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])
+     dst[i+15:i] := tmp[15:0]
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Arithmetic
-
-
-
- Multiply the packed signed 32-bit integers in "a" and "b", producing
- intermediate 64-bit integers, and store the low 32 bits of the intermediate integers in
- "dst".
-
- FOR j := 0 to 7
-     i := j*32
-     tmp[63:0] := a[i+31:i] * b[i+31:i]
-     dst[i+31:i] := tmp[31:0]
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Arithmetic
+
+
+
+ Multiply the packed signed 32-bit integers in "a" and "b", producing intermediate 64-bit integers, and store the low 32 bits of the intermediate integers in "dst".
+
+ FOR j := 0 to 7
+     i := j*32
+     tmp[63:0] := a[i+31:i] * b[i+31:i]
+     dst[i+31:i] := tmp[31:0]
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Arithmetic
-
-
-
- Compute the absolute differences of packed unsigned 8-bit integers in "a" and
- "b", then horizontally sum each consecutive 8 differences to produce four unsigned
- 16-bit integers, and pack these unsigned 16-bit integers in the low 16 bits of 64-bit
- elements in "dst".
-
- FOR j := 0 to 31
-     i := j*8
-     tmp[i+7:i] := ABS(a[i+7:i] - b[i+7:i])
- ENDFOR
- FOR j := 0 to 3
-     i := j*64
-     dst[i+15:i] := tmp[i+7:i] + tmp[i+15:i+8] + tmp[i+23:i+16] + tmp[i+31:i+24] + \
-     tmp[i+39:i+32] + tmp[i+47:i+40] + tmp[i+55:i+48] + tmp[i+63:i+56]
-     dst[i+63:i+16] := 0
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Arithmetic
+
+
+
+ Compute the absolute differences of packed unsigned 8-bit integers in "a" and "b", then horizontally sum each consecutive 8 differences to produce four unsigned 16-bit integers, and pack these unsigned 16-bit integers in the low 16 bits of 64-bit elements in "dst".
+
+ FOR j := 0 to 31
+     i := j*8
+     tmp[i+7:i] := ABS(a[i+7:i] - b[i+7:i])
+ ENDFOR
+ FOR j := 0 to 3
+     i := j*64
+     dst[i+15:i] := tmp[i+7:i] + tmp[i+15:i+8] + tmp[i+23:i+16] + tmp[i+31:i+24] + \
+     tmp[i+39:i+32] + tmp[i+47:i+40] + tmp[i+55:i+48] + tmp[i+63:i+56]
+     dst[i+63:i+16] := 0
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Arithmetic
-
-
-
- Negate packed signed 8-bit integers in "a" when the corresponding signed 8-bit
- integer in "b" is negative, and store the results in "dst". Element in "dst" are zeroed
- out when the corresponding element in "b" is zero.
-
- FOR j := 0 to 31
-     i := j*8
-     IF b[i+7:i] < 0
-         dst[i+7:i] := -(a[i+7:i])
-     ELSE IF b[i+7:i] == 0
-         dst[i+7:i] := 0
-     ELSE
-         dst[i+7:i] := a[i+7:i]
-     FI
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Arithmetic
+
+
+
+ Negate packed signed 8-bit integers in "a" when the corresponding signed 8-bit integer in "b" is negative, and store the results in "dst". Elements in "dst" are zeroed out when the corresponding element in "b" is zero.
+
+ FOR j := 0 to 31
+     i := j*8
+     IF b[i+7:i] < 0
+         dst[i+7:i] := -(a[i+7:i])
+     ELSE IF b[i+7:i] == 0
+         dst[i+7:i] := 0
+     ELSE
+         dst[i+7:i] := a[i+7:i]
+     FI
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Arithmetic
-
-
-
- Negate packed signed 16-bit integers in "a" when the corresponding signed
- 16-bit integer in "b" is negative, and store the results in "dst". Element in "dst" are
- zeroed out when the corresponding element in "b" is zero.
-
- FOR j := 0 to 15
-     i := j*16
-     IF b[i+15:i] < 0
-         dst[i+15:i] := -(a[i+15:i])
-     ELSE IF b[i+15:i] == 0
-         dst[i+15:i] := 0
-     ELSE
-         dst[i+15:i] := a[i+15:i]
-     FI
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Arithmetic
+
+
+
+ Negate packed signed 16-bit integers in "a" when the corresponding signed 16-bit integer in "b" is negative, and store the results in "dst". Elements in "dst" are zeroed out when the corresponding element in "b" is zero.
+
+ FOR j := 0 to 15
+     i := j*16
+     IF b[i+15:i] < 0
+         dst[i+15:i] := -(a[i+15:i])
+     ELSE IF b[i+15:i] == 0
+         dst[i+15:i] := 0
+     ELSE
+         dst[i+15:i] := a[i+15:i]
+     FI
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Arithmetic
-
-
-
- Negate packed signed 32-bit integers in "a" when the corresponding signed
- 32-bit integer in "b" is negative, and store the results in "dst". Element in "dst" are
- zeroed out when the corresponding element in "b" is zero.
-
- FOR j := 0 to 7
-     i := j*32
-     IF b[i+31:i] < 0
-         dst[i+31:i] := -(a[i+31:i])
-     ELSE IF b[i+31:i] == 0
-         dst[i+31:i] := 0
-     ELSE
-         dst[i+31:i] := a[i+31:i]
-     FI
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Arithmetic
+
+
+
+ Negate packed signed 32-bit integers in "a" when the corresponding signed 32-bit integer in "b" is negative, and store the results in "dst". Elements in "dst" are zeroed out when the corresponding element in "b" is zero.
+
+ FOR j := 0 to 7
+     i := j*32
+     IF b[i+31:i] < 0
+         dst[i+31:i] := -(a[i+31:i])
+     ELSE IF b[i+31:i] == 0
+         dst[i+31:i] := 0
+     ELSE
+         dst[i+31:i] := a[i+31:i]
+     FI
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Arithmetic
-
-
-
- Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a", and
- store the results in "dst".
-
- FOR j := 0 to 31
-     i := j*8
-     dst[i+7:i] := a[i+7:i] - b[i+7:i]
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
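A scalar Rust sketch of the negate/zero/pass-through rule in the sign entries above; illustrative only, not part of this patch, and the name `sign8` is invented:

    // Scalar model of the sign operation: negate when b < 0, zero when b == 0.
    fn sign8(a: i8, b: i8) -> i8 {
        if b < 0 { a.wrapping_neg() } else if b == 0 { 0 } else { a }
    }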
- Arithmetic
+
+
+
+ Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a", and store the results in "dst".
+
+ FOR j := 0 to 31
+     i := j*8
+     dst[i+7:i] := a[i+7:i] - b[i+7:i]
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Arithmetic
-
-
-
- Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a", and
- store the results in "dst".
-
- FOR j := 0 to 15
-     i := j*16
-     dst[i+15:i] := a[i+15:i] - b[i+15:i]
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Arithmetic
+
+
+
+ Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a", and store the results in "dst".
+
+ FOR j := 0 to 15
+     i := j*16
+     dst[i+15:i] := a[i+15:i] - b[i+15:i]
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Arithmetic
-
-
-
- Subtract packed 32-bit integers in "b" from packed 32-bit integers in "a", and
- store the results in "dst".
-
- FOR j := 0 to 7
-     i := j*32
-     dst[i+31:i] := a[i+31:i] - b[i+31:i]
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Arithmetic
+
+
+
+ Subtract packed 32-bit integers in "b" from packed 32-bit integers in "a", and store the results in "dst".
+
+ FOR j := 0 to 7
+     i := j*32
+     dst[i+31:i] := a[i+31:i] - b[i+31:i]
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Arithmetic
-
-
-
- Subtract packed 64-bit integers in "b" from packed 64-bit integers in "a", and
- store the results in "dst".
-
- FOR j := 0 to 3
-     i := j*64
-     dst[i+63:i] := a[i+63:i] - b[i+63:i]
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Arithmetic
+
+
+
+ Subtract packed 64-bit integers in "b" from packed 64-bit integers in "a", and store the results in "dst".
+
+ FOR j := 0 to 3
+     i := j*64
+     dst[i+63:i] := a[i+63:i] - b[i+63:i]
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Arithmetic
-
-
-
- Subtract packed signed 8-bit integers in "b" from packed 8-bit integers in "a"
- using saturation, and store the results in "dst".
-
- FOR j := 0 to 31
-     i := j*8
-     dst[i+7:i] := Saturate8(a[i+7:i] - b[i+7:i])
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Arithmetic
+
+
+
+ Subtract packed signed 8-bit integers in "b" from packed 8-bit integers in "a" using saturation, and store the results in "dst".
+
+ FOR j := 0 to 31
+     i := j*8
+     dst[i+7:i] := Saturate8(a[i+7:i] - b[i+7:i])
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Arithmetic
-
-
-
- Subtract packed signed 16-bit integers in "b" from packed 16-bit integers in
- "a" using saturation, and store the results in "dst".
-
- FOR j := 0 to 15
-     i := j*16
-     dst[i+15:i] := Saturate16(a[i+15:i] - b[i+15:i])
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Arithmetic
+
+
+
+ Subtract packed signed 16-bit integers in "b" from packed 16-bit integers in "a" using saturation, and store the results in "dst".
+
+ FOR j := 0 to 15
+     i := j*16
+     dst[i+15:i] := Saturate16(a[i+15:i] - b[i+15:i])
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Arithmetic
-
-
-
- Subtract packed unsigned 8-bit integers in "b" from packed unsigned 8-bit
- integers in "a" using saturation, and store the results in "dst".
-
- FOR j := 0 to 31
-     i := j*8
-     dst[i+7:i] := SaturateU8(a[i+7:i] - b[i+7:i])
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Arithmetic
+
+
+
+ Subtract packed unsigned 8-bit integers in "b" from packed unsigned 8-bit integers in "a" using saturation, and store the results in "dst".
+
+ FOR j := 0 to 31
+     i := j*8
+     dst[i+7:i] := SaturateU8(a[i+7:i] - b[i+7:i])
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Arithmetic
-
-
-
- Subtract packed unsigned 16-bit integers in "b" from packed unsigned 16-bit
- integers in "a" using saturation, and store the results in "dst".
-
- FOR j := 0 to 15
-     i := j*16
-     dst[i+15:i] := SaturateU16(a[i+15:i] - b[i+15:i])
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Arithmetic
+
+
+
+ Subtract packed unsigned 16-bit integers in "b" from packed unsigned 16-bit integers in "a" using saturation, and store the results in "dst".
+
+ FOR j := 0 to 15
+     i := j*16
+     dst[i+15:i] := SaturateU16(a[i+15:i] - b[i+15:i])
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Arithmetic
-
-
-
-
- Concatenate pairs of 16-byte blocks in "a" and "b" into a 32-byte temporary
- result, shift the result right by "imm8" bytes, and store the low 16 bytes in "dst".
-
- FOR j := 0 to 1
-     i := j*128
-     tmp[255:0] := ((a[i+127:i] << 128)[255:0] OR b[i+127:i]) >> (imm8*8)
-     dst[i+127:i] := tmp[127:0]
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Miscellaneous
+
+
+
+
+ Concatenate pairs of 16-byte blocks in "a" and "b" into a 32-byte temporary result, shift the result right by "imm8" bytes, and store the low 16 bytes in "dst".
+
+ FOR j := 0 to 1
+     i := j*128
+     tmp[255:0] := ((a[i+127:i] << 128)[255:0] OR b[i+127:i]) >> (imm8*8)
+     dst[i+127:i] := tmp[127:0]
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Miscellaneous
-
-
- Create mask from the most significant bit of each 8-bit element in "a", and
- store the result in "dst".
-
- FOR j := 0 to 31
-     i := j*8
-     dst[j] := a[i+7]
- ENDFOR
-
-
- AVX2
-
immintrin.h
- Miscellaneous
+
+
+ Create mask from the most significant bit of each 8-bit element in "a", and store the result in "dst".
+
+ FOR j := 0 to 31
+     i := j*8
+     dst[j] := a[i+7]
+ ENDFOR
+
+
+ AVX2
immintrin.h
+ Miscellaneous
-
-
-
-
- Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit
- integers in "a" compared to those in "b", and store the 16-bit results in "dst".
- Eight SADs are performed for each 128-bit lane using one quadruplet from "b" and eight
- quadruplets from "a". One quadruplet is selected from "b" starting at on the offset
- specified in "imm8". Eight quadruplets are formed from sequential 8-bit integers
- selected from "a" starting at the offset specified in "imm8".
-
- DEFINE MPSADBW(a[127:0], b[127:0], imm8[2:0]) {
-     a_offset := imm8[2]*32
-     b_offset := imm8[1:0]*32
-     FOR j := 0 to 7
-         i := j*8
-         k := a_offset+i
-         l := b_offset
-         tmp[i*2+15:i*2] := ABS(Signed(a[k+7:k] - b[l+7:l])) + ABS(Signed(a[k+15:k+8] -
-         b[l+15:l+8])) + \
-         ABS(Signed(a[k+23:k+16] - b[l+23:l+16])) + ABS(Signed(a[k+31:k+24] - b[l+31:l+24]))
-     ENDFOR
-     RETURN tmp[127:0]
- }
- dst[127:0] := MPSADBW(a[127:0], b[127:0], imm8[2:0])
- dst[255:128] := MPSADBW(a[255:128], b[255:128], imm8[5:3])
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Miscellaneous
+
+
+
+
+ Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in "a" compared to those in "b", and store the 16-bit results in "dst".
+ Eight SADs are performed for each 128-bit lane using one quadruplet from "b" and eight quadruplets from "a". One quadruplet is selected from "b" starting at the offset specified in "imm8". Eight quadruplets are formed from sequential 8-bit integers selected from "a" starting at the offset specified in "imm8".
+
+ DEFINE MPSADBW(a[127:0], b[127:0], imm8[2:0]) {
+     a_offset := imm8[2]*32
+     b_offset := imm8[1:0]*32
+     FOR j := 0 to 7
+         i := j*8
+         k := a_offset+i
+         l := b_offset
+         tmp[i*2+15:i*2] := ABS(Signed(a[k+7:k] - b[l+7:l])) + ABS(Signed(a[k+15:k+8] - b[l+15:l+8])) + \
+         ABS(Signed(a[k+23:k+16] - b[l+23:l+16])) + ABS(Signed(a[k+31:k+24] - b[l+31:l+24]))
+     ENDFOR
+     RETURN tmp[127:0]
+ }
+ dst[127:0] := MPSADBW(a[127:0], b[127:0], imm8[2:0])
+ dst[255:128] := MPSADBW(a[255:128], b[255:128], imm8[5:3])
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Miscellaneous
-
-
-
- Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers
- using signed saturation, and store the results in "dst".
-
- dst[7:0] := Saturate8(a[15:0])
- dst[15:8] := Saturate8(a[31:16])
- dst[23:16] := Saturate8(a[47:32])
- dst[31:24] := Saturate8(a[63:48])
- dst[39:32] := Saturate8(a[79:64])
- dst[47:40] := Saturate8(a[95:80])
- dst[55:48] := Saturate8(a[111:96])
- dst[63:56] := Saturate8(a[127:112])
- dst[71:64] := Saturate8(b[15:0])
- dst[79:72] := Saturate8(b[31:16])
- dst[87:80] := Saturate8(b[47:32])
- dst[95:88] := Saturate8(b[63:48])
- dst[103:96] := Saturate8(b[79:64])
- dst[111:104] := Saturate8(b[95:80])
- dst[119:112] := Saturate8(b[111:96])
- dst[127:120] := Saturate8(b[127:112])
- dst[135:128] := Saturate8(a[143:128])
- dst[143:136] := Saturate8(a[159:144])
- dst[151:144] := Saturate8(a[175:160])
- dst[159:152] := Saturate8(a[191:176])
- dst[167:160] := Saturate8(a[207:192])
- dst[175:168] := Saturate8(a[223:208])
- dst[183:176] := Saturate8(a[239:224])
- dst[191:184] := Saturate8(a[255:240])
- dst[199:192] := Saturate8(b[143:128])
- dst[207:200] := Saturate8(b[159:144])
- dst[215:208] := Saturate8(b[175:160])
- dst[223:216] := Saturate8(b[191:176])
- dst[231:224] := Saturate8(b[207:192])
- dst[239:232] := Saturate8(b[223:208])
- dst[247:240] := Saturate8(b[239:224])
- dst[255:248] := Saturate8(b[255:240])
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
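A scalar Rust model of one MPSADBW quadruplet SAD from the pseudocode above; this is an illustrative sketch under assumed semantics, not part of the patch, and `sad4` is an invented name:

    // Sum of absolute differences of one 4-byte quadruplet, as in MPSADBW above.
    fn sad4(a: &[u8; 4], b: &[u8; 4]) -> u16 {
        a.iter().zip(b).map(|(&x, &y)| (x as i16 - y as i16).unsigned_abs()).sum()
    }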
- Miscellaneous
+
+
+
+ Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using signed saturation, and store the results in "dst".
+
+ dst[7:0] := Saturate8(a[15:0])
+ dst[15:8] := Saturate8(a[31:16])
+ dst[23:16] := Saturate8(a[47:32])
+ dst[31:24] := Saturate8(a[63:48])
+ dst[39:32] := Saturate8(a[79:64])
+ dst[47:40] := Saturate8(a[95:80])
+ dst[55:48] := Saturate8(a[111:96])
+ dst[63:56] := Saturate8(a[127:112])
+ dst[71:64] := Saturate8(b[15:0])
+ dst[79:72] := Saturate8(b[31:16])
+ dst[87:80] := Saturate8(b[47:32])
+ dst[95:88] := Saturate8(b[63:48])
+ dst[103:96] := Saturate8(b[79:64])
+ dst[111:104] := Saturate8(b[95:80])
+ dst[119:112] := Saturate8(b[111:96])
+ dst[127:120] := Saturate8(b[127:112])
+ dst[135:128] := Saturate8(a[143:128])
+ dst[143:136] := Saturate8(a[159:144])
+ dst[151:144] := Saturate8(a[175:160])
+ dst[159:152] := Saturate8(a[191:176])
+ dst[167:160] := Saturate8(a[207:192])
+ dst[175:168] := Saturate8(a[223:208])
+ dst[183:176] := Saturate8(a[239:224])
+ dst[191:184] := Saturate8(a[255:240])
+ dst[199:192] := Saturate8(b[143:128])
+ dst[207:200] := Saturate8(b[159:144])
+ dst[215:208] := Saturate8(b[175:160])
+ dst[223:216] := Saturate8(b[191:176])
+ dst[231:224] := Saturate8(b[207:192])
+ dst[239:232] := Saturate8(b[223:208])
+ dst[247:240] := Saturate8(b[239:224])
+ dst[255:248] := Saturate8(b[255:240])
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Miscellaneous
-
-
-
- Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit
- integers using signed saturation, and store the results in "dst".
-
- dst[15:0] := Saturate16(a[31:0])
- dst[31:16] := Saturate16(a[63:32])
- dst[47:32] := Saturate16(a[95:64])
- dst[63:48] := Saturate16(a[127:96])
- dst[79:64] := Saturate16(b[31:0])
- dst[95:80] := Saturate16(b[63:32])
- dst[111:96] := Saturate16(b[95:64])
- dst[127:112] := Saturate16(b[127:96])
- dst[143:128] := Saturate16(a[159:128])
- dst[159:144] := Saturate16(a[191:160])
- dst[175:160] := Saturate16(a[223:192])
- dst[191:176] := Saturate16(a[255:224])
- dst[207:192] := Saturate16(b[159:128])
- dst[223:208] := Saturate16(b[191:160])
- dst[239:224] := Saturate16(b[223:192])
- dst[255:240] := Saturate16(b[255:224])
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Miscellaneous
+
+
+
+ Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit integers using signed saturation, and store the results in "dst".
+
+ dst[15:0] := Saturate16(a[31:0])
+ dst[31:16] := Saturate16(a[63:32])
+ dst[47:32] := Saturate16(a[95:64])
+ dst[63:48] := Saturate16(a[127:96])
+ dst[79:64] := Saturate16(b[31:0])
+ dst[95:80] := Saturate16(b[63:32])
+ dst[111:96] := Saturate16(b[95:64])
+ dst[127:112] := Saturate16(b[127:96])
+ dst[143:128] := Saturate16(a[159:128])
+ dst[159:144] := Saturate16(a[191:160])
+ dst[175:160] := Saturate16(a[223:192])
+ dst[191:176] := Saturate16(a[255:224])
+ dst[207:192] := Saturate16(b[159:128])
+ dst[223:208] := Saturate16(b[191:160])
+ dst[239:224] := Saturate16(b[223:192])
+ dst[255:240] := Saturate16(b[255:224])
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Miscellaneous
-
-
-
- Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers
- using unsigned saturation, and store the results in "dst".
-
- dst[7:0] := SaturateU8(a[15:0])
- dst[15:8] := SaturateU8(a[31:16])
- dst[23:16] := SaturateU8(a[47:32])
- dst[31:24] := SaturateU8(a[63:48])
- dst[39:32] := SaturateU8(a[79:64])
- dst[47:40] := SaturateU8(a[95:80])
- dst[55:48] := SaturateU8(a[111:96])
- dst[63:56] := SaturateU8(a[127:112])
- dst[71:64] := SaturateU8(b[15:0])
- dst[79:72] := SaturateU8(b[31:16])
- dst[87:80] := SaturateU8(b[47:32])
- dst[95:88] := SaturateU8(b[63:48])
- dst[103:96] := SaturateU8(b[79:64])
- dst[111:104] := SaturateU8(b[95:80])
- dst[119:112] := SaturateU8(b[111:96])
- dst[127:120] := SaturateU8(b[127:112])
- dst[135:128] := SaturateU8(a[143:128])
- dst[143:136] := SaturateU8(a[159:144])
- dst[151:144] := SaturateU8(a[175:160])
- dst[159:152] := SaturateU8(a[191:176])
- dst[167:160] := SaturateU8(a[207:192])
- dst[175:168] := SaturateU8(a[223:208])
- dst[183:176] := SaturateU8(a[239:224])
- dst[191:184] := SaturateU8(a[255:240])
- dst[199:192] := SaturateU8(b[143:128])
- dst[207:200] := SaturateU8(b[159:144])
- dst[215:208] := SaturateU8(b[175:160])
- dst[223:216] := SaturateU8(b[191:176])
- dst[231:224] := SaturateU8(b[207:192])
- dst[239:232] := SaturateU8(b[223:208])
- dst[247:240] := SaturateU8(b[239:224])
- dst[255:248] := SaturateU8(b[255:240])
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Miscellaneous
+
+
+
+ Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using unsigned saturation, and store the results in "dst".
+
+ dst[7:0] := SaturateU8(a[15:0])
+ dst[15:8] := SaturateU8(a[31:16])
+ dst[23:16] := SaturateU8(a[47:32])
+ dst[31:24] := SaturateU8(a[63:48])
+ dst[39:32] := SaturateU8(a[79:64])
+ dst[47:40] := SaturateU8(a[95:80])
+ dst[55:48] := SaturateU8(a[111:96])
+ dst[63:56] := SaturateU8(a[127:112])
+ dst[71:64] := SaturateU8(b[15:0])
+ dst[79:72] := SaturateU8(b[31:16])
+ dst[87:80] := SaturateU8(b[47:32])
+ dst[95:88] := SaturateU8(b[63:48])
+ dst[103:96] := SaturateU8(b[79:64])
+ dst[111:104] := SaturateU8(b[95:80])
+ dst[119:112] := SaturateU8(b[111:96])
+ dst[127:120] := SaturateU8(b[127:112])
+ dst[135:128] := SaturateU8(a[143:128])
+ dst[143:136] := SaturateU8(a[159:144])
+ dst[151:144] := SaturateU8(a[175:160])
+ dst[159:152] := SaturateU8(a[191:176])
+ dst[167:160] := SaturateU8(a[207:192])
+ dst[175:168] := SaturateU8(a[223:208])
+ dst[183:176] := SaturateU8(a[239:224])
+ dst[191:184] := SaturateU8(a[255:240])
+ dst[199:192] := SaturateU8(b[143:128])
+ dst[207:200] := SaturateU8(b[159:144])
+ dst[215:208] := SaturateU8(b[175:160])
+ dst[223:216] := SaturateU8(b[191:176])
+ dst[231:224] := SaturateU8(b[207:192])
+ dst[239:232] := SaturateU8(b[223:208])
+ dst[247:240] := SaturateU8(b[239:224])
+ dst[255:248] := SaturateU8(b[255:240])
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Miscellaneous
-
-
-
- Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit
- integers using unsigned saturation, and store the results in "dst".
-
- dst[15:0] := SaturateU16(a[31:0])
- dst[31:16] := SaturateU16(a[63:32])
- dst[47:32] := SaturateU16(a[95:64])
- dst[63:48] := SaturateU16(a[127:96])
- dst[79:64] := SaturateU16(b[31:0])
- dst[95:80] := SaturateU16(b[63:32])
- dst[111:96] := SaturateU16(b[95:64])
- dst[127:112] := SaturateU16(b[127:96])
- dst[143:128] := SaturateU16(a[159:128])
- dst[159:144] := SaturateU16(a[191:160])
- dst[175:160] := SaturateU16(a[223:192])
- dst[191:176] := SaturateU16(a[255:224])
- dst[207:192] := SaturateU16(b[159:128])
- dst[223:208] := SaturateU16(b[191:160])
- dst[239:224] := SaturateU16(b[223:192])
- dst[255:240] := SaturateU16(b[255:224])
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Miscellaneous
+
+
+
+ Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit integers using unsigned saturation, and store the results in "dst".
+
+ dst[15:0] := SaturateU16(a[31:0])
+ dst[31:16] := SaturateU16(a[63:32])
+ dst[47:32] := SaturateU16(a[95:64])
+ dst[63:48] := SaturateU16(a[127:96])
+ dst[79:64] := SaturateU16(b[31:0])
+ dst[95:80] := SaturateU16(b[63:32])
+ dst[111:96] := SaturateU16(b[95:64])
+ dst[127:112] := SaturateU16(b[127:96])
+ dst[143:128] := SaturateU16(a[159:128])
+ dst[159:144] := SaturateU16(a[191:160])
+ dst[175:160] := SaturateU16(a[223:192])
+ dst[191:176] := SaturateU16(a[255:224])
+ dst[207:192] := SaturateU16(b[159:128])
+ dst[223:208] := SaturateU16(b[191:160])
+ dst[239:224] := SaturateU16(b[223:192])
+ dst[255:240] := SaturateU16(b[255:224])
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Miscellaneous
-
-
-
- Compute the bitwise AND of 256 bits (representing integer data) in "a" and "b",
- and store the result in "dst".
-
- dst[255:0] := (a[255:0] AND b[255:0])
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Logical
+
+
+
+ Compute the bitwise AND of 256 bits (representing integer data) in "a" and "b", and store the result in "dst".
+
+ dst[255:0] := (a[255:0] AND b[255:0])
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Logical
-
-
-
- Compute the bitwise NOT of 256 bits (representing integer data) in "a" and then
- AND with "b", and store the result in "dst".
-
- dst[255:0] := ((NOT a[255:0]) AND b[255:0])
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Logical
+
+
+
+ Compute the bitwise NOT of 256 bits (representing integer data) in "a" and then AND with "b", and store the result in "dst".
+
+ dst[255:0] := ((NOT a[255:0]) AND b[255:0])
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Logical
-
-
-
- Compute the bitwise OR of 256 bits (representing integer data) in "a" and "b",
- and store the result in "dst".
-
- dst[255:0] := (a[255:0] OR b[255:0])
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Logical
+
+
+
+ Compute the bitwise OR of 256 bits (representing integer data) in "a" and "b", and store the result in "dst".
+
+ dst[255:0] := (a[255:0] OR b[255:0])
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Logical
-
-
-
- Compute the bitwise XOR of 256 bits (representing integer data) in "a" and "b",
- and store the result in "dst".
-
- dst[255:0] := (a[255:0] XOR b[255:0])
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Logical
+
+
+
+ Compute the bitwise XOR of 256 bits (representing integer data) in "a" and "b", and store the result in "dst".
+
+ dst[255:0] := (a[255:0] XOR b[255:0])
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Logical
-
-
-
- Average packed unsigned 8-bit integers in "a" and "b", and store the results in
- "dst".
-
- FOR j := 0 to 31
-     i := j*8
-     dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) >> 1
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Probability/Statistics
+
+
+
+ Average packed unsigned 8-bit integers in "a" and "b", and store the results in "dst".
+
+ FOR j := 0 to 31
+     i := j*8
+     dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) >> 1
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Probability/Statistics
-
-
-
- Average packed unsigned 16-bit integers in "a" and "b", and store the results
- in "dst".
-
- FOR j := 0 to 15
-     i := j*16
-     dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) >> 1
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Probability/Statistics
+
+
+
+ Average packed unsigned 16-bit integers in "a" and "b", and store the results in "dst".
+
+ FOR j := 0 to 15
+     i := j*16
+     dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) >> 1
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Probability/Statistics
-
-
-
- Compare packed 8-bit integers in "a" and "b" for equality, and store the
- results in "dst".
-
- FOR j := 0 to 31
-     i := j*8
-     dst[i+7:i] := ( a[i+7:i] == b[i+7:i] ) ? 0xFF : 0
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Compare
+
+
+
+ Compare packed 8-bit integers in "a" and "b" for equality, and store the results in "dst".
+
+ FOR j := 0 to 31
+     i := j*8
+     dst[i+7:i] := ( a[i+7:i] == b[i+7:i] ) ? 0xFF : 0
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Compare
-
-
-
- Compare packed 16-bit integers in "a" and "b" for equality, and store the
- results in "dst".
-
- FOR j := 0 to 15
-     i := j*16
-     dst[i+15:i] := ( a[i+15:i] == b[i+15:i] ) ? 0xFFFF : 0
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Compare
+
+
+
+ Compare packed 16-bit integers in "a" and "b" for equality, and store the results in "dst".
+
+ FOR j := 0 to 15
+     i := j*16
+     dst[i+15:i] := ( a[i+15:i] == b[i+15:i] ) ? 0xFFFF : 0
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Compare
-
-
-
- Compare packed 32-bit integers in "a" and "b" for equality, and store the
- results in "dst".
-
- FOR j := 0 to 7
-     i := j*32
-     dst[i+31:i] := ( a[i+31:i] == b[i+31:i] ) ? 0xFFFFFFFF : 0
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Compare
+
+
+
+ Compare packed 32-bit integers in "a" and "b" for equality, and store the results in "dst".
+
+ FOR j := 0 to 7
+     i := j*32
+     dst[i+31:i] := ( a[i+31:i] == b[i+31:i] ) ? 0xFFFFFFFF : 0
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Compare
-
-
-
- Compare packed 64-bit integers in "a" and "b" for equality, and store the
- results in "dst".
-
- FOR j := 0 to 3
-     i := j*64
-     dst[i+63:i] := ( a[i+63:i] == b[i+63:i] ) ? 0xFFFFFFFFFFFFFFFF : 0
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Compare
+
+
+
+ Compare packed 64-bit integers in "a" and "b" for equality, and store the results in "dst".
+
+ FOR j := 0 to 3
+     i := j*64
+     dst[i+63:i] := ( a[i+63:i] == b[i+63:i] ) ? 0xFFFFFFFFFFFFFFFF : 0
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Compare
-
-
-
- Compare packed signed 8-bit integers in "a" and "b" for greater-than, and store
- the results in "dst".
-
- FOR j := 0 to 31
-     i := j*8
-     dst[i+7:i] := ( a[i+7:i] > b[i+7:i] ) ? 0xFF : 0
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Compare
+
+
+
+ Compare packed signed 8-bit integers in "a" and "b" for greater-than, and store the results in "dst".
+
+ FOR j := 0 to 31
+     i := j*8
+     dst[i+7:i] := ( a[i+7:i] > b[i+7:i] ) ? 0xFF : 0
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Compare
-
-
-
- Compare packed signed 16-bit integers in "a" and "b" for greater-than, and
- store the results in "dst".
-
- FOR j := 0 to 15
-     i := j*16
-     dst[i+15:i] := ( a[i+15:i] > b[i+15:i] ) ? 0xFFFF : 0
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Compare
+
+
+
+ Compare packed signed 16-bit integers in "a" and "b" for greater-than, and store the results in "dst".
+
+ FOR j := 0 to 15
+     i := j*16
+     dst[i+15:i] := ( a[i+15:i] > b[i+15:i] ) ? 0xFFFF : 0
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Compare
-
-
-
- Compare packed signed 32-bit integers in "a" and "b" for greater-than, and
- store the results in "dst".
-
- FOR j := 0 to 7
-     i := j*32
-     dst[i+31:i] := ( a[i+31:i] > b[i+31:i] ) ? 0xFFFFFFFF : 0
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Compare
+
+
+
+ Compare packed signed 32-bit integers in "a" and "b" for greater-than, and store the results in "dst".
+
+ FOR j := 0 to 7
+     i := j*32
+     dst[i+31:i] := ( a[i+31:i] > b[i+31:i] ) ? 0xFFFFFFFF : 0
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Compare
-
-
-
- Compare packed signed 64-bit integers in "a" and "b" for greater-than, and
- store the results in "dst".
-
- FOR j := 0 to 3
-     i := j*64
-     dst[i+63:i] := ( a[i+63:i] > b[i+63:i] ) ? 0xFFFFFFFFFFFFFFFF : 0
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Compare
+
+
+
+ Compare packed signed 64-bit integers in "a" and "b" for greater-than, and store the results in "dst".
+
+ FOR j := 0 to 3
+     i := j*64
+     dst[i+63:i] := ( a[i+63:i] > b[i+63:i] ) ? 0xFFFFFFFFFFFFFFFF : 0
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Compare
-
-
- Sign extend packed 16-bit integers in "a" to packed 32-bit integers, and store
- the results in "dst".
-
- FOR j:= 0 to 7
-     i := 32*j
-     k := 16*j
-     dst[i+31:i] := SignExtend32(a[k+15:k])
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Convert
+
+
+ Sign extend packed 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst".
+
+ FOR j:= 0 to 7
+     i := 32*j
+     k := 16*j
+     dst[i+31:i] := SignExtend32(a[k+15:k])
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Convert
-
-
- Sign extend packed 16-bit integers in "a" to packed 64-bit integers, and store
- the results in "dst".
-
- FOR j:= 0 to 3
-     i := 64*j
-     k := 16*j
-     dst[i+63:i] := SignExtend64(a[k+15:k])
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Convert
+
+
+ Sign extend packed 16-bit integers in "a" to packed 64-bit integers, and store the results in "dst".
+
+ FOR j:= 0 to 3
+     i := 64*j
+     k := 16*j
+     dst[i+63:i] := SignExtend64(a[k+15:k])
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Convert
-
-
- Sign extend packed 32-bit integers in "a" to packed 64-bit integers, and store
- the results in "dst".
-
- FOR j:= 0 to 3
-     i := 64*j
-     k := 32*j
-     dst[i+63:i] := SignExtend64(a[k+31:k])
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Convert
+
+
+ Sign extend packed 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst".
+
+ FOR j:= 0 to 3
+     i := 64*j
+     k := 32*j
+     dst[i+63:i] := SignExtend64(a[k+31:k])
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Convert
-
-
- Sign extend packed 8-bit integers in "a" to packed 16-bit integers, and store
- the results in "dst".
-
- FOR j := 0 to 15
-     i := j*8
-     l := j*16
-     dst[l+15:l] := SignExtend16(a[i+7:i])
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Convert
+
+
+ Sign extend packed 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst".
+
+ FOR j := 0 to 15
+     i := j*8
+     l := j*16
+     dst[l+15:l] := SignExtend16(a[i+7:i])
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Convert
-
-
- Sign extend packed 8-bit integers in "a" to packed 32-bit integers, and store
- the results in "dst".
-
- FOR j := 0 to 7
-     i := 32*j
-     k := 8*j
-     dst[i+31:i] := SignExtend32(a[k+7:k])
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Convert
+
+
+ Sign extend packed 8-bit integers in "a" to packed 32-bit integers, and store the results in "dst".
+
+ FOR j := 0 to 7
+     i := 32*j
+     k := 8*j
+     dst[i+31:i] := SignExtend32(a[k+7:k])
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Convert
-
-
- Sign extend packed 8-bit integers in the low 8 bytes of "a" to packed 64-bit
- integers, and store the results in "dst".
-
- FOR j := 0 to 3
-     i := 64*j
-     k := 8*j
-     dst[i+63:i] := SignExtend64(a[k+7:k])
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Convert
+
+
+ Sign extend packed 8-bit integers in the low 8 bytes of "a" to packed 64-bit integers, and store the results in "dst".
+
+ FOR j := 0 to 3
+     i := 64*j
+     k := 8*j
+     dst[i+63:i] := SignExtend64(a[k+7:k])
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Convert
-
-
- Zero extend packed unsigned 16-bit integers in "a" to packed 32-bit integers,
- and store the results in "dst".
-
- FOR j := 0 to 7
-     i := 32*j
-     k := 16*j
-     dst[i+31:i] := ZeroExtend32(a[k+15:k])
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Convert
+
+
+ Zero extend packed unsigned 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst".
+
+ FOR j := 0 to 7
+     i := 32*j
+     k := 16*j
+     dst[i+31:i] := ZeroExtend32(a[k+15:k])
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Convert
-
-
- Zero extend packed unsigned 16-bit integers in "a" to packed 64-bit integers,
- and store the results in "dst".
-
- FOR j:= 0 to 3
-     i := 64*j
-     k := 16*j
-     dst[i+63:i] := ZeroExtend64(a[k+15:k])
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Convert
+
+
+ Zero extend packed unsigned 16-bit integers in "a" to packed 64-bit integers, and store the results in "dst".
+
+ FOR j:= 0 to 3
+     i := 64*j
+     k := 16*j
+     dst[i+63:i] := ZeroExtend64(a[k+15:k])
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Convert
-
-
- Zero extend packed unsigned 32-bit integers in "a" to packed 64-bit integers,
- and store the results in "dst".
-
- FOR j:= 0 to 3
-     i := 64*j
-     k := 32*j
-     dst[i+63:i] := ZeroExtend64(a[k+31:k])
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Convert
+
+
+ Zero extend packed unsigned 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst".
+
+ FOR j:= 0 to 3
+     i := 64*j
+     k := 32*j
+     dst[i+63:i] := ZeroExtend64(a[k+31:k])
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Convert
-
-
- Zero extend packed unsigned 8-bit integers in "a" to packed 16-bit integers,
- and store the results in "dst".
-
- FOR j := 0 to 15
-     i := j*8
-     l := j*16
-     dst[l+15:l] := ZeroExtend16(a[i+7:i])
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Convert
+
+
+ Zero extend packed unsigned 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst".
+
+ FOR j := 0 to 15
+     i := j*8
+     l := j*16
+     dst[l+15:l] := ZeroExtend16(a[i+7:i])
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Convert
-
-
- Zero extend packed unsigned 8-bit integers in "a" to packed 32-bit integers,
- and store the results in "dst".
-
- FOR j := 0 to 7
-     i := 32*j
-     k := 8*j
-     dst[i+31:i] := ZeroExtend32(a[k+7:k])
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Convert
+
+
+ Zero extend packed unsigned 8-bit integers in "a" to packed 32-bit integers, and store the results in "dst".
+
+ FOR j := 0 to 7
+     i := 32*j
+     k := 8*j
+     dst[i+31:i] := ZeroExtend32(a[k+7:k])
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Convert
-
-
- Zero extend packed unsigned 8-bit integers in the low 8 byte sof "a" to packed
- 64-bit integers, and store the results in "dst".
-
- FOR j := 0 to 3
-     i := 64*j
-     k := 8*j
-     dst[i+63:i] := ZeroExtend64(a[k+7:k])
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Convert
+
+
+ Zero extend packed unsigned 8-bit integers in the low 8 bytes of "a" to packed 64-bit integers, and store the results in "dst".
+
+ FOR j := 0 to 3
+     i := 64*j
+     k := 8*j
+     dst[i+63:i] := ZeroExtend64(a[k+7:k])
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Convert
-
-
-
-
- Gather double-precision (64-bit) floating-point elements from memory using
- 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and
- offset by each 32-bit element in "vindex" (each index is scaled by the factor in
- "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.
-
- FOR j := 0 to 1
-     i := j*64
-     m := j*32
-     addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
-     dst[i+63:i] := MEM[addr+63:addr]
- ENDFOR
- dst[MAX:128] := 0
-
-
- AVX2
-
immintrin.h
- Load
+
+
+
+
+ Gather double-precision (64-bit) floating-point elements from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.
+
+ FOR j := 0 to 1
+     i := j*64
+     m := j*32
+     addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+     dst[i+63:i] := MEM[addr+63:addr]
+ ENDFOR
+ dst[MAX:128] := 0
+
+
+ AVX2
immintrin.h
+ Load
-
-
-
-
- Gather double-precision (64-bit) floating-point elements from memory using
- 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and
- offset by each 32-bit element in "vindex" (each index is scaled by the factor in
- "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.
-
- FOR j := 0 to 3
-     i := j*64
-     m := j*32
-     addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
-     dst[i+63:i] := MEM[addr+63:addr]
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
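The address arithmetic in the gather pseudocode above treats MEM as bit-addressed, hence the trailing "* 8"; a byte-addressed scalar Rust sketch of the same computation follows (illustrative only, not part of this patch; `gather_pd` is an invented name):

    // Byte-addressed model of: addr := base_addr + SignExtend64(vindex[j]) * scale
    unsafe fn gather_pd(base_addr: *const u8, vindex: [i32; 2], scale: isize) -> [f64; 2] {
        let mut dst = [0.0f64; 2];
        for j in 0..2 {
            let addr = base_addr.offset(vindex[j] as isize * scale);
            dst[j] = (addr as *const f64).read_unaligned();
        }
        dst
    }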
- Load
+
+
+
+
+ Gather double-precision (64-bit) floating-point elements from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.
+
+ FOR j := 0 to 3
+     i := j*64
+     m := j*32
+     addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+     dst[i+63:i] := MEM[addr+63:addr]
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Load
-
-
-
-
- Gather single-precision (32-bit) floating-point elements from memory using
- 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and
- offset by each 32-bit element in "vindex" (each index is scaled by the factor in
- "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.
-
- FOR j := 0 to 3
-     i := j*32
-     m := j*32
-     addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
-     dst[i+31:i] := MEM[addr+31:addr]
- ENDFOR
- dst[MAX:128] := 0
-
-
- AVX2
-
immintrin.h
- Load
+
+
+
+
+ Gather single-precision (32-bit) floating-point elements from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.
+
+ FOR j := 0 to 3
+     i := j*32
+     m := j*32
+     addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+     dst[i+31:i] := MEM[addr+31:addr]
+ ENDFOR
+ dst[MAX:128] := 0
+
+
+ AVX2
immintrin.h
+ Load
-
-
-
-
- Gather single-precision (32-bit) floating-point elements from memory using
- 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and
- offset by each 32-bit element in "vindex" (each index is scaled by the factor in
- "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.
-
- FOR j := 0 to 7
-     i := j*32
-     m := j*32
-     addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
-     dst[i+31:i] := MEM[addr+31:addr]
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Load
+
+
+
+
+ Gather single-precision (32-bit) floating-point elements from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.
+
+ FOR j := 0 to 7
+     i := j*32
+     m := j*32
+     addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+     dst[i+31:i] := MEM[addr+31:addr]
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Load
-
-
-
-
- Gather 32-bit integers from memory using 32-bit indices. 32-bit elements are
- loaded from addresses starting at "base_addr" and offset by each 32-bit element in
- "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged
- into "dst". "scale" should be 1, 2, 4 or 8.
-
- FOR j := 0 to 3
-     i := j*32
-     m := j*32
-     addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
-     dst[i+31:i] := MEM[addr+31:addr]
- ENDFOR
- dst[MAX:128] := 0
-
-
- AVX2
-
immintrin.h
- Load
+
+
+
+
+ Gather 32-bit integers from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.
+
+ FOR j := 0 to 3
+     i := j*32
+     m := j*32
+     addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+     dst[i+31:i] := MEM[addr+31:addr]
+ ENDFOR
+ dst[MAX:128] := 0
+
+
+ AVX2
immintrin.h
+ Load
-
-
-
-
- Gather 32-bit integers from memory using 32-bit indices. 32-bit elements are
- loaded from addresses starting at "base_addr" and offset by each 32-bit element in
- "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged
- into "dst". "scale" should be 1, 2, 4 or 8.
-
- FOR j := 0 to 7
-     i := j*32
-     m := j*32
-     addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
-     dst[i+31:i] := MEM[addr+31:addr]
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Load
+
+
+
+
+ Gather 32-bit integers from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.
+
+ FOR j := 0 to 7
+     i := j*32
+     m := j*32
+     addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+     dst[i+31:i] := MEM[addr+31:addr]
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Load
-
-
-
-
- Gather 64-bit integers from memory using 32-bit indices. 64-bit elements are
- loaded from addresses starting at "base_addr" and offset by each 32-bit element in
- "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged
- into "dst". "scale" should be 1, 2, 4 or 8.
-
- FOR j := 0 to 1
-     i := j*64
-     m := j*32
-     addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
-     dst[i+63:i] := MEM[addr+63:addr]
- ENDFOR
- dst[MAX:128] := 0
-
-
- AVX2
-
immintrin.h
- Load
+
+
+
+
+ Gather 64-bit integers from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.
+
+ FOR j := 0 to 1
+     i := j*64
+     m := j*32
+     addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+     dst[i+63:i] := MEM[addr+63:addr]
+ ENDFOR
+ dst[MAX:128] := 0
+
+
+ AVX2
immintrin.h
+ Load
-
-
-
-
- Gather 64-bit integers from memory using 32-bit indices. 64-bit elements are
- loaded from addresses starting at "base_addr" and offset by each 32-bit element in
- "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged
- into "dst". "scale" should be 1, 2, 4 or 8.
-
- FOR j := 0 to 3
-     i := j*64
-     m := j*32
-     addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
-     dst[i+63:i] := MEM[addr+63:addr]
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Load
+
+
+
+
+ Gather 64-bit integers from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.
+
+ FOR j := 0 to 3
+     i := j*64
+     m := j*32
+     addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+     dst[i+63:i] := MEM[addr+63:addr]
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Load
-
-
-
-
- Gather double-precision (64-bit) floating-point elements from memory using
- 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and
- offset by each 64-bit element in "vindex" (each index is scaled by the factor in
- "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.
-
- FOR j := 0 to 1
-     i := j*64
-     m := j*64
-     addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
-     dst[i+63:i] := MEM[addr+63:addr]
- ENDFOR
- dst[MAX:128] := 0
-
-
- AVX2
-
immintrin.h
- Load
+
+
+
+
+ Gather double-precision (64-bit) floating-point elements from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.
+
+ FOR j := 0 to 1
+     i := j*64
+     m := j*64
+     addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+     dst[i+63:i] := MEM[addr+63:addr]
+ ENDFOR
+ dst[MAX:128] := 0
+
+
+ AVX2
immintrin.h
+ Load
-
-
-
-
- Gather double-precision (64-bit) floating-point elements from memory using
- 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and
- offset by each 64-bit element in "vindex" (each index is scaled by the factor in
- "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.
-
- FOR j := 0 to 3
-     i := j*64
-     m := j*64
-     addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
-     dst[i+63:i] := MEM[addr+63:addr]
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Load
+
+
+
+
+ Gather double-precision (64-bit) floating-point elements from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.
+
+ FOR j := 0 to 3
+     i := j*64
+     m := j*64
+     addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+     dst[i+63:i] := MEM[addr+63:addr]
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Load
-
-
-
-
- Gather single-precision (32-bit) floating-point elements from memory using
- 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and
- offset by each 64-bit element in "vindex" (each index is scaled by the factor in
- "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.
-
- FOR j := 0 to 1
-     i := j*32
-     m := j*64
-     addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
-     dst[i+31:i] := MEM[addr+31:addr]
- ENDFOR
- dst[MAX:64] := 0
-
-
- AVX2
-
immintrin.h
- Load
+
+
+
+
+ Gather single-precision (32-bit) floating-point elements from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.
+
+ FOR j := 0 to 1
+     i := j*32
+     m := j*64
+     addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+     dst[i+31:i] := MEM[addr+31:addr]
+ ENDFOR
+ dst[MAX:64] := 0
+
+
+ AVX2
immintrin.h
+ Load
-
-
-
-
- Gather single-precision (32-bit) floating-point elements from memory using
- 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and
- offset by each 64-bit element in "vindex" (each index is scaled by the factor in
- "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.
-
- FOR j := 0 to 3
-     i := j*32
-     m := j*64
-     addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
-     dst[i+31:i] := MEM[addr+31:addr]
- ENDFOR
- dst[MAX:128] := 0
-
-
- AVX2
-
immintrin.h
- Load
+
+
+
+
+ Gather single-precision (32-bit) floating-point elements from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.
+
+ FOR j := 0 to 3
+     i := j*32
+     m := j*64
+     addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+     dst[i+31:i] := MEM[addr+31:addr]
+ ENDFOR
+ dst[MAX:128] := 0
+
+
+ AVX2
immintrin.h
+ Load
-
-
-
-
- Gather 32-bit integers from memory using 64-bit indices. 32-bit elements are
- loaded from addresses starting at "base_addr" and offset by each 64-bit element in
- "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged
- into "dst". "scale" should be 1, 2, 4 or 8.
-
- FOR j := 0 to 1
-     i := j*32
-     m := j*64
-     addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
-     dst[i+31:i] := MEM[addr+31:addr]
- ENDFOR
- dst[MAX:64] := 0
-
-
- AVX2
-
immintrin.h
- Load
+
+
+
+
+ Gather 32-bit integers from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.
+
+ FOR j := 0 to 1
+     i := j*32
+     m := j*64
+     addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+     dst[i+31:i] := MEM[addr+31:addr]
+ ENDFOR
+ dst[MAX:64] := 0
+
+
+ AVX2
immintrin.h
+ Load
-
-
-
-
- Gather 32-bit integers from memory using 64-bit indices. 32-bit elements are
- loaded from addresses starting at "base_addr" and offset by each 64-bit element in
- "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged
- into "dst". "scale" should be 1, 2, 4 or 8.
-
- FOR j := 0 to 3
-     i := j*32
-     m := j*64
-     addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
-     dst[i+31:i] := MEM[addr+31:addr]
- ENDFOR
- dst[MAX:128] := 0
-
-
- AVX2
-
immintrin.h
- Load
+
+
+
+
+ Gather 32-bit integers from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.
+
+ FOR j := 0 to 3
+     i := j*32
+     m := j*64
+     addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+     dst[i+31:i] := MEM[addr+31:addr]
+ ENDFOR
+ dst[MAX:128] := 0
+
+
+ AVX2
immintrin.h
+ Load
-
-
-
-
- Gather 64-bit integers from memory using 64-bit indices. 64-bit elements are
- loaded from addresses starting at "base_addr" and offset by each 64-bit element in
- "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged
- into "dst". "scale" should be 1, 2, 4 or 8.
-
- FOR j := 0 to 1
-     i := j*64
-     m := j*64
-     addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
-     dst[i+63:i] := MEM[addr+63:addr]
- ENDFOR
- dst[MAX:128] := 0
-
-
- AVX2
-
immintrin.h
- Load
+
+
+
+
+ Gather 64-bit integers from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.
+
+ FOR j := 0 to 1
+     i := j*64
+     m := j*64
+     addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+     dst[i+63:i] := MEM[addr+63:addr]
+ ENDFOR
+ dst[MAX:128] := 0
+
+
+ AVX2
immintrin.h
+ Load
-
-
-
-
- Gather 64-bit integers from memory using 64-bit indices. 64-bit elements are
- loaded from addresses starting at "base_addr" and offset by each 64-bit element in
- "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged
- into "dst". "scale" should be 1, 2, 4 or 8.
-
- FOR j := 0 to 3
-     i := j*64
-     m := j*64
-     addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
-     dst[i+63:i] := MEM[addr+63:addr]
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Load
+
+
+
+
+ Gather 64-bit integers from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.
+
+ FOR j := 0 to 3
+     i := j*64
+     m := j*64
+     addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+     dst[i+63:i] := MEM[addr+63:addr]
+ ENDFOR
+ dst[MAX:256] := 0
+
+
+ AVX2
immintrin.h
+ Load
-
-
-
-
-
-
- Gather double-precision (64-bit) floating-point elements from memory using
- 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and
- offset by each 32-bit element in "vindex" (each index is scaled by the factor in
- "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from
- "src" when the highest bit is not set in the corresponding element). "scale" should be
- 1, 2, 4 or 8.
-
- FOR j := 0 to 1
-     i := j*64
-     m := j*32
-     IF mask[i+63]
-         addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
-         dst[i+63:i] := MEM[addr+63:addr]
-     ELSE
-         dst[i+63:i] := src[i+63:i]
-     FI
- ENDFOR
- mask[MAX:128] := 0
- dst[MAX:128] := 0
-
-
- AVX2
-
immintrin.h
- Load
+
+
+
+
+
+
+ Gather double-precision (64-bit) floating-point elements from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8.
+
+ FOR j := 0 to 1
+     i := j*64
+     m := j*32
+     IF mask[i+63]
+         addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+         dst[i+63:i] := MEM[addr+63:addr]
+     ELSE
+         dst[i+63:i] := src[i+63:i]
+     FI
+ ENDFOR
+ mask[MAX:128] := 0
+ dst[MAX:128] := 0
+
+
+ AVX2
immintrin.h
+ Load
-
-
-
-
-
-
- Gather double-precision (64-bit) floating-point elements from memory using
- 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and
- offset by each 32-bit element in "vindex" (each index is scaled by the factor in
- "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from
- "src" when the highest bit is not set in the corresponding element). "scale" should be
- 1, 2, 4 or 8.
-
- FOR j := 0 to 3
-     i := j*64
-     m := j*32
-     IF mask[i+63]
-         addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
-         dst[i+63:i] := MEM[addr+63:addr]
-     ELSE
-         dst[i+63:i] := src[i+63:i]
-     FI
- ENDFOR
- mask[MAX:256] := 0
- dst[MAX:256] := 0
-
-
- AVX2
-
immintrin.h
- Load + + + + + + + Gather double-precision (64-bit) floating-point elements from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 3 + i := j*64 + m := j*32 + IF mask[i+63] + addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 + dst[i+63:i] := MEM[addr+63:addr] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +mask[MAX:256] := 0 +dst[MAX:256] := 0 + + + AVX2 +
immintrin.h
+ Load
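
The masked variants merge gathered lanes with "src" under the sign bit of each mask element. A sketch of the 256-bit f64 form (seemingly Intel's _mm256_mask_i32gather_pd), assuming the same const-generic SCALE convention:

use core::arch::x86_64::*;

// Lanes 0 and 2 are gathered; lanes 1 and 3 fall through to `src`.
// For f64 gathers the mask rides in the sign bit of each __m256d lane.
#[target_feature(enable = "avx2")]
unsafe fn masked_gather_f64(table: &[f64; 8], src: __m256d) -> __m256d {
    let vindex = _mm_setr_epi32(1, 3, 5, 7);
    let mask = _mm256_castsi256_pd(_mm256_setr_epi64x(-1, 0, -1, 0));
    _mm256_mask_i32gather_pd::<8>(src, table.as_ptr(), vindex, mask)
}
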
- - - - - - - Gather single-precision (32-bit) floating-point elements from memory using - 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and - offset by each 32-bit element in "vindex" (each index is scaled by the factor in - "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from - "src" when the highest bit is not set in the corresponding element). "scale" should be - 1, 2, 4 or 8. - - FOR j := 0 to 3 - i := j*32 - m := j*32 - IF mask[i+31] - addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 - dst[i+31:i] := MEM[addr+31:addr] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - mask[MAX:128] := 0 - dst[MAX:128] := 0 - - - AVX2 -
immintrin.h
- Load + + + + + + + Gather single-precision (32-bit) floating-point elements from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 3 + i := j*32 + m := j*32 + IF mask[i+31] + addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 + dst[i+31:i] := MEM[addr+31:addr] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +mask[MAX:128] := 0 +dst[MAX:128] := 0 + + + AVX2 +
immintrin.h
+ Load
- - - - - - - Gather single-precision (32-bit) floating-point elements from memory using - 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and - offset by each 32-bit element in "vindex" (each index is scaled by the factor in - "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from - "src" when the highest bit is not set in the corresponding element). "scale" should be - 1, 2, 4 or 8. - - FOR j := 0 to 7 - i := j*32 - m := j*32 - IF mask[i+31] - addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 - dst[i+31:i] := MEM[addr+31:addr] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - mask[MAX:256] := 0 - dst[MAX:256] := 0 - - - AVX2 -
immintrin.h
- Load + + + + + + + Gather single-precision (32-bit) floating-point elements from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 7 + i := j*32 + m := j*32 + IF mask[i+31] + addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 + dst[i+31:i] := MEM[addr+31:addr] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +mask[MAX:256] := 0 +dst[MAX:256] := 0 + + + AVX2 +
immintrin.h
+ Load
- - - - - - - Gather 32-bit integers from memory using 32-bit indices. 32-bit elements are - loaded from addresses starting at "base_addr" and offset by each 32-bit element in - "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged - into "dst" using "mask" (elements are copied from "src" when the highest bit is not set - in the corresponding element). "scale" should be 1, 2, 4 or 8. - - FOR j := 0 to 3 - i := j*32 - m := j*32 - IF mask[i+31] - addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 - dst[i+31:i] := MEM[addr+31:addr] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - mask[MAX:128] := 0 - dst[MAX:128] := 0 - - - AVX2 -
immintrin.h
- Load + + + + + + + Gather 32-bit integers from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 3 + i := j*32 + m := j*32 + IF mask[i+31] + addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 + dst[i+31:i] := MEM[addr+31:addr] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +mask[MAX:128] := 0 +dst[MAX:128] := 0 + + + AVX2 +
immintrin.h
+ Load
- - - - - - - Gather 32-bit integers from memory using 32-bit indices. 32-bit elements are - loaded from addresses starting at "base_addr" and offset by each 32-bit element in - "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged - into "dst" using "mask" (elements are copied from "src" when the highest bit is not set - in the corresponding element). "scale" should be 1, 2, 4 or 8. - - FOR j := 0 to 7 - i := j*32 - m := j*32 - IF mask[i+31] - addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 - dst[i+31:i] := MEM[addr+31:addr] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - mask[MAX:256] := 0 - dst[MAX:256] := 0 - - - AVX2 -
immintrin.h
- Load + + + + + + + Gather 32-bit integers from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 7 + i := j*32 + m := j*32 + IF mask[i+31] + addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 + dst[i+31:i] := MEM[addr+31:addr] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +mask[MAX:256] := 0 +dst[MAX:256] := 0 + + + AVX2 +
immintrin.h
+ Load
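
The integer forms follow the same pattern, except the mask travels in the high bit of each 32-bit lane of a __m256i. A sketch (what Intel appears to name _mm256_mask_i32gather_epi32), again with illustrative indices:

use core::arch::x86_64::*;

// All-ones mask: every lane is gathered, none falls back to `src`.
// SCALE = 4 since the indices count 32-bit elements.
#[target_feature(enable = "avx2")]
unsafe fn masked_gather_i32(table: &[i32; 16], src: __m256i) -> __m256i {
    let vindex = _mm256_setr_epi32(0, 2, 4, 6, 8, 10, 12, 14);
    let mask = _mm256_set1_epi32(-1);
    _mm256_mask_i32gather_epi32::<4>(src, table.as_ptr(), vindex, mask)
}
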
- - - - - - - Gather 64-bit integers from memory using 32-bit indices. 64-bit elements are - loaded from addresses starting at "base_addr" and offset by each 32-bit element in - "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged - into "dst" using "mask" (elements are copied from "src" when the highest bit is not set - in the corresponding element). "scale" should be 1, 2, 4 or 8. - - FOR j := 0 to 1 - i := j*64 - m := j*32 - IF mask[i+63] - addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 - dst[i+63:i] := MEM[addr+63:addr] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - mask[MAX:128] := 0 - dst[MAX:128] := 0 - - - AVX2 -
immintrin.h
- Load + + + + + + + Gather 64-bit integers from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 1 + i := j*64 + m := j*32 + IF mask[i+63] + addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 + dst[i+63:i] := MEM[addr+63:addr] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +mask[MAX:128] := 0 +dst[MAX:128] := 0 + + + AVX2 +
immintrin.h
+ Load
- - - - - - - Gather 64-bit integers from memory using 32-bit indices. 64-bit elements are - loaded from addresses starting at "base_addr" and offset by each 32-bit element in - "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged - into "dst" using "mask" (elements are copied from "src" when the highest bit is not set - in the corresponding element). "scale" should be 1, 2, 4 or 8. - - FOR j := 0 to 3 - i := j*64 - m := j*32 - IF mask[i+63] - addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 - dst[i+63:i] := MEM[addr+63:addr] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - mask[MAX:256] := 0 - dst[MAX:256] := 0 - - - AVX2 -
immintrin.h
- Load + + + + + + + Gather 64-bit integers from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 3 + i := j*64 + m := j*32 + IF mask[i+63] + addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 + dst[i+63:i] := MEM[addr+63:addr] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +mask[MAX:256] := 0 +dst[MAX:256] := 0 + + + AVX2 +
immintrin.h
+ Load
- - - - - - - Gather double-precision (64-bit) floating-point elements from memory using - 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and - offset by each 64-bit element in "vindex" (each index is scaled by the factor in - "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from - "src" when the highest bit is not set in the corresponding element). "scale" should be - 1, 2, 4 or 8. - - FOR j := 0 to 1 - i := j*64 - m := j*64 - IF mask[i+63] - addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8 - dst[i+63:i] := MEM[addr+63:addr] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - mask[MAX:128] := 0 - dst[MAX:128] := 0 - - - AVX2 -
immintrin.h
- Load + + + + + + + Gather double-precision (64-bit) floating-point elements from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 1 + i := j*64 + m := j*64 + IF mask[i+63] + addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8 + dst[i+63:i] := MEM[addr+63:addr] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +mask[MAX:128] := 0 +dst[MAX:128] := 0 + + + AVX2 +
immintrin.h
+ Load
- - - - - - - Gather double-precision (64-bit) floating-point elements from memory using - 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and - offset by each 64-bit element in "vindex" (each index is scaled by the factor in - "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from - "src" when the highest bit is not set in the corresponding element). "scale" should be - 1, 2, 4 or 8. - - FOR j := 0 to 3 - i := j*64 - m := j*64 - IF mask[i+63] - addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8 - dst[i+63:i] := MEM[addr+63:addr] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - mask[MAX:256] := 0 - dst[MAX:256] := 0 - - - AVX2 -
immintrin.h
- Load + + + + + + + Gather double-precision (64-bit) floating-point elements from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 3 + i := j*64 + m := j*64 + IF mask[i+63] + addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8 + dst[i+63:i] := MEM[addr+63:addr] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +mask[MAX:256] := 0 +dst[MAX:256] := 0 + + + AVX2 +
immintrin.h
+ Load
- - - - - - - Gather single-precision (32-bit) floating-point elements from memory using - 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and - offset by each 64-bit element in "vindex" (each index is scaled by the factor in - "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from - "src" when the highest bit is not set in the corresponding element). "scale" should be - 1, 2, 4 or 8. - - FOR j := 0 to 1 - i := j*32 - m := j*64 - IF mask[i+31] - addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8 - dst[i+31:i] := MEM[addr+31:addr] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - mask[MAX:64] := 0 - dst[MAX:64] := 0 - - - AVX2 -
immintrin.h
- Load + + + + + + + Gather single-precision (32-bit) floating-point elements from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 1 + i := j*32 + m := j*64 + IF mask[i+31] + addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8 + dst[i+31:i] := MEM[addr+31:addr] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +mask[MAX:64] := 0 +dst[MAX:64] := 0 + + + AVX2 +
immintrin.h
+ Load
- - - - - - - Gather single-precision (32-bit) floating-point elements from memory using - 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and - offset by each 64-bit element in "vindex" (each index is scaled by the factor in - "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from - "src" when the highest bit is not set in the corresponding element). "scale" should be - 1, 2, 4 or 8. - - FOR j := 0 to 3 - i := j*32 - m := j*64 - IF mask[i+31] - addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8 - dst[i+31:i] := MEM[addr+31:addr] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - mask[MAX:128] := 0 - dst[MAX:128] := 0 - - - AVX2 -
immintrin.h
- Load + + + + + + + Gather single-precision (32-bit) floating-point elements from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 3 + i := j*32 + m := j*64 + IF mask[i+31] + addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8 + dst[i+31:i] := MEM[addr+31:addr] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +mask[MAX:128] := 0 +dst[MAX:128] := 0 + + + AVX2 +
immintrin.h
+ Load
- - - - - - - Gather 32-bit integers from memory using 64-bit indices. 32-bit elements are - loaded from addresses starting at "base_addr" and offset by each 64-bit element in - "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged - into "dst" using "mask" (elements are copied from "src" when the highest bit is not set - in the corresponding element). "scale" should be 1, 2, 4 or 8. - - FOR j := 0 to 1 - i := j*32 - m := j*64 - IF mask[i+31] - addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8 - dst[i+31:i] := MEM[addr+31:addr] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - mask[MAX:64] := 0 - dst[MAX:64] := 0 - - - AVX2 -
immintrin.h
- Load + + + + + + + Gather 32-bit integers from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 1 + i := j*32 + m := j*64 + IF mask[i+31] + addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8 + dst[i+31:i] := MEM[addr+31:addr] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +mask[MAX:64] := 0 +dst[MAX:64] := 0 + + + AVX2 +
immintrin.h
+ Load
- - - - - - - Gather 32-bit integers from memory using 64-bit indices. 32-bit elements are - loaded from addresses starting at "base_addr" and offset by each 64-bit element in - "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged - into "dst" using "mask" (elements are copied from "src" when the highest bit is not set - in the corresponding element). "scale" should be 1, 2, 4 or 8. - - FOR j := 0 to 3 - i := j*32 - m := j*64 - IF mask[i+31] - addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8 - dst[i+31:i] := MEM[addr+31:addr] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - mask[MAX:128] := 0 - dst[MAX:128] := 0 - - - AVX2 -
immintrin.h
- Load + + + + + + + Gather 32-bit integers from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 3 + i := j*32 + m := j*64 + IF mask[i+31] + addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8 + dst[i+31:i] := MEM[addr+31:addr] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +mask[MAX:128] := 0 +dst[MAX:128] := 0 + + + AVX2 +
immintrin.h
+ Load
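
Note the width mixing in the entries above: four 64-bit indices gather only four 32-bit elements, so the upper half of the destination is zeroed ("dst[MAX:128] := 0"). In the Rust bindings this shows up as a __m128i result even though the index vector is 256 bits wide (this entry looks like Intel's _mm256_mask_i64gather_epi32); a sketch under that assumption:

use core::arch::x86_64::*;

// 256-bit index vector in, 128-bit result out.
#[target_feature(enable = "avx2")]
unsafe fn gather_i32_via_i64(table: &[i32; 16], src: __m128i) -> __m128i {
    let vindex = _mm256_setr_epi64x(0, 5, 10, 15);
    let mask = _mm_set1_epi32(-1);
    _mm256_mask_i64gather_epi32::<4>(src, table.as_ptr(), vindex, mask)
}
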
- - - - - - - Gather 64-bit integers from memory using 64-bit indices. 64-bit elements are - loaded from addresses starting at "base_addr" and offset by each 64-bit element in - "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged - into "dst" using "mask" (elements are copied from "src" when the highest bit is not set - in the corresponding element). "scale" should be 1, 2, 4 or 8. - - FOR j := 0 to 1 - i := j*64 - m := j*64 - IF mask[i+63] - addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8 - dst[i+63:i] := MEM[addr+63:addr] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - mask[MAX:128] := 0 - dst[MAX:128] := 0 - - - AVX2 -
immintrin.h
- Load + + + + + + + Gather 64-bit integers from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 1 + i := j*64 + m := j*64 + IF mask[i+63] + addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8 + dst[i+63:i] := MEM[addr+63:addr] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +mask[MAX:128] := 0 +dst[MAX:128] := 0 + + + AVX2 +
immintrin.h
+ Load
- - - - - - - Gather 64-bit integers from memory using 64-bit indices. 64-bit elements are - loaded from addresses starting at "base_addr" and offset by each 64-bit element in - "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged - into "dst" using "mask" (elements are copied from "src" when the highest bit is not set - in the corresponding element). "scale" should be 1, 2, 4 or 8. - - FOR j := 0 to 3 - i := j*64 - m := j*64 - IF mask[i+63] - addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8 - dst[i+63:i] := MEM[addr+63:addr] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - mask[MAX:256] := 0 - dst[MAX:256] := 0 - - - AVX2 -
immintrin.h
- Load + + + + + + + Gather 64-bit integers from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 3 + i := j*64 + m := j*64 + IF mask[i+63] + addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8 + dst[i+63:i] := MEM[addr+63:addr] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +mask[MAX:256] := 0 +dst[MAX:256] := 0 + + + AVX2 +
immintrin.h
+ Load
- - - - Load packed 32-bit integers from memory into "dst" using "mask" (elements are - zeroed out when the highest bit is not set in the corresponding element). - - FOR j := 0 to 3 - i := j*32 - IF mask[i+31] - dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX2 -
immintrin.h
- Load + + + + Load packed 32-bit integers from memory into "dst" using "mask" (elements are zeroed out when the highest bit is not set in the corresponding element). + +FOR j := 0 to 3 + i := j*32 + IF mask[i+31] + dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX2 +
immintrin.h
+ Load
- - - - Load packed 32-bit integers from memory into "dst" using "mask" (elements are - zeroed out when the highest bit is not set in the corresponding element). - - FOR j := 0 to 7 - i := j*32 - IF mask[i+31] - dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX2 -
immintrin.h
- Load + + + + Load packed 32-bit integers from memory into "dst" using "mask" (elements are zeroed out when the highest bit is not set in the corresponding element). + +FOR j := 0 to 7 + i := j*32 + IF mask[i+31] + dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX2 +
immintrin.h
+ Load
- - - - Load packed 64-bit integers from memory into "dst" using "mask" (elements are - zeroed out when the highest bit is not set in the corresponding element). - - FOR j := 0 to 1 - i := j*64 - IF mask[i+63] - dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX2 -
immintrin.h
- Load + + + + Load packed 64-bit integers from memory into "dst" using "mask" (elements are zeroed out when the highest bit is not set in the corresponding element). + +FOR j := 0 to 1 + i := j*64 + IF mask[i+63] + dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX2 +
immintrin.h
+ Load
- - - - Load packed 64-bit integers from memory into "dst" using "mask" (elements are - zeroed out when the highest bit is not set in the corresponding element). - - FOR j := 0 to 3 - i := j*64 - IF mask[i+63] - dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX2 -
immintrin.h
- Load + + + + Load packed 64-bit integers from memory into "dst" using "mask" (elements are zeroed out when the highest bit is not set in the corresponding element). + +FOR j := 0 to 3 + i := j*64 + IF mask[i+63] + dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX2 +
immintrin.h
+ Load
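
These masked loads touch only the enabled lanes, which makes them handy for reading a partial vector at the tail of a buffer without faulting past it. A sketch of the 256-bit, 64-bit-element form (apparently _mm256_maskload_epi64):

use core::arch::x86_64::*;

// Load three i64 lanes; the fourth lane of the result is zero and the
// word behind it in memory is never dereferenced.
#[target_feature(enable = "avx2")]
unsafe fn load_first_three(ptr: *const i64) -> __m256i {
    let mask = _mm256_setr_epi64x(-1, -1, -1, 0);
    _mm256_maskload_epi64(ptr, mask)
}
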
- - - Load 256-bits of integer data from memory into "dst" using a non-temporal - memory hint. - "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may - be generated. - - dst[255:0] := MEM[mem_addr+255:mem_addr] - dst[MAX:256] := 0 - - - AVX2 -
immintrin.h
- Load + + + Load 256-bits of integer data from memory into "dst" using a non-temporal memory hint. + "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated. + +dst[255:0] := MEM[mem_addr+255:mem_addr] +dst[MAX:256] := 0 + + + AVX2 +
immintrin.h
+ Load
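
The non-temporal load requires 32-byte alignment, per the text above. A sketch using an over-aligned wrapper type to satisfy that requirement (the wrapper name is illustrative; the binding is presumably _mm256_stream_load_si256):

use core::arch::x86_64::*;

#[repr(align(32))]
struct Aligned32([i32; 8]);

// `repr(align(32))` guarantees the 32-byte boundary the instruction
// demands; an unaligned address here may raise a general-protection fault.
#[target_feature(enable = "avx2")]
unsafe fn stream_in(buf: &Aligned32) -> __m256i {
    _mm256_stream_load_si256(buf.0.as_ptr() as *const __m256i)
}
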
- - - - - Store packed 32-bit integers from "a" into memory using "mask" (elements are - not stored when the highest bit is not set in the corresponding element). - - FOR j := 0 to 3 - i := j*32 - IF mask[i+31] - MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i] - FI - ENDFOR - - - AVX2 -
immintrin.h
- Store + + + + + Store packed 32-bit integers from "a" into memory using "mask" (elements are not stored when the highest bit is not set in the corresponding element). + +FOR j := 0 to 3 + i := j*32 + IF mask[i+31] + MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i] + FI +ENDFOR + + + AVX2 +
immintrin.h
+ Store
- - - - - Store packed 32-bit integers from "a" into memory using "mask" (elements are - not stored when the highest bit is not set in the corresponding element). - - FOR j := 0 to 7 - i := j*32 - IF mask[i+31] - MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i] - FI - ENDFOR - - - AVX2 -
immintrin.h
- Store + + + + + Store packed 32-bit integers from "a" into memory using "mask" (elements are not stored when the highest bit is not set in the corresponding element). + +FOR j := 0 to 7 + i := j*32 + IF mask[i+31] + MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i] + FI +ENDFOR + + + AVX2 +
immintrin.h
+ Store
- - - - - Store packed 64-bit integers from "a" into memory using "mask" (elements are - not stored when the highest bit is not set in the corresponding element). - - FOR j := 0 to 1 - i := j*64 - IF mask[i+63] - MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i] - FI - ENDFOR - - - AVX2 -
immintrin.h
- Store + + + + + Store packed 64-bit integers from "a" into memory using "mask" (elements are not stored when the highest bit is not set in the corresponding element). + +FOR j := 0 to 1 + i := j*64 + IF mask[i+63] + MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i] + FI +ENDFOR + + + AVX2 +
immintrin.h
+ Store
- - - - - Store packed 64-bit integers from "a" into memory using "mask" (elements are - not stored when the highest bit is not set in the corresponding element). - - FOR j := 0 to 3 - i := j*64 - IF mask[i+63] - MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i] - FI - ENDFOR - - - AVX2 -
immintrin.h
- Store + + + + + Store packed 64-bit integers from "a" into memory using "mask" (elements are not stored when the highest bit is not set in the corresponding element). + +FOR j := 0 to 3 + i := j*64 + IF mask[i+63] + MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i] + FI +ENDFOR + + + AVX2 +
immintrin.h
+ Store
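
The masked stores are the mirror image: disabled lanes leave memory untouched rather than writing zeros. A sketch of the 64-bit-element form (presumably _mm256_maskstore_epi64):

use core::arch::x86_64::*;

// Write lanes 0 and 3 of `a`; the bytes for lanes 1 and 2 keep whatever
// the destination already held.
#[target_feature(enable = "avx2")]
unsafe fn store_outer_lanes(ptr: *mut i64, a: __m256i) {
    let mask = _mm256_setr_epi64x(-1, 0, 0, -1);
    _mm256_maskstore_epi64(ptr, mask, a);
}
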
- - - - Shift 128-bit lanes in "a" left by "imm8" bytes while shifting in zeros, and - store the results in "dst". - - tmp := imm8[7:0] - IF tmp > 15 - tmp := 16 - FI - dst[127:0] := a[127:0] << (tmp*8) - dst[255:128] := a[255:128] << (tmp*8) - dst[MAX:256] := 0 - - - AVX2 -
immintrin.h
- Shift + + + + Shift 128-bit lanes in "a" left by "imm8" bytes while shifting in zeros, and store the results in "dst". + +tmp := imm8[7:0] +IF tmp > 15 + tmp := 16 +FI +dst[127:0] := a[127:0] << (tmp*8) +dst[255:128] := a[255:128] << (tmp*8) +dst[MAX:256] := 0 + + + AVX2 +
immintrin.h
+ Shift
- - - - Shift 128-bit lanes in "a" left by "imm8" bytes while shifting in zeros, and - store the results in "dst". - - tmp := imm8[7:0] - IF tmp > 15 - tmp := 16 - FI - dst[127:0] := a[127:0] << (tmp*8) - dst[255:128] := a[255:128] << (tmp*8) - dst[MAX:256] := 0 - - - AVX2 -
immintrin.h
- Shift + + + + Shift 128-bit lanes in "a" left by "imm8" bytes while shifting in zeros, and store the results in "dst". + +tmp := imm8[7:0] +IF tmp > 15 + tmp := 16 +FI +dst[127:0] := a[127:0] << (tmp*8) +dst[255:128] := a[255:128] << (tmp*8) +dst[MAX:256] := 0 + + + AVX2 +
immintrin.h
+ Shift
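
The two identical entries above reflect that Intel exposes this byte shift under two names (apparently _mm256_bslli_epi128 and _mm256_slli_si256). The shift is per 128-bit lane; bytes never cross the lane boundary. A sketch:

use core::arch::x86_64::*;

// Each 128-bit half is shifted left by 4 bytes independently, so this
// is not a whole-register 256-bit byte shift.
#[target_feature(enable = "avx2")]
unsafe fn byte_shift_left(a: __m256i) -> __m256i {
    _mm256_bslli_epi128::<4>(a)
}
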
- - - - Shift packed 16-bit integers in "a" left by "count" while shifting in zeros, - and store the results in "dst". - - FOR j := 0 to 15 - i := j*16 - IF count[63:0] > 15 - dst[i+15:i] := 0 - ELSE - dst[i+15:i] := ZeroExtend16(a[i+15:i] << count[63:0]) - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX2 -
immintrin.h
- Shift + + + + Shift packed 16-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 15 + i := j*16 + IF count[63:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend16(a[i+15:i] << count[63:0]) + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX2 +
immintrin.h
+ Shift
- - - - Shift packed 16-bit integers in "a" left by "imm8" while shifting in zeros, and - store the results in "dst". - - FOR j := 0 to 15 - i := j*16 - IF imm8[7:0] > 15 - dst[i+15:i] := 0 - ELSE - dst[i+15:i] := ZeroExtend16(a[i+15:i] << imm8[7:0]) - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX2 -
immintrin.h
- Shift + + + + Shift packed 16-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 15 + i := j*16 + IF imm8[7:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend16(a[i+15:i] << imm8[7:0]) + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX2 +
immintrin.h
+ Shift
- - - - Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, - and store the results in "dst". - - FOR j := 0 to 7 - i := j*32 - IF count[63:0] > 31 - dst[i+31:i] := 0 - ELSE - dst[i+31:i] := ZeroExtend32(a[i+31:i] << count[63:0]) - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX2 -
immintrin.h
- Shift + + + + Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + IF count[63:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend32(a[i+31:i] << count[63:0]) + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX2 +
immintrin.h
+ Shift
- - - - Shift packed 32-bit integers in "a" left by "imm8" while shifting in zeros, and - store the results in "dst". - - FOR j := 0 to 7 - i := j*32 - IF imm8[7:0] > 31 - dst[i+31:i] := 0 - ELSE - dst[i+31:i] := ZeroExtend32(a[i+31:i] << imm8[7:0]) - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX2 -
immintrin.h
- Shift + + + + Shift packed 32-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + IF imm8[7:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend32(a[i+31:i] << imm8[7:0]) + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX2 +
immintrin.h
+ Shift
- - - - Shift packed 64-bit integers in "a" left by "count" while shifting in zeros, - and store the results in "dst". - - FOR j := 0 to 3 - i := j*64 - IF count[63:0] > 63 - dst[i+63:i] := 0 - ELSE - dst[i+63:i] := ZeroExtend64(a[i+63:i] << count[63:0]) - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX2 -
immintrin.h
- Shift + + + + Shift packed 64-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + IF count[63:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend64(a[i+63:i] << count[63:0]) + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX2 +
immintrin.h
+ Shift
- - - - Shift packed 64-bit integers in "a" left by "imm8" while shifting in zeros, and - store the results in "dst". - - FOR j := 0 to 3 - i := j*64 - IF imm8[7:0] > 63 - dst[i+63:i] := 0 - ELSE - dst[i+63:i] := ZeroExtend64(a[i+63:i] << imm8[7:0]) - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX2 -
immintrin.h
- Shift + + + + Shift packed 64-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + IF imm8[7:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend64(a[i+63:i] << imm8[7:0]) + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX2 +
immintrin.h
+ Shift
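
These entries come in pairs: a compile-time immediate ("imm8") and a runtime count taken from the low 64 bits of an XMM register ("count"). In Rust the immediate becomes a const generic. A sketch of both, assuming the 64-bit element forms are _mm256_slli_epi64 and _mm256_sll_epi64:

use core::arch::x86_64::*;

// The same left shift two ways: immediate vs. register count. Counts
// past the element width zero the lane, per the pseudocode above.
#[target_feature(enable = "avx2")]
unsafe fn shift_left_both_ways(a: __m256i, n: i64) -> (__m256i, __m256i) {
    let by_imm = _mm256_slli_epi64::<3>(a);
    let by_reg = _mm256_sll_epi64(a, _mm_set_epi64x(0, n));
    (by_imm, by_reg)
}
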
- - - - Shift packed 32-bit integers in "a" left by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in - "dst". - - FOR j := 0 to 3 - i := j*32 - IF count[i+31:i] < 32 - dst[i+31:i] := ZeroExtend32(a[i+31:i] << count[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX2 -
immintrin.h
- Shift + + + + Shift packed 32-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + IF count[i+31:i] < 32 + dst[i+31:i] := ZeroExtend32(a[i+31:i] << count[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX2 +
immintrin.h
+ Shift
- - - - Shift packed 32-bit integers in "a" left by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in - "dst". - - FOR j := 0 to 7 - i := j*32 - IF count[i+31:i] < 32 - dst[i+31:i] := ZeroExtend32(a[i+31:i] << count[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX2 -
immintrin.h
- Shift + + + + Shift packed 32-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + IF count[i+31:i] < 32 + dst[i+31:i] := ZeroExtend32(a[i+31:i] << count[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX2 +
immintrin.h
+ Shift
- - - - Shift packed 64-bit integers in "a" left by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in - "dst". - - FOR j := 0 to 1 - i := j*64 - IF count[i+63:i] < 64 - dst[i+63:i] := ZeroExtend64(a[i+63:i] << count[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX2 -
immintrin.h
- Shift + + + + Shift packed 64-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + IF count[i+63:i] < 64 + dst[i+63:i] := ZeroExtend64(a[i+63:i] << count[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX2 +
immintrin.h
+ Shift
- - - - Shift packed 64-bit integers in "a" left by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in - "dst". - - FOR j := 0 to 3 - i := j*64 - IF count[i+63:i] < 64 - dst[i+63:i] := ZeroExtend64(a[i+63:i] << count[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX2 -
immintrin.h
- Shift + + + + Shift packed 64-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + IF count[i+63:i] < 64 + dst[i+63:i] := ZeroExtend64(a[i+63:i] << count[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX2 +
immintrin.h
+ Shift
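
The "v" forms take a per-element count vector instead of one shared count. A sketch (presumably _mm256_sllv_epi64):

use core::arch::x86_64::*;

// Independent shift counts per lane; the count of 64 is out of range,
// so that lane becomes zero rather than wrapping.
#[target_feature(enable = "avx2")]
unsafe fn shift_left_per_lane(a: __m256i) -> __m256i {
    let counts = _mm256_setr_epi64x(0, 1, 63, 64);
    _mm256_sllv_epi64(a, counts)
}
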
- - - - Shift packed 16-bit integers in "a" right by "count" while shifting in sign - bits, and store the results in "dst". - - FOR j := 0 to 15 - i := j*16 - IF count[63:0] > 15 - dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0) - ELSE - dst[i+15:i] := SignExtend16(a[i+15:i] >> count[63:0]) - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX2 -
immintrin.h
- Shift + + + + Shift packed 16-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 15 + i := j*16 + IF count[63:0] > 15 + dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0) + ELSE + dst[i+15:i] := SignExtend16(a[i+15:i] >> count[63:0]) + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX2 +
immintrin.h
+ Shift
- - - - Shift packed 16-bit integers in "a" right by "imm8" while shifting in sign - bits, and store the results in "dst". - - FOR j := 0 to 15 - i := j*16 - IF imm8[7:0] > 15 - dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0) - ELSE - dst[i+15:i] := SignExtend16(a[i+15:i] >> imm8[7:0]) - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX2 -
immintrin.h
- Shift + + + + Shift packed 16-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 15 + i := j*16 + IF imm8[7:0] > 15 + dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0) + ELSE + dst[i+15:i] := SignExtend16(a[i+15:i] >> imm8[7:0]) + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX2 +
immintrin.h
+ Shift
- - - - Shift packed 32-bit integers in "a" right by "count" while shifting in sign - bits, and store the results in "dst". - - FOR j := 0 to 7 - i := j*32 - IF count[63:0] > 31 - dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0) - ELSE - dst[i+31:i] := SignExtend32(a[i+31:i] >> count[63:0]) - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX2 -
immintrin.h
- Shift + + + + Shift packed 32-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + IF count[63:0] > 31 + dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0) + ELSE + dst[i+31:i] := SignExtend32(a[i+31:i] >> count[63:0]) + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX2 +
immintrin.h
+ Shift
- - - - Shift packed 32-bit integers in "a" right by "imm8" while shifting in sign - bits, and store the results in "dst". - - FOR j := 0 to 7 - i := j*32 - IF imm8[7:0] > 31 - dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0) - ELSE - dst[i+31:i] := SignExtend32(a[i+31:i] >> imm8[7:0]) - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX2 -
immintrin.h
- Shift + + + + Shift packed 32-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + IF imm8[7:0] > 31 + dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0) + ELSE + dst[i+31:i] := SignExtend32(a[i+31:i] >> imm8[7:0]) + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX2 +
immintrin.h
+ Shift
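
Arithmetic right shifts replicate the sign bit instead of feeding in zeros, so negative lanes stay negative. A sketch of the immediate form (apparently _mm256_srai_epi32):

use core::arch::x86_64::*;

// A lane holding -8 becomes -2; a logical shift would instead produce
// a large positive value.
#[target_feature(enable = "avx2")]
unsafe fn shift_right_arithmetic(a: __m256i) -> __m256i {
    _mm256_srai_epi32::<2>(a)
}
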
- - - - Shift packed 32-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in sign bits, and store the results in - "dst". - - FOR j := 0 to 3 - i := j*32 - IF count[i+31:i] < 32 - dst[i+31:i] := SignExtend32(a[i+31:i] >> count[i+31:i]) - ELSE - dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0) - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX2 -
immintrin.h
- Shift + + + + Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + IF count[i+31:i] < 32 + dst[i+31:i] := SignExtend32(a[i+31:i] >> count[i+31:i]) + ELSE + dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0) + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX2 +
immintrin.h
+ Shift
- - - - Shift packed 32-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in sign bits, and store the results in - "dst". - - FOR j := 0 to 7 - i := j*32 - IF count[i+31:i] < 32 - dst[i+31:i] := SignExtend32(a[i+31:i] >> count[i+31:i]) - ELSE - dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0) - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX2 -
immintrin.h
- Shift + + + + Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + IF count[i+31:i] < 32 + dst[i+31:i] := SignExtend32(a[i+31:i] >> count[i+31:i]) + ELSE + dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0) + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX2 +
immintrin.h
+ Shift
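
And the per-element arithmetic variant: an out-of-range count collapses its lane to all-zeros or all-ones depending on the sign, per the ELSE branch above. A sketch (presumably _mm256_srav_epi32):

use core::arch::x86_64::*;

// The final count of 32 is out of range, so that lane becomes
// 0 or -1 according to its sign bit.
#[target_feature(enable = "avx2")]
unsafe fn shift_right_arithmetic_per_lane(a: __m256i) -> __m256i {
    let counts = _mm256_setr_epi32(0, 1, 2, 4, 8, 16, 31, 32);
    _mm256_srav_epi32(a, counts)
}
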
- - - - Shift 128-bit lanes in "a" right by "imm8" bytes while shifting in zeros, and - store the results in "dst". - - tmp := imm8[7:0] - IF tmp > 15 - tmp := 16 - FI - dst[127:0] := a[127:0] >> (tmp*8) - dst[255:128] := a[255:128] >> (tmp*8) - dst[MAX:256] := 0 - - - AVX2 -
immintrin.h
- Shift + + + + Shift 128-bit lanes in "a" right by "imm8" bytes while shifting in zeros, and store the results in "dst". + +tmp := imm8[7:0] +IF tmp > 15 + tmp := 16 +FI +dst[127:0] := a[127:0] >> (tmp*8) +dst[255:128] := a[255:128] >> (tmp*8) +dst[MAX:256] := 0 + + + AVX2 +
immintrin.h
+ Shift
- - - - Shift 128-bit lanes in "a" right by "imm8" bytes while shifting in zeros, and - store the results in "dst". - - tmp := imm8[7:0] - IF tmp > 15 - tmp := 16 - FI - dst[127:0] := a[127:0] >> (tmp*8) - dst[255:128] := a[255:128] >> (tmp*8) - dst[MAX:256] := 0 - - - AVX2 -
immintrin.h
- Shift + + + + Shift 128-bit lanes in "a" right by "imm8" bytes while shifting in zeros, and store the results in "dst". + +tmp := imm8[7:0] +IF tmp > 15 + tmp := 16 +FI +dst[127:0] := a[127:0] >> (tmp*8) +dst[255:128] := a[255:128] >> (tmp*8) +dst[MAX:256] := 0 + + + AVX2 +
immintrin.h
+ Shift
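
The right-hand byte shift mirrors the left one, again per 128-bit lane and again under two Intel names (apparently _mm256_bsrli_epi128 and _mm256_srli_si256). A sketch:

use core::arch::x86_64::*;

// Each 128-bit half is shifted right by 4 bytes independently.
#[target_feature(enable = "avx2")]
unsafe fn byte_shift_right(a: __m256i) -> __m256i {
    _mm256_bsrli_epi128::<4>(a)
}
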
- - - - Shift packed 16-bit integers in "a" right by "count" while shifting in zeros, - and store the results in "dst". - - FOR j := 0 to 15 - i := j*16 - IF count[63:0] > 15 - dst[i+15:i] := 0 - ELSE - dst[i+15:i] := ZeroExtend16(a[i+15:i] >> count[63:0]) - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX2 -
immintrin.h
- Shift + + + + Shift packed 16-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 15 + i := j*16 + IF count[63:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend16(a[i+15:i] >> count[63:0]) + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX2 +
immintrin.h
+ Shift
- - - - Shift packed 16-bit integers in "a" right by "imm8" while shifting in zeros, - and store the results in "dst". - - FOR j := 0 to 15 - i := j*16 - IF imm8[7:0] > 15 - dst[i+15:i] := 0 - ELSE - dst[i+15:i] := ZeroExtend16(a[i+15:i] >> imm8[7:0]) - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX2 -
immintrin.h
- Shift + + + + Shift packed 16-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 15 + i := j*16 + IF imm8[7:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend16(a[i+15:i] >> imm8[7:0]) + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX2 +
immintrin.h
+ Shift
- - - - Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, - and store the results in "dst". - - FOR j := 0 to 7 - i := j*32 - IF count[63:0] > 31 - dst[i+31:i] := 0 - ELSE - dst[i+31:i] := ZeroExtend32(a[i+31:i] >> count[63:0]) - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX2 -
immintrin.h
- Shift + + + + Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + IF count[63:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend32(a[i+31:i] >> count[63:0]) + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX2 +
immintrin.h
+ Shift
- - - - Shift packed 32-bit integers in "a" right by "imm8" while shifting in zeros, - and store the results in "dst". - - FOR j := 0 to 7 - i := j*32 - IF imm8[7:0] > 31 - dst[i+31:i] := 0 - ELSE - dst[i+31:i] := ZeroExtend32(a[i+31:i] >> imm8[7:0]) - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX2 -
immintrin.h
- Shift + + + + Shift packed 32-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + IF imm8[7:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend32(a[i+31:i] >> imm8[7:0]) + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX2 +
immintrin.h
+ Shift
- - - - Shift packed 64-bit integers in "a" right by "count" while shifting in zeros, - and store the results in "dst". - - FOR j := 0 to 3 - i := j*64 - IF count[63:0] > 63 - dst[i+63:i] := 0 - ELSE - dst[i+63:i] := ZeroExtend64(a[i+63:i] >> count[63:0]) - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX2 -
immintrin.h
- Shift + + + + Shift packed 64-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + IF count[63:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend64(a[i+63:i] >> count[63:0]) + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX2 +
immintrin.h
+ Shift
- - - - Shift packed 64-bit integers in "a" right by "imm8" while shifting in zeros, - and store the results in "dst". - - FOR j := 0 to 3 - i := j*64 - IF imm8[7:0] > 63 - dst[i+63:i] := 0 - ELSE - dst[i+63:i] := ZeroExtend64(a[i+63:i] >> imm8[7:0]) - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX2 -
immintrin.h
- Shift + + + + Shift packed 64-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + IF imm8[7:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend64(a[i+63:i] >> imm8[7:0]) + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX2 +
immintrin.h
+ Shift
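
Logical right shifts always feed in zeros, which doubles as an unsigned divide by a power of two. A sketch of the 16-bit immediate form (presumably _mm256_srli_epi16):

use core::arch::x86_64::*;

// For unsigned 16-bit data this is a per-lane division by 16.
#[target_feature(enable = "avx2")]
unsafe fn shift_right_logical(a: __m256i) -> __m256i {
    _mm256_srli_epi16::<4>(a)
}
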
- - - - Shift packed 32-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in - "dst". - - FOR j := 0 to 3 - i := j*32 - IF count[i+31:i] < 32 - dst[i+31:i] := ZeroExtend32(a[i+31:i] >> count[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX2 -
immintrin.h
- Shift + + + + Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + IF count[i+31:i] < 32 + dst[i+31:i] := ZeroExtend32(a[i+31:i] >> count[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX2 +
immintrin.h
+ Shift
- - - - Shift packed 32-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in - "dst". - - FOR j := 0 to 7 - i := j*32 - IF count[i+31:i] < 32 - dst[i+31:i] := ZeroExtend32(a[i+31:i] >> count[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX2 -
immintrin.h
- Shift + + + + Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + IF count[i+31:i] < 32 + dst[i+31:i] := ZeroExtend32(a[i+31:i] >> count[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX2 +
immintrin.h
+ Shift
- - - - Shift packed 64-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in - "dst". - - FOR j := 0 to 1 - i := j*64 - IF count[i+63:i] < 64 - dst[i+63:i] := ZeroExtend64(a[i+63:i] >> count[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX2 -
immintrin.h
- Shift + + + + Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + IF count[i+63:i] < 64 + dst[i+63:i] := ZeroExtend64(a[i+63:i] >> count[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX2 +
immintrin.h
+ Shift
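
The variable logical form rounds out the set; as with the other "v" shifts, an out-of-range count zeroes its lane. A sketch (presumably _mm256_srlv_epi64):

use core::arch::x86_64::*;

// Per-lane logical right shifts; the count of 64 zeroes lane 3.
#[target_feature(enable = "avx2")]
unsafe fn shift_right_logical_per_lane(a: __m256i) -> __m256i {
    let counts = _mm256_setr_epi64x(1, 2, 32, 64);
    _mm256_srlv_epi64(a, counts)
}
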
- - - - Shift packed 64-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in - "dst". - - FOR j := 0 to 3 - i := j*64 - IF count[i+63:i] < 64 - dst[i+63:i] := ZeroExtend64(a[i+63:i] >> count[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX2 -
immintrin.h
- Shift -
- - - - - - - - Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit - integers in "a" compared to those in "b", and store the 16-bit results in "dst". - Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two - SADs use the lower 8-bit quadruplet of the lane from "a", and the last two SADs use the - uppper 8-bit quadruplet of the lane from "a". Quadruplets from "b" are selected from - within 128-bit lanes according to the control in "imm8", and each SAD in each 64-bit - lane uses the selected quadruplet at 8-bit offsets. - - FOR i := 0 to 1 - tmp.m128[i].dword[0] := b.m128[i].dword[ imm8[1:0] ] - tmp.m128[i].dword[1] := b.m128[i].dword[ imm8[3:2] ] - tmp.m128[i].dword[2] := b.m128[i].dword[ imm8[5:4] ] - tmp.m128[i].dword[3] := b.m128[i].dword[ imm8[7:6] ] - ENDFOR - FOR j := 0 to 3 - i := j*64 - dst[i+15:i] := ABS(a[i+7:i] - tmp[i+7:i]) + ABS(a[i+15:i+8] - tmp[i+15:i+8]) +\ - ABS(a[i+23:i+16] - tmp[i+23:i+16]) + ABS(a[i+31:i+24] - tmp[i+31:i+24]) - - dst[i+31:i+16] := ABS(a[i+7:i] - tmp[i+15:i+8]) + ABS(a[i+15:i+8] - tmp[i+23:i+16]) +\ - ABS(a[i+23:i+16] - tmp[i+31:i+24]) + ABS(a[i+31:i+24] - tmp[i+39:i+32]) - - dst[i+47:i+32] := ABS(a[i+39:i+32] - tmp[i+23:i+16]) + ABS(a[i+47:i+40] - - tmp[i+31:i+24]) +\ - ABS(a[i+55:i+48] - tmp[i+39:i+32]) + ABS(a[i+63:i+56] - tmp[i+47:i+40]) - - dst[i+63:i+48] := ABS(a[i+39:i+32] - tmp[i+31:i+24]) + ABS(a[i+47:i+40] - - tmp[i+39:i+32]) +\ - ABS(a[i+55:i+48] - tmp[i+47:i+40]) + ABS(a[i+63:i+56] - tmp[i+55:i+48]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + IF count[i+63:i] < 64 + dst[i+63:i] := ZeroExtend64(a[i+63:i] >> count[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX2 +
immintrin.h
+ Shift +
+ + + + + + + + Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in "a" compared to those in "b", and store the 16-bit results in "dst".
	Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two SADs use the lower 8-bit quadruplet of the lane from "a", and the last two SADs use the upper 8-bit quadruplet of the lane from "a". Quadruplets from "b" are selected from within 128-bit lanes according to the control in "imm8", and each SAD in each 64-bit lane uses the selected quadruplet at 8-bit offsets.
+
+FOR i := 0 to 1
+	tmp.m128[i].dword[0] := b.m128[i].dword[ imm8[1:0] ]
+	tmp.m128[i].dword[1] := b.m128[i].dword[ imm8[3:2] ]
+	tmp.m128[i].dword[2] := b.m128[i].dword[ imm8[5:4] ]
+	tmp.m128[i].dword[3] := b.m128[i].dword[ imm8[7:6] ]
+ENDFOR
+FOR j := 0 to 3
+	i := j*64
+	dst[i+15:i] := ABS(a[i+7:i] - tmp[i+7:i]) + ABS(a[i+15:i+8] - tmp[i+15:i+8]) +\
+				   ABS(a[i+23:i+16] - tmp[i+23:i+16]) + ABS(a[i+31:i+24] - tmp[i+31:i+24])
+
+	dst[i+31:i+16] := ABS(a[i+7:i] - tmp[i+15:i+8]) + ABS(a[i+15:i+8] - tmp[i+23:i+16]) +\
+					  ABS(a[i+23:i+16] - tmp[i+31:i+24]) + ABS(a[i+31:i+24] - tmp[i+39:i+32])
+
+	dst[i+47:i+32] := ABS(a[i+39:i+32] - tmp[i+23:i+16]) + ABS(a[i+47:i+40] - tmp[i+31:i+24]) +\
+					  ABS(a[i+55:i+48] - tmp[i+39:i+32]) + ABS(a[i+63:i+56] - tmp[i+47:i+40])
+
+	dst[i+63:i+48] := ABS(a[i+39:i+32] - tmp[i+31:i+24]) + ABS(a[i+47:i+40] - tmp[i+39:i+32]) +\
+					  ABS(a[i+55:i+48] - tmp[i+47:i+40]) + ABS(a[i+63:i+56] - tmp[i+55:i+48])
+ENDFOR
+dst[MAX:256] := 0
+
+
+	AVX512BW
+	AVX512VL
+
immintrin.h
+ Miscellaneous
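
A sketch of driving the entry above from Rust, assuming it is what Intel names _mm256_dbsad_epu8 (the AVX-512 intrinsics were still nightly-only when this series was written, so treat the exact availability as provisional). IMM8 = 0 picks dword 0 of each 128-bit lane of "b" for all four SADs, the simplest control setting:

use core::arch::x86_64::*;

// Sixteen 16-bit SAD results from two 256-bit byte vectors; VDBPSADBW
// is aimed at motion-estimation style workloads.
#[target_feature(enable = "avx512bw,avx512vl")]
unsafe fn double_block_sad(a: __m256i, b: __m256i) -> __m256i {
    _mm256_dbsad_epu8::<0>(a, b)
}
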
- - - - - - - Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit - integers in "a" compared to those in "b", and store the 16-bit results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two - SADs use the lower 8-bit quadruplet of the lane from "a", and the last two SADs use the - uppper 8-bit quadruplet of the lane from "a". Quadruplets from "b" are selected from - within 128-bit lanes according to the control in "imm8", and each SAD in each 64-bit - lane uses the selected quadruplet at 8-bit offsets. - - FOR i := 0 to 1 - tmp.m128[i].dword[0] := b.m128[i].dword[ imm8[1:0] ] - tmp.m128[i].dword[1] := b.m128[i].dword[ imm8[3:2] ] - tmp.m128[i].dword[2] := b.m128[i].dword[ imm8[5:4] ] - tmp.m128[i].dword[3] := b.m128[i].dword[ imm8[7:6] ] - ENDFOR - FOR j := 0 to 3 - i := j*64 - tmp_dst[i+15:i] := ABS(a[i+7:i] - tmp[i+7:i]) + ABS(a[i+15:i+8] - tmp[i+15:i+8]) +\ - ABS(a[i+23:i+16] - tmp[i+23:i+16]) + ABS(a[i+31:i+24] - tmp[i+31:i+24]) - tmp_dst[i+31:i+16] := ABS(a[i+7:i] - tmp[i+15:i+8]) + ABS(a[i+15:i+8] - tmp[i+23:i+16]) - +\ - ABS(a[i+23:i+16] - tmp[i+31:i+24]) + ABS(a[i+31:i+24] - tmp[i+39:i+32]) - tmp_dst[i+47:i+32] := ABS(a[i+39:i+32] - tmp[i+23:i+16]) + ABS(a[i+47:i+40] - - tmp[i+31:i+24]) +\ - ABS(a[i+55:i+48] - tmp[i+39:i+32]) + ABS(a[i+63:i+56] - tmp[i+47:i+40]) - tmp_dst[i+63:i+48] := ABS(a[i+39:i+32] - tmp[i+31:i+24]) + ABS(a[i+47:i+40] - - tmp[i+39:i+32]) +\ - ABS(a[i+55:i+48] - tmp[i+47:i+40]) + ABS(a[i+63:i+56] - tmp[i+55:i+48]) - ENDFOR - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := tmp_dst[i+15:i] - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + + Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in "a" compared to those in "b", and store the 16-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
	Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two SADs use the lower 8-bit quadruplet of the lane from "a", and the last two SADs use the upper 8-bit quadruplet of the lane from "a". Quadruplets from "b" are selected from within 128-bit lanes according to the control in "imm8", and each SAD in each 64-bit lane uses the selected quadruplet at 8-bit offsets.
+
+FOR i := 0 to 1
+	tmp.m128[i].dword[0] := b.m128[i].dword[ imm8[1:0] ]
+	tmp.m128[i].dword[1] := b.m128[i].dword[ imm8[3:2] ]
+	tmp.m128[i].dword[2] := b.m128[i].dword[ imm8[5:4] ]
+	tmp.m128[i].dword[3] := b.m128[i].dword[ imm8[7:6] ]
+ENDFOR
+FOR j := 0 to 3
+	i := j*64
+	tmp_dst[i+15:i] := ABS(a[i+7:i] - tmp[i+7:i]) + ABS(a[i+15:i+8] - tmp[i+15:i+8]) +\
+				   ABS(a[i+23:i+16] - tmp[i+23:i+16]) + ABS(a[i+31:i+24] - tmp[i+31:i+24])
+
+	tmp_dst[i+31:i+16] := ABS(a[i+7:i] - tmp[i+15:i+8]) + ABS(a[i+15:i+8] - tmp[i+23:i+16]) +\
+				   ABS(a[i+23:i+16] - tmp[i+31:i+24]) + ABS(a[i+31:i+24] - tmp[i+39:i+32])
+
+	tmp_dst[i+47:i+32] := ABS(a[i+39:i+32] - tmp[i+23:i+16]) + ABS(a[i+47:i+40] - tmp[i+31:i+24]) +\
+				   ABS(a[i+55:i+48] - tmp[i+39:i+32]) + ABS(a[i+63:i+56] - tmp[i+47:i+40])
+
+	tmp_dst[i+63:i+48] := ABS(a[i+39:i+32] - tmp[i+31:i+24]) + ABS(a[i+47:i+40] - tmp[i+39:i+32]) +\
+				   ABS(a[i+55:i+48] - tmp[i+47:i+40]) + ABS(a[i+63:i+56] - tmp[i+55:i+48])
+ENDFOR
+FOR j := 0 to 15
+	i := j*16
+	IF k[j]
+		dst[i+15:i] := tmp_dst[i+15:i]
+	ELSE
+		dst[i+15:i] := src[i+15:i]
+	FI
+ENDFOR
+dst[MAX:256] := 0
+
+
+	AVX512BW
+	AVX512VL
+
immintrin.h
+ Miscellaneous
- - - - - - Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit - integers in "a" compared to those in "b", and store the 16-bit results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two - SADs use the lower 8-bit quadruplet of the lane from "a", and the last two SADs use the - uppper 8-bit quadruplet of the lane from "a". Quadruplets from "b" are selected from - within 128-bit lanes according to the control in "imm8", and each SAD in each 64-bit - lane uses the selected quadruplet at 8-bit offsets. - - FOR i := 0 to 1 - tmp.m128[i].dword[0] := b.m128[i].dword[ imm8[1:0] ] - tmp.m128[i].dword[1] := b.m128[i].dword[ imm8[3:2] ] - tmp.m128[i].dword[2] := b.m128[i].dword[ imm8[5:4] ] - tmp.m128[i].dword[3] := b.m128[i].dword[ imm8[7:6] ] - ENDFOR - FOR j := 0 to 3 - i := j*64 - tmp_dst[i+15:i] := ABS(a[i+7:i] - tmp[i+7:i]) + ABS(a[i+15:i+8] - tmp[i+15:i+8]) +\ - ABS(a[i+23:i+16] - tmp[i+23:i+16]) + ABS(a[i+31:i+24] - tmp[i+31:i+24]) - tmp_dst[i+31:i+16] := ABS(a[i+7:i] - tmp[i+15:i+8]) + ABS(a[i+15:i+8] - tmp[i+23:i+16]) - +\ - ABS(a[i+23:i+16] - tmp[i+31:i+24]) + ABS(a[i+31:i+24] - tmp[i+39:i+32]) - tmp_dst[i+47:i+32] := ABS(a[i+39:i+32] - tmp[i+23:i+16]) + ABS(a[i+47:i+40] - - tmp[i+31:i+24]) +\ - ABS(a[i+55:i+48] - tmp[i+39:i+32]) + ABS(a[i+63:i+56] - tmp[i+47:i+40]) - tmp_dst[i+63:i+48] := ABS(a[i+39:i+32] - tmp[i+31:i+24]) + ABS(a[i+47:i+40] - - tmp[i+39:i+32]) +\ - ABS(a[i+55:i+48] - tmp[i+47:i+40]) + ABS(a[i+63:i+56] - tmp[i+55:i+48]) - ENDFOR - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := tmp_dst[i+15:i] - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in "a" compared to those in "b", and store the 16-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
	Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two SADs use the lower 8-bit quadruplet of the lane from "a", and the last two SADs use the upper 8-bit quadruplet of the lane from "a". Quadruplets from "b" are selected from within 128-bit lanes according to the control in "imm8", and each SAD in each 64-bit lane uses the selected quadruplet at 8-bit offsets.
+
+FOR i := 0 to 1
+	tmp.m128[i].dword[0] := b.m128[i].dword[ imm8[1:0] ]
+	tmp.m128[i].dword[1] := b.m128[i].dword[ imm8[3:2] ]
+	tmp.m128[i].dword[2] := b.m128[i].dword[ imm8[5:4] ]
+	tmp.m128[i].dword[3] := b.m128[i].dword[ imm8[7:6] ]
+ENDFOR
+FOR j := 0 to 3
+	i := j*64
+	tmp_dst[i+15:i] := ABS(a[i+7:i] - tmp[i+7:i]) + ABS(a[i+15:i+8] - tmp[i+15:i+8]) +\
+				   ABS(a[i+23:i+16] - tmp[i+23:i+16]) + ABS(a[i+31:i+24] - tmp[i+31:i+24])
+
+	tmp_dst[i+31:i+16] := ABS(a[i+7:i] - tmp[i+15:i+8]) + ABS(a[i+15:i+8] - tmp[i+23:i+16]) +\
+				   ABS(a[i+23:i+16] - tmp[i+31:i+24]) + ABS(a[i+31:i+24] - tmp[i+39:i+32])
+
+	tmp_dst[i+47:i+32] := ABS(a[i+39:i+32] - tmp[i+23:i+16]) + ABS(a[i+47:i+40] - tmp[i+31:i+24]) +\
+				   ABS(a[i+55:i+48] - tmp[i+39:i+32]) + ABS(a[i+63:i+56] - tmp[i+47:i+40])
+
+	tmp_dst[i+63:i+48] := ABS(a[i+39:i+32] - tmp[i+31:i+24]) + ABS(a[i+47:i+40] - tmp[i+39:i+32]) +\
+				   ABS(a[i+55:i+48] - tmp[i+47:i+40]) + ABS(a[i+63:i+56] - tmp[i+55:i+48])
+ENDFOR
+FOR j := 0 to 15
+	i := j*16
+	IF k[j]
+		dst[i+15:i] := tmp_dst[i+15:i]
+	ELSE
+		dst[i+15:i] := 0
+	FI
+ENDFOR
+dst[MAX:256] := 0
+
+
+	AVX512BW
+	AVX512VL
+
immintrin.h
+ Miscellaneous
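The hunk above is the first of several `dbsad_epu8` variants (zeromask, plain, writemask, at 256- and 128-bit widths); all share one core operation. As a reading aid, here is a minimal plain-Rust model of the unmasked 128-bit pseudocode; the function name is illustrative and this is not the stdarch implementation:

```rust
/// Sketch of the XML operation: select four byte-quadruplets from `b` via
/// 2-bit fields of `imm8`, then per 64-bit lane form four 16-bit SADs of
/// `a`'s low/high quadruplet against the selection at staggered offsets.
fn dbsad_epu8_model(a: [u8; 16], b: [u8; 16], imm8: u8) -> [u16; 8] {
    let mut tmp = [0u8; 16];
    for d in 0..4 {
        let sel = ((imm8 >> (2 * d)) & 0b11) as usize;
        tmp[4 * d..4 * d + 4].copy_from_slice(&b[4 * sel..4 * sel + 4]);
    }
    // Sum of absolute differences of two 4-byte groups.
    let sad = |x: &[u8], y: &[u8]| -> u16 {
        x.iter().zip(y).map(|(&p, &q)| (p as i16 - q as i16).unsigned_abs()).sum()
    };
    let mut dst = [0u16; 8];
    for lane in 0..2 {
        let (a8, t8) = (&a[8 * lane..8 * lane + 8], &tmp[8 * lane..8 * lane + 8]);
        dst[4 * lane] = sad(&a8[0..4], &t8[0..4]);     // low quadruplet, offset 0
        dst[4 * lane + 1] = sad(&a8[0..4], &t8[1..5]); // low quadruplet, offset 1
        dst[4 * lane + 2] = sad(&a8[4..8], &t8[2..6]); // high quadruplet, offset 2
        dst[4 * lane + 3] = sad(&a8[4..8], &t8[3..7]); // high quadruplet, offset 3
    }
    dst
}
```

The masked variants below only wrap this core in the usual per-element writemask/zeromask selection.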
- - - - - Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit - integers in "a" compared to those in "b", and store the 16-bit results in "dst". - Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two - SADs use the lower 8-bit quadruplet of the lane from "a", and the last two SADs use the - uppper 8-bit quadruplet of the lane from "a". Quadruplets from "b" are selected - according to the control in "imm8", and each SAD in each 64-bit lane uses the selected - quadruplet at 8-bit offsets. - - tmp.dword[0] := b.dword[ imm8[1:0] ] - tmp.dword[1] := b.dword[ imm8[3:2] ] - tmp.dword[2] := b.dword[ imm8[5:4] ] - tmp.dword[3] := b.dword[ imm8[7:6] ] - FOR j := 0 to 1 - i := j*64 - dst[i+15:i] := ABS(a[i+7:i] - tmp[i+7:i]) + ABS(a[i+15:i+8] - tmp[i+15:i+8]) +\ - ABS(a[i+23:i+16] - tmp[i+23:i+16]) + ABS(a[i+31:i+24] - tmp[i+31:i+24]) - - dst[i+31:i+16] := ABS(a[i+7:i] - tmp[i+15:i+8]) + ABS(a[i+15:i+8] - tmp[i+23:i+16]) +\ - ABS(a[i+23:i+16] - tmp[i+31:i+24]) + ABS(a[i+31:i+24] - tmp[i+39:i+32]) - - dst[i+47:i+32] := ABS(a[i+39:i+32] - tmp[i+23:i+16]) + ABS(a[i+47:i+40] - - tmp[i+31:i+24]) +\ - ABS(a[i+55:i+48] - tmp[i+39:i+32]) + ABS(a[i+63:i+56] - tmp[i+47:i+40]) - - dst[i+63:i+48] := ABS(a[i+39:i+32] - tmp[i+31:i+24]) + ABS(a[i+47:i+40] - - tmp[i+39:i+32]) +\ - ABS(a[i+55:i+48] - tmp[i+47:i+40]) + ABS(a[i+63:i+56] - tmp[i+55:i+48]) - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in "a" compared to those in "b", and store the 16-bit results in "dst". + Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two SADs use the lower 8-bit quadruplet of the lane from "a", and the last two SADs use the upper 8-bit quadruplet of the lane from "a". Quadruplets from "b" are selected according to the control in "imm8", and each SAD in each 64-bit lane uses the selected quadruplet at 8-bit offsets. +
+tmp.dword[0] := b.dword[ imm8[1:0] ]
+tmp.dword[1] := b.dword[ imm8[3:2] ]
+tmp.dword[2] := b.dword[ imm8[5:4] ]
+tmp.dword[3] := b.dword[ imm8[7:6] ]
+FOR j := 0 to 1
+ i := j*64
+ dst[i+15:i] := ABS(a[i+7:i] - tmp[i+7:i]) + ABS(a[i+15:i+8] - tmp[i+15:i+8]) +\
+ ABS(a[i+23:i+16] - tmp[i+23:i+16]) + ABS(a[i+31:i+24] - tmp[i+31:i+24])
+
+ dst[i+31:i+16] := ABS(a[i+7:i] - tmp[i+15:i+8]) + ABS(a[i+15:i+8] - tmp[i+23:i+16]) +\
+ ABS(a[i+23:i+16] - tmp[i+31:i+24]) + ABS(a[i+31:i+24] - tmp[i+39:i+32])
+
+ dst[i+47:i+32] := ABS(a[i+39:i+32] - tmp[i+23:i+16]) + ABS(a[i+47:i+40] - tmp[i+31:i+24]) +\
+ ABS(a[i+55:i+48] - tmp[i+39:i+32]) + ABS(a[i+63:i+56] - tmp[i+47:i+40])
+
+ dst[i+63:i+48] := ABS(a[i+39:i+32] - tmp[i+31:i+24]) + ABS(a[i+47:i+40] - tmp[i+39:i+32]) +\
+ ABS(a[i+55:i+48] - tmp[i+47:i+40]) + ABS(a[i+63:i+56] - tmp[i+55:i+48])
+ENDFOR
+dst[MAX:128] := 0
+ + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - - Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit - integers in "a" compared to those in "b", and store the 16-bit results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two - SADs use the lower 8-bit quadruplet of the lane from "a", and the last two SADs use the - uppper 8-bit quadruplet of the lane from "a". Quadruplets from "b" are selected - according to the control in "imm8", and each SAD in each 64-bit lane uses the selected - quadruplet at 8-bit offsets. - - tmp.dword[0] := b.dword[ imm8[1:0] ] - tmp.dword[1] := b.dword[ imm8[3:2] ] - tmp.dword[2] := b.dword[ imm8[5:4] ] - tmp.dword[3] := b.dword[ imm8[7:6] ] - FOR j := 0 to 1 - i := j*64 - tmp_dst[i+15:i] := ABS(a[i+7:i] - tmp[i+7:i]) + ABS(a[i+15:i+8] - tmp[i+15:i+8]) +\ - ABS(a[i+23:i+16] - tmp[i+23:i+16]) + ABS(a[i+31:i+24] - tmp[i+31:i+24]) - tmp_dst[i+31:i+16] := ABS(a[i+7:i] - tmp[i+15:i+8]) + ABS(a[i+15:i+8] - tmp[i+23:i+16]) - +\ - ABS(a[i+23:i+16] - tmp[i+31:i+24]) + ABS(a[i+31:i+24] - tmp[i+39:i+32]) - tmp_dst[i+47:i+32] := ABS(a[i+39:i+32] - tmp[i+23:i+16]) + ABS(a[i+47:i+40] - - tmp[i+31:i+24]) +\ - ABS(a[i+55:i+48] - tmp[i+39:i+32]) + ABS(a[i+63:i+56] - tmp[i+47:i+40]) - tmp_dst[i+63:i+48] := ABS(a[i+39:i+32] - tmp[i+31:i+24]) + ABS(a[i+47:i+40] - - tmp[i+39:i+32]) +\ - ABS(a[i+55:i+48] - tmp[i+47:i+40]) + ABS(a[i+63:i+56] - tmp[i+55:i+48]) - ENDFOR - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := tmp_dst[i+15:i] - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + + Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in "a" compared to those in "b", and store the 16-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two SADs use the lower 8-bit quadruplet of the lane from "a", and the last two SADs use the upper 8-bit quadruplet of the lane from "a". Quadruplets from "b" are selected according to the control in "imm8", and each SAD in each 64-bit lane uses the selected quadruplet at 8-bit offsets. +
+tmp.dword[0] := b.dword[ imm8[1:0] ]
+tmp.dword[1] := b.dword[ imm8[3:2] ]
+tmp.dword[2] := b.dword[ imm8[5:4] ]
+tmp.dword[3] := b.dword[ imm8[7:6] ]
+FOR j := 0 to 1
+ i := j*64
+ tmp_dst[i+15:i] := ABS(a[i+7:i] - tmp[i+7:i]) + ABS(a[i+15:i+8] - tmp[i+15:i+8]) +\
+ ABS(a[i+23:i+16] - tmp[i+23:i+16]) + ABS(a[i+31:i+24] - tmp[i+31:i+24])
+
+ tmp_dst[i+31:i+16] := ABS(a[i+7:i] - tmp[i+15:i+8]) + ABS(a[i+15:i+8] - tmp[i+23:i+16]) +\
+ ABS(a[i+23:i+16] - tmp[i+31:i+24]) + ABS(a[i+31:i+24] - tmp[i+39:i+32])
+
+ tmp_dst[i+47:i+32] := ABS(a[i+39:i+32] - tmp[i+23:i+16]) + ABS(a[i+47:i+40] - tmp[i+31:i+24]) +\
+ ABS(a[i+55:i+48] - tmp[i+39:i+32]) + ABS(a[i+63:i+56] - tmp[i+47:i+40])
+
+ tmp_dst[i+63:i+48] := ABS(a[i+39:i+32] - tmp[i+31:i+24]) + ABS(a[i+47:i+40] - tmp[i+39:i+32]) +\
+ ABS(a[i+55:i+48] - tmp[i+47:i+40]) + ABS(a[i+63:i+56] - tmp[i+55:i+48])
+ENDFOR
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit - integers in "a" compared to those in "b", and store the 16-bit results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two - SADs use the lower 8-bit quadruplet of the lane from "a", and the last two SADs use the - uppper 8-bit quadruplet of the lane from "a". Quadruplets from "b" are selected - according to the control in "imm8", and each SAD in each 64-bit lane uses the selected - quadruplet at 8-bit offsets. - - tmp.dword[0] := b.dword[ imm8[1:0] ] - tmp.dword[1] := b.dword[ imm8[3:2] ] - tmp.dword[2] := b.dword[ imm8[5:4] ] - tmp.dword[3] := b.dword[ imm8[7:6] ] - FOR j := 0 to 1 - i := j*64 - tmp_dst[i+15:i] := ABS(a[i+7:i] - tmp[i+7:i]) + ABS(a[i+15:i+8] - tmp[i+15:i+8]) +\ - ABS(a[i+23:i+16] - tmp[i+23:i+16]) + ABS(a[i+31:i+24] - tmp[i+31:i+24]) - tmp_dst[i+31:i+16] := ABS(a[i+7:i] - tmp[i+15:i+8]) + ABS(a[i+15:i+8] - tmp[i+23:i+16]) - +\ - ABS(a[i+23:i+16] - tmp[i+31:i+24]) + ABS(a[i+31:i+24] - tmp[i+39:i+32]) - tmp_dst[i+47:i+32] := ABS(a[i+39:i+32] - tmp[i+23:i+16]) + ABS(a[i+47:i+40] - - tmp[i+31:i+24]) +\ - ABS(a[i+55:i+48] - tmp[i+39:i+32]) + ABS(a[i+63:i+56] - tmp[i+47:i+40]) - tmp_dst[i+63:i+48] := ABS(a[i+39:i+32] - tmp[i+31:i+24]) + ABS(a[i+47:i+40] - - tmp[i+39:i+32]) +\ - ABS(a[i+55:i+48] - tmp[i+47:i+40]) + ABS(a[i+63:i+56] - tmp[i+55:i+48]) - ENDFOR - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := tmp_dst[i+15:i] - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in "a" compared to those in "b", and store the 16-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two SADs use the lower 8-bit quadruplet of the lane from "a", and the last two SADs use the upper 8-bit quadruplet of the lane from "a". Quadruplets from "b" are selected according to the control in "imm8", and each SAD in each 64-bit lane uses the selected quadruplet at 8-bit offsets. +
+tmp.dword[0] := b.dword[ imm8[1:0] ]
+tmp.dword[1] := b.dword[ imm8[3:2] ]
+tmp.dword[2] := b.dword[ imm8[5:4] ]
+tmp.dword[3] := b.dword[ imm8[7:6] ]
+FOR j := 0 to 1
+ i := j*64
+ tmp_dst[i+15:i] := ABS(a[i+7:i] - tmp[i+7:i]) + ABS(a[i+15:i+8] - tmp[i+15:i+8]) +\
+ ABS(a[i+23:i+16] - tmp[i+23:i+16]) + ABS(a[i+31:i+24] - tmp[i+31:i+24])
+
+ tmp_dst[i+31:i+16] := ABS(a[i+7:i] - tmp[i+15:i+8]) + ABS(a[i+15:i+8] - tmp[i+23:i+16]) +\
+ ABS(a[i+23:i+16] - tmp[i+31:i+24]) + ABS(a[i+31:i+24] - tmp[i+39:i+32])
+
+ tmp_dst[i+47:i+32] := ABS(a[i+39:i+32] - tmp[i+23:i+16]) + ABS(a[i+47:i+40] - tmp[i+31:i+24]) +\
+ ABS(a[i+55:i+48] - tmp[i+39:i+32]) + ABS(a[i+63:i+56] - tmp[i+47:i+40])
+
+ tmp_dst[i+63:i+48] := ABS(a[i+39:i+32] - tmp[i+31:i+24]) + ABS(a[i+47:i+40] - tmp[i+39:i+32]) +\
+ ABS(a[i+55:i+48] - tmp[i+47:i+40]) + ABS(a[i+63:i+56] - tmp[i+55:i+48])
+ENDFOR
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - - Concatenate pairs of 16-byte blocks in "a" and "b" into a 32-byte temporary - result, shift the result right by "imm8" bytes, and store the low 16 bytes in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - FOR j := 0 to 1 - i := j*128 - tmp[255:0] := ((a[i+127:i] << 128)[255:0] OR b[i+127:i]) >> (imm8*8) - tmp_dst[i+127:i] := tmp[127:0] - ENDFOR - FOR j := 0 to 31 - i := j*8 - IF k[j] - dst[i+7:i] := tmp_dst[i+7:i] - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + + Concatenate pairs of 16-byte blocks in "a" and "b" into a 32-byte temporary result, shift the result right by "imm8" bytes, and store the low 16 bytes in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*128 + tmp[255:0] := ((a[i+127:i] << 128)[255:0] OR b[i+127:i]) >> (imm8*8) + tmp_dst[i+127:i] := tmp[127:0] +ENDFOR +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
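The hunks in this group are the masked `alignr_epi8` family; the `((a << 128) OR b) >> (imm8*8)` step above is just a byte-granular window over the concatenation of the two lanes. A hedged single-lane model in plain Rust, with an illustrative name (not the stdarch code):

```rust
/// Sketch of one 128-bit lane of `alignr_epi8`: take 16 bytes starting at
/// byte offset `imm8` from the 32-byte little-endian concatenation [b, a];
/// offsets at or past 32 read as zero, matching the pseudocode's shift.
fn alignr_lane(a: [u8; 16], b: [u8; 16], imm8: usize) -> [u8; 16] {
    let mut concat = [0u8; 32];
    concat[..16].copy_from_slice(&b); // b supplies the low 16 bytes
    concat[16..].copy_from_slice(&a); // a supplies the high 16 bytes
    let mut dst = [0u8; 16];
    for (j, out) in dst.iter_mut().enumerate() {
        if let Some(&byte) = concat.get(imm8 + j) {
            *out = byte;
        }
    }
    dst
}
```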
- - - - - - Concatenate pairs of 16-byte blocks in "a" and "b" into a 32-byte temporary - result, shift the result right by "imm8" bytes, and store the low 16 bytes in "dst" - using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*128 - tmp[255:0] := ((a[i+127:i] << 128)[255:0] OR b[i+127:i]) >> (imm8*8) - tmp_dst[i+127:i] := tmp[127:0] - ENDFOR - FOR j := 0 to 31 - i := j*8 - IF k[j] - dst[i+7:i] := tmp_dst[i+7:i] - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Concatenate pairs of 16-byte blocks in "a" and "b" into a 32-byte temporary result, shift the result right by "imm8" bytes, and store the low 16 bytes in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*128 + tmp[255:0] := ((a[i+127:i] << 128)[255:0] OR b[i+127:i]) >> (imm8*8) + tmp_dst[i+127:i] := tmp[127:0] +ENDFOR +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - - Concatenate pairs of 16-byte blocks in "a" and "b" into a 32-byte temporary - result, shift the result right by "imm8" bytes, and store the low 16 bytes in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - tmp_dst[255:0] := ((a[127:0] << 128)[255:0] OR b[127:0]) >> (imm8*8) - FOR j := 0 to 15 - i := j*8 - IF k[j] - dst[i+7:i] := tmp_dst[i+7:i] - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + + Concatenate pairs of 16-byte blocks in "a" and "b" into a 32-byte temporary result, shift the result right by "imm8" bytes, and store the low 16 bytes in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp_dst[255:0] := ((a[127:0] << 128)[255:0] OR b[127:0]) >> (imm8*8) +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - Concatenate pairs of 16-byte blocks in "a" and "b" into a 32-byte temporary - result, shift the result right by "imm8" bytes, and store the low 16 bytes in "dst" - using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - tmp_dst[255:0] := ((a[127:0] << 128)[255:0] OR b[127:0]) >> (imm8*8) - FOR j := 0 to 15 - i := j*8 - IF k[j] - dst[i+7:i] := tmp_dst[i+7:i] - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Concatenate pairs of 16-byte blocks in "a" and "b" into a 32-byte temporary result, shift the result right by "imm8" bytes, and store the low 16 bytes in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp_dst[255:0] := ((a[127:0] << 128)[255:0] OR b[127:0]) >> (imm8*8) +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Blend packed 8-bit integers from "a" and "b" using control mask "k", and store - the results in "dst". - - FOR j := 0 to 31 - i := j*8 - IF k[j] - dst[i+7:i] := b[i+7:i] - ELSE - dst[i+7:i] := a[i+7:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Blend packed 8-bit integers from "a" and "b" using control mask "k", and store the results in "dst". + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := b[i+7:i] + ELSE + dst[i+7:i] := a[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
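The blend operations above and below are pure mask-driven selects: bit j of "k" chooses element j of "b" over "a". A plain-Rust sketch of the 256-bit byte form (illustrative name only):

```rust
/// Sketch of the masked byte blend: a set bit j in `k` picks `b[j]`,
/// a clear bit keeps `a[j]`.
fn blend_epi8_model(k: u32, a: [i8; 32], b: [i8; 32]) -> [i8; 32] {
    core::array::from_fn(|j| if (k >> j) & 1 == 1 { b[j] } else { a[j] })
}
```

The 128-bit and 16-bit-element variants differ only in element count and width.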
- - - - - Blend packed 8-bit integers from "a" and "b" using control mask "k", and store - the results in "dst". - - FOR j := 0 to 15 - i := j*8 - IF k[j] - dst[i+7:i] := b[i+7:i] - ELSE - dst[i+7:i] := a[i+7:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Blend packed 8-bit integers from "a" and "b" using control mask "k", and store the results in "dst". + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := b[i+7:i] + ELSE + dst[i+7:i] := a[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Blend packed 16-bit integers from "a" and "b" using control mask "k", and store - the results in "dst". - - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := b[i+15:i] - ELSE - dst[i+15:i] := a[i+15:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Blend packed 16-bit integers from "a" and "b" using control mask "k", and store the results in "dst". + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := b[i+15:i] + ELSE + dst[i+15:i] := a[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Blend packed 16-bit integers from "a" and "b" using control mask "k", and store - the results in "dst". - - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := b[i+15:i] - ELSE - dst[i+15:i] := a[i+15:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Blend packed 16-bit integers from "a" and "b" using control mask "k", and store the results in "dst". + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := b[i+15:i] + ELSE + dst[i+15:i] := a[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Broadcast the low packed 8-bit integer from "a" to all elements of "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - FOR j := 0 to 31 - i := j*8 - IF k[j] - dst[i+7:i] := a[7:0] - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Broadcast the low packed 8-bit integer from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := a[7:0] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
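The broadcast above replicates the low element and then applies the writemask. A minimal sketch of that semantics in plain Rust (hypothetical name, not the stdarch code):

```rust
/// Sketch of the write-masked byte broadcast: selected lanes receive
/// `a[0]`, unselected lanes keep the corresponding `src` byte.
fn mask_broadcastb_model(src: [i8; 32], k: u32, a: [i8; 32]) -> [i8; 32] {
    core::array::from_fn(|j| if (k >> j) & 1 == 1 { a[0] } else { src[j] })
}
```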
- - - - Broadcast the low packed 8-bit integer from "a" to all elements of "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 31 - i := j*8 - IF k[j] - dst[i+7:i] := a[7:0] - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + Broadcast the low packed 8-bit integer from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := a[7:0] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Broadcast the low packed 8-bit integer from "a" to all elements of "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - FOR j := 0 to 15 - i := j*8 - IF k[j] - dst[i+7:i] := a[7:0] - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Broadcast the low packed 8-bit integer from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := a[7:0] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - Broadcast the low packed 8-bit integer from "a" to all elements of "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*8 - IF k[j] - dst[i+7:i] := a[7:0] - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + Broadcast the low packed 8-bit integer from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := a[7:0] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Broadcast the low packed 16-bit integer from "a" to all elements of "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := a[15:0] - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Broadcast the low packed 16-bit integer from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := a[15:0] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
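The 16-bit broadcast follows the same pattern; for variety, a sketch of the zero-masked flavor shown below, where unselected lanes become 0 instead of keeping "src" (illustrative name only):

```rust
/// Sketch of the zero-masked word broadcast: lanes with a clear mask bit
/// are zeroed rather than merged from a source vector.
fn maskz_broadcastw_model(k: u16, a: [u16; 16]) -> [u16; 16] {
    core::array::from_fn(|j| if (k >> j) & 1 == 1 { a[0] } else { 0 })
}
```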
- - - - Broadcast the low packed 16-bit integer from "a" to all elements of "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := a[15:0] - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + Broadcast the low packed 16-bit integer from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := a[15:0] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Broadcast the low packed 16-bit integer from "a" to all elements of "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := a[15:0] - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Broadcast the low packed 16-bit integer from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := a[15:0] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - Broadcast the low packed 16-bit integer from "a" to all elements of "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := a[15:0] - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + Broadcast the low packed 16-bit integer from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := a[15:0] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - Shuffle 16-bit integers in "a" and "b" across lanes using the corresponding - selector and index in "idx", and store the results in "dst" using writemask "k" - (elements are copied from "idx" when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - off := 16*idx[i+3:i] - dst[i+15:i] := idx[i+4] ? b[off+15:off] : a[off+15:off] - ELSE - dst[i+15:i] := idx[i+15:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Shuffle 16-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + off := 16*idx[i+3:i] + dst[i+15:i] := idx[i+4] ? b[off+15:off] : a[off+15:off] + ELSE + dst[i+15:i] := idx[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
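The `permutex2var` hunks here all share the two-source index scheme above: each 16-bit index uses five bits, bit 4 selecting the source and bits 3:0 the element; the three masked flavors differ only in the fallback element (from "idx", from "a", or zero). A hedged plain-Rust model of the unmasked 256-bit core (illustrative name):

```rust
/// Sketch of the two-source 16-bit permute: `off := 16*idx[3:0]` picks the
/// element, `idx[4]` picks between `a` and `b`.
fn permutex2var_epi16_model(a: [u16; 16], idx: [u16; 16], b: [u16; 16]) -> [u16; 16] {
    core::array::from_fn(|j| {
        let sel = (idx[j] & 0x1F) as usize; // only five index bits are used
        if sel & 0x10 != 0 { b[sel & 0xF] } else { a[sel & 0xF] }
    })
}
```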
- - - - - - Shuffle 16-bit integers in "a" and "b" across lanes using the corresponding - selector and index in "idx", and store the results in "dst" using writemask "k" - (elements are copied from "a" when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - off := 16*idx[i+3:i] - dst[i+15:i] := idx[i+4] ? b[off+15:off] : a[off+15:off] - ELSE - dst[i+15:i] := a[i+15:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Shuffle 16-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + off := 16*idx[i+3:i] + dst[i+15:i] := idx[i+4] ? b[off+15:off] : a[off+15:off] + ELSE + dst[i+15:i] := a[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - Shuffle 16-bit integers in "a" and "b" across lanes using the corresponding - selector and index in "idx", and store the results in "dst" using zeromask "k" (elements - are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - off := 16*idx[i+3:i] - dst[i+15:i] := idx[i+4] ? b[off+15:off] : a[off+15:off] - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Shuffle 16-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + off := 16*idx[i+3:i] + dst[i+15:i] := idx[i+4] ? b[off+15:off] : a[off+15:off] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Shuffle 16-bit integers in "a" and "b" across lanes using the corresponding - selector and index in "idx", and store the results in "dst". - - FOR j := 0 to 15 - i := j*16 - off := 16*idx[i+3:i] - dst[i+15:i] := idx[i+4] ? b[off+15:off] : a[off+15:off] - ENDFOR - dst[MAX:256] := 0 - - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Shuffle 16-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst". + +FOR j := 0 to 15 + i := j*16 + off := 16*idx[i+3:i] + dst[i+15:i] := idx[i+4] ? b[off+15:off] : a[off+15:off] +ENDFOR +dst[MAX:256] := 0 + + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - Shuffle 16-bit integers in "a" and "b" using the corresponding selector and - index in "idx", and store the results in "dst" using writemask "k" (elements are copied - from "idx" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - off := 16*idx[i+2:i] - dst[i+15:i] := idx[i+3] ? b[off+15:off] : a[off+15:off] - ELSE - dst[i+15:i] := idx[i+15:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Shuffle 16-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + off := 16*idx[i+2:i] + dst[i+15:i] := idx[i+3] ? b[off+15:off] : a[off+15:off] + ELSE + dst[i+15:i] := idx[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - Shuffle 16-bit integers in "a" and "b" using the corresponding selector and - index in "idx", and store the results in "dst" using writemask "k" (elements are copied - from "a" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - off := 16*idx[i+2:i] - dst[i+15:i] := idx[i+3] ? b[off+15:off] : a[off+15:off] - ELSE - dst[i+15:i] := a[i+15:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Shuffle 16-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + off := 16*idx[i+2:i] + dst[i+15:i] := idx[i+3] ? b[off+15:off] : a[off+15:off] + ELSE + dst[i+15:i] := a[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - Shuffle 16-bit integers in "a" and "b" using the corresponding selector and - index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - off := 16*idx[i+2:i] - dst[i+15:i] := idx[i+3] ? b[off+15:off] : a[off+15:off] - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Shuffle 16-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + off := 16*idx[i+2:i] + dst[i+15:i] := idx[i+3] ? b[off+15:off] : a[off+15:off] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Shuffle 16-bit integers in "a" and "b" using the corresponding selector and - index in "idx", and store the results in "dst". - - FOR j := 0 to 7 - i := j*16 - off := 16*idx[i+2:i] - dst[i+15:i] := idx[i+3] ? b[off+15:off] : a[off+15:off] - ENDFOR - dst[MAX:128] := 0 - - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Shuffle 16-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst". + +FOR j := 0 to 7 + i := j*16 + off := 16*idx[i+2:i] + dst[i+15:i] := idx[i+3] ? b[off+15:off] : a[off+15:off] +ENDFOR +dst[MAX:128] := 0 + + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - Shuffle 16-bit integers in "a" across lanes using the corresponding index in - "idx", and store the results in "dst" using writemask "k" (elements are copied from - "src" when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - id := idx[i+3:i]*16 - IF k[j] - dst[i+15:i] := a[id+15:id] - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Shuffle 16-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + id := idx[i+3:i]*16 + IF k[j] + dst[i+15:i] := a[id+15:id] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
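The single-source `permutexvar` hunks drop the source-select bit: four index bits address one of the 16 words of "a". A sketch of the write-masked form above in plain Rust (illustrative name, not the stdarch implementation):

```rust
/// Sketch of the write-masked lane-crossing 16-bit permute: selected lanes
/// read `a` at the low four bits of their index, others keep `src`.
fn mask_permutexvar_epi16_model(
    src: [u16; 16],
    k: u16,
    idx: [u16; 16],
    a: [u16; 16],
) -> [u16; 16] {
    core::array::from_fn(|j| {
        if (k >> j) & 1 == 1 { a[(idx[j] & 0xF) as usize] } else { src[j] }
    })
}
```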
- - - - - Shuffle 16-bit integers in "a" across lanes using the corresponding index in - "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when - the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - id := idx[i+3:i]*16 - IF k[j] - dst[i+15:i] := a[id+15:id] - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Shuffle 16-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + id := idx[i+3:i]*16 + IF k[j] + dst[i+15:i] := a[id+15:id] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - Shuffle 16-bit integers in "a" across lanes using the corresponding index in - "idx", and store the results in "dst". - - FOR j := 0 to 15 - i := j*16 - id := idx[i+3:i]*16 - dst[i+15:i] := a[id+15:id] - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + Shuffle 16-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst". + +FOR j := 0 to 15 + i := j*16 + id := idx[i+3:i]*16 + dst[i+15:i] := a[id+15:id] +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - Shuffle 16-bit integers in "a" using the corresponding index in "idx", and - store the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - id := idx[i+2:i]*16 - IF k[j] - dst[i+15:i] := a[id+15:id] - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Shuffle 16-bit integers in "a" using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + id := idx[i+2:i]*16 + IF k[j] + dst[i+15:i] := a[id+15:id] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Shuffle 16-bit integers in "a" using the corresponding index in "idx", and - store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - id := idx[i+2:i]*16 - IF k[j] - dst[i+15:i] := a[id+15:id] - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Shuffle 16-bit integers in "a" using the corresponding index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + id := idx[i+2:i]*16 + IF k[j] + dst[i+15:i] := a[id+15:id] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - Shuffle 16-bit integers in "a" using the corresponding index in "idx", and - store the results in "dst". - - FOR j := 0 to 7 - i := j*16 - id := idx[i+2:i]*16 - dst[i+15:i] := a[id+15:id] - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + Shuffle 16-bit integers in "a" using the corresponding index in "idx", and store the results in "dst". + +FOR j := 0 to 7 + i := j*16 + id := idx[i+2:i]*16 + dst[i+15:i] := a[id+15:id] +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - Set each bit of mask register "k" based on the most significant bit of the - corresponding packed 8-bit integer in "a". - - FOR j := 0 to 31 - i := j*8 - IF a[i+7] - k[j] := 1 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:32] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + Set each bit of mask register "k" based on the most significant bit of the corresponding packed 8-bit integer in "a". + +FOR j := 0 to 31 + i := j*8 + IF a[i+7] + k[j] := 1 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
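The element-to-mask moves above pack one sign bit per element into a mask register. A plain-Rust sketch of the 32-byte form (illustrative name only):

```rust
/// Sketch of sign-bit extraction: bit j of the result is the most
/// significant bit of byte j of `a`.
fn movepi8_mask_model(a: [i8; 32]) -> u32 {
    a.iter()
        .enumerate()
        .fold(0u32, |k, (j, &byte)| k | (((byte < 0) as u32) << j))
}
```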
- - - Set each bit of mask register "k" based on the most significant bit of the - corresponding packed 8-bit integer in "a". - - FOR j := 0 to 15 - i := j*8 - IF a[i+7] - k[j] := 1 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:16] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + Set each bit of mask register "k" based on the most significant bit of the corresponding packed 8-bit integer in "a". + +FOR j := 0 to 15 + i := j*8 + IF a[i+7] + k[j] := 1 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - Set each packed 8-bit integer in "dst" to all ones or all zeros based on the - value of the corresponding bit in "k". - - FOR j := 0 to 31 - i := j*8 - IF k[j] - dst[i+7:i] := 0xFF - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + Set each packed 8-bit integer in "dst" to all ones or all zeros based on the value of the corresponding bit in "k". + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := 0xFF + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
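The mask-to-element moves go the other way: each mask bit expands to an all-ones or all-zeros element. A sketch for bytes (the 16-bit variants below are identical with 0xFFFF words); the name is illustrative:

```rust
/// Sketch of mask expansion: a set bit j yields 0xFF in byte j, a clear
/// bit yields 0x00.
fn movm_epi8_model(k: u32) -> [u8; 32] {
    core::array::from_fn(|j| if (k >> j) & 1 == 1 { 0xFF } else { 0x00 })
}
```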
- - - Set each packed 8-bit integer in "dst" to all ones or all zeros based on the - value of the corresponding bit in "k". - - FOR j := 0 to 15 - i := j*8 - IF k[j] - dst[i+7:i] := 0xFF - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + Set each packed 8-bit integer in "dst" to all ones or all zeros based on the value of the corresponding bit in "k". + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := 0xFF + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - Set each packed 16-bit integer in "dst" to all ones or all zeros based on the - value of the corresponding bit in "k". - - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := 0xFFFF - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + Set each packed 16-bit integer in "dst" to all ones or all zeros based on the value of the corresponding bit in "k". + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := 0xFFFF + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - Set each packed 16-bit integer in "dst" to all ones or all zeros based on the - value of the corresponding bit in "k". - - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := 0xFFFF - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + Set each packed 16-bit integer in "dst" to all ones or all zeros based on the value of the corresponding bit in "k". + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := 0xFFFF + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - Set each bit of mask register "k" based on the most significant bit of the - corresponding packed 16-bit integer in "a". - - FOR j := 0 to 15 - i := j*16 - IF a[i+15] - k[j] := 1 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:16] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + Set each bit of mask register "k" based on the most significant bit of the corresponding packed 16-bit integer in "a". + +FOR j := 0 to 15 + i := j*16 + IF a[i+15] + k[j] := 1 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - Set each bit of mask register "k" based on the most significant bit of the - corresponding packed 16-bit integer in "a". - - FOR j := 0 to 7 - i := j*16 - IF a[i+15] - k[j] := 1 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:8] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + Set each bit of mask register "k" based on the most significant bit of the corresponding packed 16-bit integer in "a". + +FOR j := 0 to 7 + i := j*16 + IF a[i+15] + k[j] := 1 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - Shuffle packed 8-bit integers in "a" according to shuffle control mask in the - corresponding 8-bit element of "b", and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 31 - i := j*8 - IF k[j] - IF b[i+7] == 1 + + + + + + Shuffle packed 8-bit integers in "a" according to shuffle control mask in the corresponding 8-bit element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + IF b[i+7] == 1 dst[i+7:i] := 0 - ELSE + ELSE index[4:0] := b[i+3:i] + (j & 0x10) dst[i+7:i] := a[index*8+7:index*8] - FI - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Swizzle + FI + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512BW + AVX512VL +
immintrin.h
+ Swizzle
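The masked byte shuffles in this group keep PSHUFB's in-lane behavior: a set high bit in a control byte zeroes the output byte, and the `(j & 0x10)` term above keeps each index inside its own 128-bit half. A hedged plain-Rust model of the write-masked 256-bit form (illustrative name):

```rust
/// Sketch of the write-masked in-lane byte shuffle.
fn mask_shuffle_epi8_model(src: [u8; 32], k: u32, a: [u8; 32], b: [u8; 32]) -> [u8; 32] {
    core::array::from_fn(|j| {
        if (k >> j) & 1 == 0 {
            src[j] // writemask clear: keep the src byte
        } else if b[j] & 0x80 != 0 {
            0 // control bit 7 set: zero this byte
        } else {
            a[(b[j] & 0x0F) as usize + (j & 0x10)] // stay within the half
        }
    })
}
```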
- - - - - Shuffle packed 8-bit integers in "a" according to shuffle control mask in the - corresponding 8-bit element of "b", and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 31 - i := j*8 - IF k[j] - IF b[i+7] == 1 + + + + + Shuffle packed 8-bit integers in "a" according to shuffle control mask in the corresponding 8-bit element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + IF b[i+7] == 1 dst[i+7:i] := 0 - ELSE + ELSE index[4:0] := b[i+3:i] + (j & 0x10) dst[i+7:i] := a[index*8+7:index*8] - FI - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Swizzle + FI + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512BW + AVX512VL +
immintrin.h
+ Swizzle
- - - - - - Shuffle packed 8-bit integers in "a" according to shuffle control mask in the - corresponding 8-bit element of "b", and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*8 - IF k[j] - IF b[i+7] == 1 + + + + + + Shuffle packed 8-bit integers in "a" according to shuffle control mask in the corresponding 8-bit element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + IF b[i+7] == 1 dst[i+7:i] := 0 - ELSE + ELSE index[3:0] := b[i+3:i] dst[i+7:i] := a[index*8+7:index*8] - FI - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Swizzle + FI + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512BW + AVX512VL +
immintrin.h
+ Swizzle
- - - - - Shuffle packed 8-bit integers in "a" according to shuffle control mask in the - corresponding 8-bit element of "b", and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*8 - IF k[j] - IF b[i+7] == 1 + + + + + Shuffle packed 8-bit integers in "a" according to shuffle control mask in the corresponding 8-bit element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + IF b[i+7] == 1 dst[i+7:i] := 0 - ELSE + ELSE index[3:0] := b[i+3:i] dst[i+7:i] := a[index*8+7:index*8] - FI - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Swizzle + FI + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512BW + AVX512VL +
immintrin.h
+ Swizzle
- - - - - - Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of "a" using the - control in "imm8". Store the results in the high 64 bits of 128-bit lanes of "dst", with - the low 64 bits of 128-bit lanes being copied from from "a" to "dst", using writemask - "k" (elements are copied from "src" when the corresponding mask bit is not set). - - tmp_dst[63:0] := a[63:0] - tmp_dst[79:64] := (a >> (imm8[1:0] * 16))[79:64] - tmp_dst[95:80] := (a >> (imm8[3:2] * 16))[79:64] - tmp_dst[111:96] := (a >> (imm8[5:4] * 16))[79:64] - tmp_dst[127:112] := (a >> (imm8[7:6] * 16))[79:64] - tmp_dst[191:128] := a[191:128] - tmp_dst[207:192] := (a >> (imm8[1:0] * 16))[207:192] - tmp_dst[223:208] := (a >> (imm8[3:2] * 16))[207:192] - tmp_dst[239:224] := (a >> (imm8[5:4] * 16))[207:192] - tmp_dst[255:240] := (a >> (imm8[7:6] * 16))[207:192] - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := tmp_dst[i+15:i] - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of "a" using the control in "imm8". Store the results in the high 64 bits of 128-bit lanes of "dst", with the low 64 bits of 128-bit lanes being copied from "a" to "dst", using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). +
+tmp_dst[63:0] := a[63:0]
+tmp_dst[79:64] := (a >> (imm8[1:0] * 16))[79:64]
+tmp_dst[95:80] := (a >> (imm8[3:2] * 16))[79:64]
+tmp_dst[111:96] := (a >> (imm8[5:4] * 16))[79:64]
+tmp_dst[127:112] := (a >> (imm8[7:6] * 16))[79:64]
+tmp_dst[191:128] := a[191:128]
+tmp_dst[207:192] := (a >> (imm8[1:0] * 16))[207:192]
+tmp_dst[223:208] := (a >> (imm8[3:2] * 16))[207:192]
+tmp_dst[239:224] := (a >> (imm8[5:4] * 16))[207:192]
+tmp_dst[255:240] := (a >> (imm8[7:6] * 16))[207:192]
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
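The `shufflehi` hunks in this group all reduce to one per-lane rule: the low four words pass through, and each high word is chosen from the high quad by a 2-bit field of "imm8". A plain-Rust sketch of one 128-bit lane, ignoring the mask step (illustrative name):

```rust
/// Sketch of one lane of `shufflehi_epi16`: word 4+w of the result is
/// word 4+sel of `a`, where sel is bits 2w+1:2w of `imm8`.
fn shufflehi_lane(a: [u16; 8], imm8: u8) -> [u16; 8] {
    let mut dst = a; // words 0..=3 are copied unchanged
    for w in 0..4 {
        let sel = ((imm8 >> (2 * w)) & 0b11) as usize;
        dst[4 + w] = a[4 + sel];
    }
    dst
}
```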
- - - - - Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of "a" using the - control in "imm8". Store the results in the high 64 bits of 128-bit lanes of "dst", with - the low 64 bits of 128-bit lanes being copied from from "a" to "dst", using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - tmp_dst[63:0] := a[63:0] - tmp_dst[79:64] := (a >> (imm8[1:0] * 16))[79:64] - tmp_dst[95:80] := (a >> (imm8[3:2] * 16))[79:64] - tmp_dst[111:96] := (a >> (imm8[5:4] * 16))[79:64] - tmp_dst[127:112] := (a >> (imm8[7:6] * 16))[79:64] - tmp_dst[191:128] := a[191:128] - tmp_dst[207:192] := (a >> (imm8[1:0] * 16))[207:192] - tmp_dst[223:208] := (a >> (imm8[3:2] * 16))[207:192] - tmp_dst[239:224] := (a >> (imm8[5:4] * 16))[207:192] - tmp_dst[255:240] := (a >> (imm8[7:6] * 16))[207:192] - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := tmp_dst[i+15:i] - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of "a" using the control in "imm8". Store the results in the high 64 bits of 128-bit lanes of "dst", with the low 64 bits of 128-bit lanes being copied from "a" to "dst", using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). +
+tmp_dst[63:0] := a[63:0]
+tmp_dst[79:64] := (a >> (imm8[1:0] * 16))[79:64]
+tmp_dst[95:80] := (a >> (imm8[3:2] * 16))[79:64]
+tmp_dst[111:96] := (a >> (imm8[5:4] * 16))[79:64]
+tmp_dst[127:112] := (a >> (imm8[7:6] * 16))[79:64]
+tmp_dst[191:128] := a[191:128]
+tmp_dst[207:192] := (a >> (imm8[1:0] * 16))[207:192]
+tmp_dst[223:208] := (a >> (imm8[3:2] * 16))[207:192]
+tmp_dst[239:224] := (a >> (imm8[5:4] * 16))[207:192]
+tmp_dst[255:240] := (a >> (imm8[7:6] * 16))[207:192]
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - Shuffle 16-bit integers in the high 64 bits of "a" using the control in "imm8". - Store the results in the high 64 bits of "dst", with the low 64 bits being copied from - from "a" to "dst", using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - tmp_dst[63:0] := a[63:0] - tmp_dst[79:64] := (a >> (imm8[1:0] * 16))[79:64] - tmp_dst[95:80] := (a >> (imm8[3:2] * 16))[79:64] - tmp_dst[111:96] := (a >> (imm8[5:4] * 16))[79:64] - tmp_dst[127:112] := (a >> (imm8[7:6] * 16))[79:64] - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := tmp_dst[i+15:i] - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Shuffle 16-bit integers in the high 64 bits of "a" using the control in "imm8". Store the results in the high 64 bits of "dst", with the low 64 bits being copied from "a" to "dst", using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). +
+tmp_dst[63:0] := a[63:0]
+tmp_dst[79:64] := (a >> (imm8[1:0] * 16))[79:64]
+tmp_dst[95:80] := (a >> (imm8[3:2] * 16))[79:64]
+tmp_dst[111:96] := (a >> (imm8[5:4] * 16))[79:64]
+tmp_dst[127:112] := (a >> (imm8[7:6] * 16))[79:64]
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Shuffle 16-bit integers in the high 64 bits of "a" using the control in "imm8". - Store the results in the high 64 bits of "dst", with the low 64 bits being copied from - from "a" to "dst", using zeromask "k" (elements are zeroed out when the corresponding - mask bit is not set). - - tmp_dst[63:0] := a[63:0] - tmp_dst[79:64] := (a >> (imm8[1:0] * 16))[79:64] - tmp_dst[95:80] := (a >> (imm8[3:2] * 16))[79:64] - tmp_dst[111:96] := (a >> (imm8[5:4] * 16))[79:64] - tmp_dst[127:112] := (a >> (imm8[7:6] * 16))[79:64] - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := tmp_dst[i+15:i] - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Shuffle 16-bit integers in the high 64 bits of "a" using the control in "imm8". Store the results in the high 64 bits of "dst", with the low 64 bits being copied from "a" to "dst", using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). +
+tmp_dst[63:0] := a[63:0]
+tmp_dst[79:64] := (a >> (imm8[1:0] * 16))[79:64]
+tmp_dst[95:80] := (a >> (imm8[3:2] * 16))[79:64]
+tmp_dst[111:96] := (a >> (imm8[5:4] * 16))[79:64]
+tmp_dst[127:112] := (a >> (imm8[7:6] * 16))[79:64]
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of "a" using the - control in "imm8". Store the results in the low 64 bits of 128-bit lanes of "dst", with - the high 64 bits of 128-bit lanes being copied from from "a" to "dst", using writemask - "k" (elements are copied from "src" when the corresponding mask bit is not set). - - tmp_dst[15:0] := (a >> (imm8[1:0] * 16))[15:0] - tmp_dst[31:16] := (a >> (imm8[3:2] * 16))[15:0] - tmp_dst[47:32] := (a >> (imm8[5:4] * 16))[15:0] - tmp_dst[63:48] := (a >> (imm8[7:6] * 16))[15:0] - tmp_dst[127:64] := a[127:64] - tmp_dst[143:128] := (a >> (imm8[1:0] * 16))[143:128] - tmp_dst[159:144] := (a >> (imm8[3:2] * 16))[143:128] - tmp_dst[175:160] := (a >> (imm8[5:4] * 16))[143:128] - tmp_dst[191:176] := (a >> (imm8[7:6] * 16))[143:128] - tmp_dst[255:192] := a[255:192] - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := tmp_dst[i+15:i] - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of "a" using the control in "imm8". Store the results in the low 64 bits of 128-bit lanes of "dst", with the high 64 bits of 128-bit lanes being copied from "a" to "dst", using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). +
+tmp_dst[15:0] := (a >> (imm8[1:0] * 16))[15:0]
+tmp_dst[31:16] := (a >> (imm8[3:2] * 16))[15:0]
+tmp_dst[47:32] := (a >> (imm8[5:4] * 16))[15:0]
+tmp_dst[63:48] := (a >> (imm8[7:6] * 16))[15:0]
+tmp_dst[127:64] := a[127:64]
+tmp_dst[143:128] := (a >> (imm8[1:0] * 16))[143:128]
+tmp_dst[159:144] := (a >> (imm8[3:2] * 16))[143:128]
+tmp_dst[175:160] := (a >> (imm8[5:4] * 16))[143:128]
+tmp_dst[191:176] := (a >> (imm8[7:6] * 16))[143:128]
+tmp_dst[255:192] := a[255:192]
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
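The `shufflelo` hunks are the mirror image of `shufflehi`: the low quad is rewritten from "imm8" and the high quad passes through. A matching single-lane sketch, again ignoring the mask step (illustrative name):

```rust
/// Sketch of one lane of `shufflelo_epi16`: word w of the result is word
/// sel of `a`, where sel is bits 2w+1:2w of `imm8`; words 4..=7 pass through.
fn shufflelo_lane(a: [u16; 8], imm8: u8) -> [u16; 8] {
    let mut dst = a;
    for w in 0..4 {
        let sel = ((imm8 >> (2 * w)) & 0b11) as usize;
        dst[w] = a[sel];
    }
    dst
}
```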
- - - - - Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of "a" using the - control in "imm8". Store the results in the low 64 bits of 128-bit lanes of "dst", with - the high 64 bits of 128-bit lanes being copied from from "a" to "dst", using zeromask - "k" (elements are zeroed out when the corresponding mask bit is not set). - - tmp_dst[15:0] := (a >> (imm8[1:0] * 16))[15:0] - tmp_dst[31:16] := (a >> (imm8[3:2] * 16))[15:0] - tmp_dst[47:32] := (a >> (imm8[5:4] * 16))[15:0] - tmp_dst[63:48] := (a >> (imm8[7:6] * 16))[15:0] - tmp_dst[127:64] := a[127:64] - tmp_dst[143:128] := (a >> (imm8[1:0] * 16))[143:128] - tmp_dst[159:144] := (a >> (imm8[3:2] * 16))[143:128] - tmp_dst[175:160] := (a >> (imm8[5:4] * 16))[143:128] - tmp_dst[191:176] := (a >> (imm8[7:6] * 16))[143:128] - tmp_dst[255:192] := a[255:192] - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := tmp_dst[i+15:i] - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of "a" using the control in "imm8". Store the results in the low 64 bits of 128-bit lanes of "dst", with the high 64 bits of 128-bit lanes being copied from "a" to "dst", using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). +
+tmp_dst[15:0] := (a >> (imm8[1:0] * 16))[15:0]
+tmp_dst[31:16] := (a >> (imm8[3:2] * 16))[15:0]
+tmp_dst[47:32] := (a >> (imm8[5:4] * 16))[15:0]
+tmp_dst[63:48] := (a >> (imm8[7:6] * 16))[15:0]
+tmp_dst[127:64] := a[127:64]
+tmp_dst[143:128] := (a >> (imm8[1:0] * 16))[143:128]
+tmp_dst[159:144] := (a >> (imm8[3:2] * 16))[143:128]
+tmp_dst[175:160] := (a >> (imm8[5:4] * 16))[143:128]
+tmp_dst[191:176] := (a >> (imm8[7:6] * 16))[143:128]
+tmp_dst[255:192] := a[255:192]
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - Shuffle 16-bit integers in the low 64 bits of "a" using the control in "imm8". - Store the results in the low 64 bits of "dst", with the high 64 bits being copied from - from "a" to "dst", using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - tmp_dst[15:0] := (a >> (imm8[1:0] * 16))[15:0] - tmp_dst[31:16] := (a >> (imm8[3:2] * 16))[15:0] - tmp_dst[47:32] := (a >> (imm8[5:4] * 16))[15:0] - tmp_dst[63:48] := (a >> (imm8[7:6] * 16))[15:0] - tmp_dst[127:64] := a[127:64] - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := tmp_dst[i+15:i] - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Shuffle 16-bit integers in the low 64 bits of "a" using the control in "imm8". Store the results in the low 64 bits of "dst", with the high 64 bits being copied from "a" to "dst", using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). +
+tmp_dst[15:0] := (a >> (imm8[1:0] * 16))[15:0]
+tmp_dst[31:16] := (a >> (imm8[3:2] * 16))[15:0]
+tmp_dst[47:32] := (a >> (imm8[5:4] * 16))[15:0]
+tmp_dst[63:48] := (a >> (imm8[7:6] * 16))[15:0]
+tmp_dst[127:64] := a[127:64]
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Shuffle 16-bit integers in the low 64 bits of "a" using the control in "imm8". - Store the results in the low 64 bits of "dst", with the high 64 bits being copied from - from "a" to "dst", using zeromask "k" (elements are zeroed out when the corresponding - mask bit is not set). - - tmp_dst[15:0] := (a >> (imm8[1:0] * 16))[15:0] - tmp_dst[31:16] := (a >> (imm8[3:2] * 16))[15:0] - tmp_dst[47:32] := (a >> (imm8[5:4] * 16))[15:0] - tmp_dst[63:48] := (a >> (imm8[7:6] * 16))[15:0] - tmp_dst[127:64] := a[127:64] - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := tmp_dst[i+15:i] - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Shuffle 16-bit integers in the low 64 bits of "a" using the control in "imm8". Store the results in the low 64 bits of "dst", with the high 64 bits being copied from "a" to "dst", using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). +
+tmp_dst[15:0] := (a >> (imm8[1:0] * 16))[15:0]
+tmp_dst[31:16] := (a >> (imm8[3:2] * 16))[15:0]
+tmp_dst[47:32] := (a >> (imm8[5:4] * 16))[15:0]
+tmp_dst[63:48] := (a >> (imm8[7:6] * 16))[15:0]
+tmp_dst[127:64] := a[127:64]
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - Unpack and interleave 8-bit integers from the high half of each 128-bit lane in - "a" and "b", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - - DEFINE INTERLEAVE_HIGH_BYTES(src1[127:0], src2[127:0]) { - dst[7:0] := src1[71:64] - dst[15:8] := src2[71:64] - dst[23:16] := src1[79:72] - dst[31:24] := src2[79:72] - dst[39:32] := src1[87:80] - dst[47:40] := src2[87:80] - dst[55:48] := src1[95:88] - dst[63:56] := src2[95:88] - dst[71:64] := src1[103:96] - dst[79:72] := src2[103:96] - dst[87:80] := src1[111:104] - dst[95:88] := src2[111:104] - dst[103:96] := src1[119:112] - dst[111:104] := src2[119:112] - dst[119:112] := src1[127:120] - dst[127:120] := src2[127:120] - RETURN dst[127:0] - } - tmp_dst[127:0] := INTERLEAVE_HIGH_BYTES(a[127:0], b[127:0]) - tmp_dst[255:128] := INTERLEAVE_HIGH_BYTES(a[255:128], b[255:128]) - FOR j := 0 to 31 - i := j*8 - IF k[j] - dst[i+7:i] := tmp_dst[i+7:i] - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Unpack and interleave 8-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE INTERLEAVE_HIGH_BYTES(src1[127:0], src2[127:0]) { + dst[7:0] := src1[71:64] + dst[15:8] := src2[71:64] + dst[23:16] := src1[79:72] + dst[31:24] := src2[79:72] + dst[39:32] := src1[87:80] + dst[47:40] := src2[87:80] + dst[55:48] := src1[95:88] + dst[63:56] := src2[95:88] + dst[71:64] := src1[103:96] + dst[79:72] := src2[103:96] + dst[87:80] := src1[111:104] + dst[95:88] := src2[111:104] + dst[103:96] := src1[119:112] + dst[111:104] := src2[119:112] + dst[119:112] := src1[127:120] + dst[127:120] := src2[127:120] + RETURN dst[127:0] +} +tmp_dst[127:0] := INTERLEAVE_HIGH_BYTES(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_HIGH_BYTES(a[255:128], b[255:128]) +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Unpack and interleave 8-bit integers from the high half of each 128-bit lane in - "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out - when the corresponding mask bit is not set). - - DEFINE INTERLEAVE_HIGH_BYTES(src1[127:0], src2[127:0]) { - dst[7:0] := src1[71:64] - dst[15:8] := src2[71:64] - dst[23:16] := src1[79:72] - dst[31:24] := src2[79:72] - dst[39:32] := src1[87:80] - dst[47:40] := src2[87:80] - dst[55:48] := src1[95:88] - dst[63:56] := src2[95:88] - dst[71:64] := src1[103:96] - dst[79:72] := src2[103:96] - dst[87:80] := src1[111:104] - dst[95:88] := src2[111:104] - dst[103:96] := src1[119:112] - dst[111:104] := src2[119:112] - dst[119:112] := src1[127:120] - dst[127:120] := src2[127:120] - RETURN dst[127:0] - } - tmp_dst[127:0] := INTERLEAVE_HIGH_BYTES(a[127:0], b[127:0]) - tmp_dst[255:128] := INTERLEAVE_HIGH_BYTES(a[255:128], b[255:128]) - FOR j := 0 to 31 - i := j*8 - IF k[j] - dst[i+7:i] := tmp_dst[i+7:i] - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Unpack and interleave 8-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE INTERLEAVE_HIGH_BYTES(src1[127:0], src2[127:0]) { + dst[7:0] := src1[71:64] + dst[15:8] := src2[71:64] + dst[23:16] := src1[79:72] + dst[31:24] := src2[79:72] + dst[39:32] := src1[87:80] + dst[47:40] := src2[87:80] + dst[55:48] := src1[95:88] + dst[63:56] := src2[95:88] + dst[71:64] := src1[103:96] + dst[79:72] := src2[103:96] + dst[87:80] := src1[111:104] + dst[95:88] := src2[111:104] + dst[103:96] := src1[119:112] + dst[111:104] := src2[119:112] + dst[119:112] := src1[127:120] + dst[127:120] := src2[127:120] + RETURN dst[127:0] +} +tmp_dst[127:0] := INTERLEAVE_HIGH_BYTES(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_HIGH_BYTES(a[255:128], b[255:128]) +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
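The INTERLEAVE_HIGH_BYTES helper above operates on each 128-bit lane independently, which is the detail that most often surprises readers of the 256-bit unpack intrinsics. A scalar sketch of the unmasked interleave (illustrative names; the per-byte mask blend is the same select shown earlier):

/// Interleaves the high eight bytes of `a` and `b` within each 128-bit lane,
/// as a0,b0,a1,b1,... per lane; lanes do not mix.
fn unpackhi_epi8_256(a: [u8; 32], b: [u8; 32]) -> [u8; 32] {
    let mut dst = [0u8; 32];
    for lane in 0..2 {
        let base = lane * 16;
        for i in 0..8 {
            dst[base + 2 * i] = a[base + 8 + i];
            dst[base + 2 * i + 1] = b[base + 8 + i];
        }
    }
    dst
}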
- - - - - - Unpack and interleave 8-bit integers from the high half of "a" and "b", and - store the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - DEFINE INTERLEAVE_HIGH_BYTES(src1[127:0], src2[127:0]) { - dst[7:0] := src1[71:64] - dst[15:8] := src2[71:64] - dst[23:16] := src1[79:72] - dst[31:24] := src2[79:72] - dst[39:32] := src1[87:80] - dst[47:40] := src2[87:80] - dst[55:48] := src1[95:88] - dst[63:56] := src2[95:88] - dst[71:64] := src1[103:96] - dst[79:72] := src2[103:96] - dst[87:80] := src1[111:104] - dst[95:88] := src2[111:104] - dst[103:96] := src1[119:112] - dst[111:104] := src2[119:112] - dst[119:112] := src1[127:120] - dst[127:120] := src2[127:120] - RETURN dst[127:0] - } - tmp_dst[127:0] := INTERLEAVE_HIGH_BYTES(a[127:0], b[127:0]) - FOR j := 0 to 15 - i := j*8 - IF k[j] - dst[i+7:i] := tmp_dst[i+7:i] - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Unpack and interleave 8-bit integers from the high half of "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE INTERLEAVE_HIGH_BYTES(src1[127:0], src2[127:0]) { + dst[7:0] := src1[71:64] + dst[15:8] := src2[71:64] + dst[23:16] := src1[79:72] + dst[31:24] := src2[79:72] + dst[39:32] := src1[87:80] + dst[47:40] := src2[87:80] + dst[55:48] := src1[95:88] + dst[63:56] := src2[95:88] + dst[71:64] := src1[103:96] + dst[79:72] := src2[103:96] + dst[87:80] := src1[111:104] + dst[95:88] := src2[111:104] + dst[103:96] := src1[119:112] + dst[111:104] := src2[119:112] + dst[119:112] := src1[127:120] + dst[127:120] := src2[127:120] + RETURN dst[127:0] +} +tmp_dst[127:0] := INTERLEAVE_HIGH_BYTES(a[127:0], b[127:0]) +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Unpack and interleave 8-bit integers from the high half of "a" and "b", and - store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - DEFINE INTERLEAVE_HIGH_BYTES(src1[127:0], src2[127:0]) { - dst[7:0] := src1[71:64] - dst[15:8] := src2[71:64] - dst[23:16] := src1[79:72] - dst[31:24] := src2[79:72] - dst[39:32] := src1[87:80] - dst[47:40] := src2[87:80] - dst[55:48] := src1[95:88] - dst[63:56] := src2[95:88] - dst[71:64] := src1[103:96] - dst[79:72] := src2[103:96] - dst[87:80] := src1[111:104] - dst[95:88] := src2[111:104] - dst[103:96] := src1[119:112] - dst[111:104] := src2[119:112] - dst[119:112] := src1[127:120] - dst[127:120] := src2[127:120] - RETURN dst[127:0] - } - tmp_dst[127:0] := INTERLEAVE_HIGH_BYTES(a[127:0], b[127:0]) - FOR j := 0 to 15 - i := j*8 - IF k[j] - dst[i+7:i] := tmp_dst[i+7:i] - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Unpack and interleave 8-bit integers from the high half of "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE INTERLEAVE_HIGH_BYTES(src1[127:0], src2[127:0]) { + dst[7:0] := src1[71:64] + dst[15:8] := src2[71:64] + dst[23:16] := src1[79:72] + dst[31:24] := src2[79:72] + dst[39:32] := src1[87:80] + dst[47:40] := src2[87:80] + dst[55:48] := src1[95:88] + dst[63:56] := src2[95:88] + dst[71:64] := src1[103:96] + dst[79:72] := src2[103:96] + dst[87:80] := src1[111:104] + dst[95:88] := src2[111:104] + dst[103:96] := src1[119:112] + dst[111:104] := src2[119:112] + dst[119:112] := src1[127:120] + dst[127:120] := src2[127:120] + RETURN dst[127:0] +} +tmp_dst[127:0] := INTERLEAVE_HIGH_BYTES(a[127:0], b[127:0]) +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - Unpack and interleave 16-bit integers from the high half of each 128-bit lane - in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - - DEFINE INTERLEAVE_HIGH_WORDS(src1[127:0], src2[127:0]) { - dst[15:0] := src1[79:64] - dst[31:16] := src2[79:64] - dst[47:32] := src1[95:80] - dst[63:48] := src2[95:80] - dst[79:64] := src1[111:96] - dst[95:80] := src2[111:96] - dst[111:96] := src1[127:112] - dst[127:112] := src2[127:112] - RETURN dst[127:0] - } - tmp_dst[127:0] := INTERLEAVE_HIGH_WORDS(a[127:0], b[127:0]) - tmp_dst[255:128] := INTERLEAVE_HIGH_WORDS(a[255:128], b[255:128]) - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := tmp_dst[i+15:i] - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Unpack and interleave 16-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE INTERLEAVE_HIGH_WORDS(src1[127:0], src2[127:0]) { + dst[15:0] := src1[79:64] + dst[31:16] := src2[79:64] + dst[47:32] := src1[95:80] + dst[63:48] := src2[95:80] + dst[79:64] := src1[111:96] + dst[95:80] := src2[111:96] + dst[111:96] := src1[127:112] + dst[127:112] := src2[127:112] + RETURN dst[127:0] +} +tmp_dst[127:0] := INTERLEAVE_HIGH_WORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_HIGH_WORDS(a[255:128], b[255:128]) +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Unpack and interleave 16-bit integers from the high half of each 128-bit lane - in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - - DEFINE INTERLEAVE_HIGH_WORDS(src1[127:0], src2[127:0]) { - dst[15:0] := src1[79:64] - dst[31:16] := src2[79:64] - dst[47:32] := src1[95:80] - dst[63:48] := src2[95:80] - dst[79:64] := src1[111:96] - dst[95:80] := src2[111:96] - dst[111:96] := src1[127:112] - dst[127:112] := src2[127:112] - RETURN dst[127:0] - } - tmp_dst[127:0] := INTERLEAVE_HIGH_WORDS(a[127:0], b[127:0]) - tmp_dst[255:128] := INTERLEAVE_HIGH_WORDS(a[255:128], b[255:128]) - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := tmp_dst[i+15:i] - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Unpack and interleave 16-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE INTERLEAVE_HIGH_WORDS(src1[127:0], src2[127:0]) { + dst[15:0] := src1[79:64] + dst[31:16] := src2[79:64] + dst[47:32] := src1[95:80] + dst[63:48] := src2[95:80] + dst[79:64] := src1[111:96] + dst[95:80] := src2[111:96] + dst[111:96] := src1[127:112] + dst[127:112] := src2[127:112] + RETURN dst[127:0] +} +tmp_dst[127:0] := INTERLEAVE_HIGH_WORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_HIGH_WORDS(a[255:128], b[255:128]) +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - Unpack and interleave 16-bit integers from the high half of "a" and "b", and - store the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - DEFINE INTERLEAVE_HIGH_WORDS(src1[127:0], src2[127:0]) { - dst[15:0] := src1[79:64] - dst[31:16] := src2[79:64] - dst[47:32] := src1[95:80] - dst[63:48] := src2[95:80] - dst[79:64] := src1[111:96] - dst[95:80] := src2[111:96] - dst[111:96] := src1[127:112] - dst[127:112] := src2[127:112] - RETURN dst[127:0] - } - tmp_dst[127:0] := INTERLEAVE_HIGH_WORDS(a[127:0], b[127:0]) - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := tmp_dst[i+15:i] - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Unpack and interleave 16-bit integers from the high half of "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE INTERLEAVE_HIGH_WORDS(src1[127:0], src2[127:0]) { + dst[15:0] := src1[79:64] + dst[31:16] := src2[79:64] + dst[47:32] := src1[95:80] + dst[63:48] := src2[95:80] + dst[79:64] := src1[111:96] + dst[95:80] := src2[111:96] + dst[111:96] := src1[127:112] + dst[127:112] := src2[127:112] + RETURN dst[127:0] +} +tmp_dst[127:0] := INTERLEAVE_HIGH_WORDS(a[127:0], b[127:0]) +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Unpack and interleave 16-bit integers from the high half of "a" and "b", and - store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - DEFINE INTERLEAVE_HIGH_WORDS(src1[127:0], src2[127:0]) { - dst[15:0] := src1[79:64] - dst[31:16] := src2[79:64] - dst[47:32] := src1[95:80] - dst[63:48] := src2[95:80] - dst[79:64] := src1[111:96] - dst[95:80] := src2[111:96] - dst[111:96] := src1[127:112] - dst[127:112] := src2[127:112] - RETURN dst[127:0] - } - tmp_dst[127:0] := INTERLEAVE_HIGH_WORDS(a[127:0], b[127:0]) - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := tmp_dst[i+15:i] - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Unpack and interleave 16-bit integers from the high half of "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE INTERLEAVE_HIGH_WORDS(src1[127:0], src2[127:0]) { + dst[15:0] := src1[79:64] + dst[31:16] := src2[79:64] + dst[47:32] := src1[95:80] + dst[63:48] := src2[95:80] + dst[79:64] := src1[111:96] + dst[95:80] := src2[111:96] + dst[111:96] := src1[127:112] + dst[127:112] := src2[127:112] + RETURN dst[127:0] +} +tmp_dst[127:0] := INTERLEAVE_HIGH_WORDS(a[127:0], b[127:0]) +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
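INTERLEAVE_HIGH_WORDS is the 16-bit analogue of the byte interleave; one lane of it in scalar Rust (names illustrative, not part of the patch):

/// Interleaves the high four 16-bit elements of `a` and `b` in one 128-bit lane.
fn interleave_high_words(a: [u16; 8], b: [u16; 8]) -> [u16; 8] {
    let mut dst = [0u16; 8];
    for i in 0..4 {
        dst[2 * i] = a[4 + i];
        dst[2 * i + 1] = b[4 + i];
    }
    dst
}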
- - - - - - Unpack and interleave 8-bit integers from the low half of each 128-bit lane in - "a" and "b", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - - DEFINE INTERLEAVE_BYTES(src1[127:0], src2[127:0]) { - dst[7:0] := src1[7:0] - dst[15:8] := src2[7:0] - dst[23:16] := src1[15:8] - dst[31:24] := src2[15:8] - dst[39:32] := src1[23:16] - dst[47:40] := src2[23:16] - dst[55:48] := src1[31:24] - dst[63:56] := src2[31:24] - dst[71:64] := src1[39:32] - dst[79:72] := src2[39:32] - dst[87:80] := src1[47:40] - dst[95:88] := src2[47:40] - dst[103:96] := src1[55:48] - dst[111:104] := src2[55:48] - dst[119:112] := src1[63:56] - dst[127:120] := src2[63:56] - RETURN dst[127:0] - } - tmp_dst[127:0] := INTERLEAVE_BYTES(a[127:0], b[127:0]) - tmp_dst[255:128] := INTERLEAVE_BYTES(a[255:128], b[255:128]) - FOR j := 0 to 31 - i := j*8 - IF k[j] - dst[i+7:i] := tmp_dst[i+7:i] - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Unpack and interleave 8-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE INTERLEAVE_BYTES(src1[127:0], src2[127:0]) { + dst[7:0] := src1[7:0] + dst[15:8] := src2[7:0] + dst[23:16] := src1[15:8] + dst[31:24] := src2[15:8] + dst[39:32] := src1[23:16] + dst[47:40] := src2[23:16] + dst[55:48] := src1[31:24] + dst[63:56] := src2[31:24] + dst[71:64] := src1[39:32] + dst[79:72] := src2[39:32] + dst[87:80] := src1[47:40] + dst[95:88] := src2[47:40] + dst[103:96] := src1[55:48] + dst[111:104] := src2[55:48] + dst[119:112] := src1[63:56] + dst[127:120] := src2[63:56] + RETURN dst[127:0] +} +tmp_dst[127:0] := INTERLEAVE_BYTES(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_BYTES(a[255:128], b[255:128]) +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Unpack and interleave 8-bit integers from the low half of each 128-bit lane in - "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out - when the corresponding mask bit is not set). - - DEFINE INTERLEAVE_BYTES(src1[127:0], src2[127:0]) { - dst[7:0] := src1[7:0] - dst[15:8] := src2[7:0] - dst[23:16] := src1[15:8] - dst[31:24] := src2[15:8] - dst[39:32] := src1[23:16] - dst[47:40] := src2[23:16] - dst[55:48] := src1[31:24] - dst[63:56] := src2[31:24] - dst[71:64] := src1[39:32] - dst[79:72] := src2[39:32] - dst[87:80] := src1[47:40] - dst[95:88] := src2[47:40] - dst[103:96] := src1[55:48] - dst[111:104] := src2[55:48] - dst[119:112] := src1[63:56] - dst[127:120] := src2[63:56] - RETURN dst[127:0] - } - tmp_dst[127:0] := INTERLEAVE_BYTES(a[127:0], b[127:0]) - tmp_dst[255:128] := INTERLEAVE_BYTES(a[255:128], b[255:128]) - FOR j := 0 to 31 - i := j*8 - IF k[j] - dst[i+7:i] := tmp_dst[i+7:i] - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Unpack and interleave 8-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE INTERLEAVE_BYTES(src1[127:0], src2[127:0]) { + dst[7:0] := src1[7:0] + dst[15:8] := src2[7:0] + dst[23:16] := src1[15:8] + dst[31:24] := src2[15:8] + dst[39:32] := src1[23:16] + dst[47:40] := src2[23:16] + dst[55:48] := src1[31:24] + dst[63:56] := src2[31:24] + dst[71:64] := src1[39:32] + dst[79:72] := src2[39:32] + dst[87:80] := src1[47:40] + dst[95:88] := src2[47:40] + dst[103:96] := src1[55:48] + dst[111:104] := src2[55:48] + dst[119:112] := src1[63:56] + dst[127:120] := src2[63:56] + RETURN dst[127:0] +} +tmp_dst[127:0] := INTERLEAVE_BYTES(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_BYTES(a[255:128], b[255:128]) +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - Unpack and interleave 8-bit integers from the low half of "a" and "b", and - store the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - DEFINE INTERLEAVE_BYTES(src1[127:0], src2[127:0]) { - dst[7:0] := src1[7:0] - dst[15:8] := src2[7:0] - dst[23:16] := src1[15:8] - dst[31:24] := src2[15:8] - dst[39:32] := src1[23:16] - dst[47:40] := src2[23:16] - dst[55:48] := src1[31:24] - dst[63:56] := src2[31:24] - dst[71:64] := src1[39:32] - dst[79:72] := src2[39:32] - dst[87:80] := src1[47:40] - dst[95:88] := src2[47:40] - dst[103:96] := src1[55:48] - dst[111:104] := src2[55:48] - dst[119:112] := src1[63:56] - dst[127:120] := src2[63:56] - RETURN dst[127:0] - } - tmp_dst[127:0] := INTERLEAVE_BYTES(a[127:0], b[127:0]) - FOR j := 0 to 15 - i := j*8 - IF k[j] - dst[i+7:i] := tmp_dst[i+7:i] - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Unpack and interleave 8-bit integers from the low half of "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE INTERLEAVE_BYTES(src1[127:0], src2[127:0]) { + dst[7:0] := src1[7:0] + dst[15:8] := src2[7:0] + dst[23:16] := src1[15:8] + dst[31:24] := src2[15:8] + dst[39:32] := src1[23:16] + dst[47:40] := src2[23:16] + dst[55:48] := src1[31:24] + dst[63:56] := src2[31:24] + dst[71:64] := src1[39:32] + dst[79:72] := src2[39:32] + dst[87:80] := src1[47:40] + dst[95:88] := src2[47:40] + dst[103:96] := src1[55:48] + dst[111:104] := src2[55:48] + dst[119:112] := src1[63:56] + dst[127:120] := src2[63:56] + RETURN dst[127:0] +} +tmp_dst[127:0] := INTERLEAVE_BYTES(a[127:0], b[127:0]) +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Unpack and interleave 8-bit integers from the low half of "a" and "b", and - store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - DEFINE INTERLEAVE_BYTES(src1[127:0], src2[127:0]) { - dst[7:0] := src1[7:0] - dst[15:8] := src2[7:0] - dst[23:16] := src1[15:8] - dst[31:24] := src2[15:8] - dst[39:32] := src1[23:16] - dst[47:40] := src2[23:16] - dst[55:48] := src1[31:24] - dst[63:56] := src2[31:24] - dst[71:64] := src1[39:32] - dst[79:72] := src2[39:32] - dst[87:80] := src1[47:40] - dst[95:88] := src2[47:40] - dst[103:96] := src1[55:48] - dst[111:104] := src2[55:48] - dst[119:112] := src1[63:56] - dst[127:120] := src2[63:56] - RETURN dst[127:0] - } - tmp_dst[127:0] := INTERLEAVE_BYTES(a[127:0], b[127:0]) - FOR j := 0 to 15 - i := j*8 - IF k[j] - dst[i+7:i] := tmp_dst[i+7:i] - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Unpack and interleave 8-bit integers from the low half of "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE INTERLEAVE_BYTES(src1[127:0], src2[127:0]) { + dst[7:0] := src1[7:0] + dst[15:8] := src2[7:0] + dst[23:16] := src1[15:8] + dst[31:24] := src2[15:8] + dst[39:32] := src1[23:16] + dst[47:40] := src2[23:16] + dst[55:48] := src1[31:24] + dst[63:56] := src2[31:24] + dst[71:64] := src1[39:32] + dst[79:72] := src2[39:32] + dst[87:80] := src1[47:40] + dst[95:88] := src2[47:40] + dst[103:96] := src1[55:48] + dst[111:104] := src2[55:48] + dst[119:112] := src1[63:56] + dst[127:120] := src2[63:56] + RETURN dst[127:0] +} +tmp_dst[127:0] := INTERLEAVE_BYTES(a[127:0], b[127:0]) +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
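INTERLEAVE_BYTES is the low-half mirror of the high-byte unpack, reading elements 0..8 instead of 8..16; the low-word variants that follow differ only in element width. A scalar sketch (names illustrative):

/// Interleaves the low eight bytes of `a` and `b` in one 128-bit lane.
fn interleave_bytes(a: [u8; 16], b: [u8; 16]) -> [u8; 16] {
    let mut dst = [0u8; 16];
    for i in 0..8 {
        dst[2 * i] = a[i];
        dst[2 * i + 1] = b[i];
    }
    dst
}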
- - - - - - Unpack and interleave 16-bit integers from the low half of each 128-bit lane in - "a" and "b", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - - DEFINE INTERLEAVE_WORDS(src1[127:0], src2[127:0]) { - dst[15:0] := src1[15:0] - dst[31:16] := src2[15:0] - dst[47:32] := src1[31:16] - dst[63:48] := src2[31:16] - dst[79:64] := src1[47:32] - dst[95:80] := src2[47:32] - dst[111:96] := src1[63:48] - dst[127:112] := src2[63:48] - RETURN dst[127:0] - } - tmp_dst[127:0] := INTERLEAVE_WORDS(a[127:0], b[127:0]) - tmp_dst[255:128] := INTERLEAVE_WORDS(a[255:128], b[255:128]) - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := tmp_dst[i+15:i] - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Unpack and interleave 16-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE INTERLEAVE_WORDS(src1[127:0], src2[127:0]) { + dst[15:0] := src1[15:0] + dst[31:16] := src2[15:0] + dst[47:32] := src1[31:16] + dst[63:48] := src2[31:16] + dst[79:64] := src1[47:32] + dst[95:80] := src2[47:32] + dst[111:96] := src1[63:48] + dst[127:112] := src2[63:48] + RETURN dst[127:0] +} +tmp_dst[127:0] := INTERLEAVE_WORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_WORDS(a[255:128], b[255:128]) +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Unpack and interleave 16-bit integers from the low half of each 128-bit lane in - "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out - when the corresponding mask bit is not set). - - DEFINE INTERLEAVE_WORDS(src1[127:0], src2[127:0]) { - dst[15:0] := src1[15:0] - dst[31:16] := src2[15:0] - dst[47:32] := src1[31:16] - dst[63:48] := src2[31:16] - dst[79:64] := src1[47:32] - dst[95:80] := src2[47:32] - dst[111:96] := src1[63:48] - dst[127:112] := src2[63:48] - RETURN dst[127:0] - } - tmp_dst[127:0] := INTERLEAVE_WORDS(a[127:0], b[127:0]) - tmp_dst[255:128] := INTERLEAVE_WORDS(a[255:128], b[255:128]) - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := tmp_dst[i+15:i] - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Unpack and interleave 16-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE INTERLEAVE_WORDS(src1[127:0], src2[127:0]) { + dst[15:0] := src1[15:0] + dst[31:16] := src2[15:0] + dst[47:32] := src1[31:16] + dst[63:48] := src2[31:16] + dst[79:64] := src1[47:32] + dst[95:80] := src2[47:32] + dst[111:96] := src1[63:48] + dst[127:112] := src2[63:48] + RETURN dst[127:0] +} +tmp_dst[127:0] := INTERLEAVE_WORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_WORDS(a[255:128], b[255:128]) +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - Unpack and interleave 16-bit integers from the low half of "a" and "b", and - store the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - DEFINE INTERLEAVE_WORDS(src1[127:0], src2[127:0]) { - dst[15:0] := src1[15:0] - dst[31:16] := src2[15:0] - dst[47:32] := src1[31:16] - dst[63:48] := src2[31:16] - dst[79:64] := src1[47:32] - dst[95:80] := src2[47:32] - dst[111:96] := src1[63:48] - dst[127:112] := src2[63:48] - RETURN dst[127:0] - } - tmp_dst[127:0] := INTERLEAVE_WORDS(a[127:0], b[127:0]) - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := tmp_dst[i+15:i] - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Unpack and interleave 16-bit integers from the low half of "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE INTERLEAVE_WORDS(src1[127:0], src2[127:0]) { + dst[15:0] := src1[15:0] + dst[31:16] := src2[15:0] + dst[47:32] := src1[31:16] + dst[63:48] := src2[31:16] + dst[79:64] := src1[47:32] + dst[95:80] := src2[47:32] + dst[111:96] := src1[63:48] + dst[127:112] := src2[63:48] + RETURN dst[127:0] +} +tmp_dst[127:0] := INTERLEAVE_WORDS(a[127:0], b[127:0]) +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Unpack and interleave 16-bit integers from the low half of "a" and "b", and - store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - DEFINE INTERLEAVE_WORDS(src1[127:0], src2[127:0]) { - dst[15:0] := src1[15:0] - dst[31:16] := src2[15:0] - dst[47:32] := src1[31:16] - dst[63:48] := src2[31:16] - dst[79:64] := src1[47:32] - dst[95:80] := src2[47:32] - dst[111:96] := src1[63:48] - dst[127:112] := src2[63:48] - RETURN dst[127:0] - } - tmp_dst[127:0] := INTERLEAVE_WORDS(a[127:0], b[127:0]) - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := tmp_dst[i+15:i] - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Unpack and interleave 16-bit integers from the low half of "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE INTERLEAVE_WORDS(src1[127:0], src2[127:0]) { + dst[15:0] := src1[15:0] + dst[31:16] := src2[15:0] + dst[47:32] := src1[31:16] + dst[63:48] := src2[31:16] + dst[79:64] := src1[47:32] + dst[95:80] := src2[47:32] + dst[111:96] := src1[63:48] + dst[127:112] := src2[63:48] + RETURN dst[127:0] +} +tmp_dst[127:0] := INTERLEAVE_WORDS(a[127:0], b[127:0]) +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Load packed 16-bit integers from memory into "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - "mem_addr" does not need to be aligned on any particular boundary. - - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := MEM[mem_addr+i+15:mem_addr+i] - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Load + + + + + Load packed 16-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := MEM[mem_addr+i+15:mem_addr+i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Load
- - - - Load packed 16-bit integers from memory into "dst" using zeromask "k" (elements - are zeroed out when the corresponding mask bit is not set). - "mem_addr" does not need to be aligned on any particular boundary. - - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := MEM[mem_addr+i+15:mem_addr+i] - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Load + + + + Load packed 16-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := MEM[mem_addr+i+15:mem_addr+i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Load
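The masked loads above only touch memory for lanes whose mask bit is set, which is why AVX-512 documents that masked-off elements cannot fault. A scalar model of the writemask form (illustrative names; the zeromask form is the same call with `[0; 16]` as `src`):

/// Element-granular model of a masked unaligned 16-bit load.
unsafe fn mask_loadu_epi16(src: [i16; 16], k: u16, mem_addr: *const i16) -> [i16; 16] {
    let mut dst = src;
    for j in 0..16 {
        if (k >> j) & 1 == 1 {
            dst[j] = mem_addr.add(j).read_unaligned();
        }
    }
    dst
}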
- - - - - Load packed 16-bit integers from memory into "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - "mem_addr" does not need to be aligned on any particular boundary. - - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := MEM[mem_addr+i+15:mem_addr+i] - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Load + + + + + Load packed 16-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := MEM[mem_addr+i+15:mem_addr+i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Load
- - - - Load packed 16-bit integers from memory into "dst" using zeromask "k" (elements - are zeroed out when the corresponding mask bit is not set). - "mem_addr" does not need to be aligned on any particular boundary. - - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := MEM[mem_addr+i+15:mem_addr+i] - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Load + + + + Load packed 16-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := MEM[mem_addr+i+15:mem_addr+i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Load
- - - - - Load packed 8-bit integers from memory into "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). - "mem_addr" does not need to be aligned on any particular boundary. - - FOR j := 0 to 31 - i := j*8 - IF k[j] - dst[i+7:i] := MEM[mem_addr+i+7:mem_addr+i] - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Load + + + + + Load packed 8-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := MEM[mem_addr+i+7:mem_addr+i] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Load
- - - - Load packed 8-bit integers from memory into "dst" using zeromask "k" (elements - are zeroed out when the corresponding mask bit is not set). - "mem_addr" does not need to be aligned on any particular boundary. - - FOR j := 0 to 31 - i := j*8 - IF k[j] - dst[i+7:i] := MEM[mem_addr+i+7:mem_addr+i] - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Load + + + + Load packed 8-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := MEM[mem_addr+i+7:mem_addr+i] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Load
- - - - - Load packed 8-bit integers from memory into "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). - "mem_addr" does not need to be aligned on any particular boundary. - - FOR j := 0 to 15 - i := j*8 - IF k[j] - dst[i+7:i] := MEM[mem_addr+i+7:mem_addr+i] - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Load + + + + + Load packed 8-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := MEM[mem_addr+i+7:mem_addr+i] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Load
- - - - Load packed 8-bit integers from memory into "dst" using zeromask "k" (elements - are zeroed out when the corresponding mask bit is not set). - "mem_addr" does not need to be aligned on any particular boundary. - - FOR j := 0 to 15 - i := j*8 - IF k[j] - dst[i+7:i] := MEM[mem_addr+i+7:mem_addr+i] - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Load + + + + Load packed 8-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + "mem_addr" does not need to be aligned on any particular boundary. + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := MEM[mem_addr+i+7:mem_addr+i] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Load
- - - Load 256-bits (composed of 16 packed 16-bit integers) from memory into "dst". - "mem_addr" does not need to be aligned on any particular boundary. - - dst[255:0] := MEM[mem_addr+255:mem_addr] - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Load + + + Load 256-bits (composed of 16 packed 16-bit integers) from memory into "dst". + "mem_addr" does not need to be aligned on any particular boundary. + +dst[255:0] := MEM[mem_addr+255:mem_addr] +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Load
- - - Load 256-bits (composed of 32 packed 8-bit integers) from memory into "dst". - "mem_addr" does not need to be aligned on any particular boundary. - - dst[255:0] := MEM[mem_addr+255:mem_addr] - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Load + + + Load 256-bits (composed of 32 packed 8-bit integers) from memory into "dst". + "mem_addr" does not need to be aligned on any particular boundary. + +dst[255:0] := MEM[mem_addr+255:mem_addr] +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Load
- - - Load 128-bits (composed of 8 packed 16-bit integers) from memory into "dst". - "mem_addr" does not need to be aligned on any particular boundary. - - dst[127:0] := MEM[mem_addr+127:mem_addr] - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Load + + + Load 128-bits (composed of 8 packed 16-bit integers) from memory into "dst". + "mem_addr" does not need to be aligned on any particular boundary. + +dst[127:0] := MEM[mem_addr+127:mem_addr] +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Load
- - - Load 128-bits (composed of 16 packed 8-bit integers) from memory into "dst". - "mem_addr" does not need to be aligned on any particular boundary. - - dst[127:0] := MEM[mem_addr+127:mem_addr] - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Load + + + Load 128-bits (composed of 16 packed 8-bit integers) from memory into "dst". + "mem_addr" does not need to be aligned on any particular boundary. + +dst[127:0] := MEM[mem_addr+127:mem_addr] +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Load
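The four unmasked loads above are plain unaligned vector loads; the element width in the description only documents how the bits will later be interpreted. A sketch under that reading (illustrative names):

/// Unaligned 128-bit load; no alignment requirement on `mem_addr`.
unsafe fn loadu_128(mem_addr: *const u8) -> [u8; 16] {
    mem_addr.cast::<[u8; 16]>().read_unaligned()
}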
- - - - - Move packed 16-bit integers from "a" into "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := a[i+15:i] - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Move + + + + + Move packed 16-bit integers from "a" into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Move
- - - - Move packed 16-bit integers from "a" into "dst" using zeromask "k" (elements - are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := a[i+15:i] - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Move + + + + Move packed 16-bit integers from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Move
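With no memory operand, the masked register moves above reduce to a per-lane blend. Scalar model (illustrative names):

/// Per-lane select between `a` (mask bit set) and `src` (mask bit clear).
fn mask_mov_epi16(src: [i16; 16], k: u16, a: [i16; 16]) -> [i16; 16] {
    core::array::from_fn(|j| if (k >> j) & 1 == 1 { a[j] } else { src[j] })
}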
- - - - - Move packed 16-bit integers from "a" into "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := a[i+15:i] - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Move + + + + + Move packed 16-bit integers from "a" into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Move
- - - - Move packed 16-bit integers from "a" into "dst" using zeromask "k" (elements - are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := a[i+15:i] - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Move + + + + Move packed 16-bit integers from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := a[i+15:i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Move
- - - - - Move packed 8-bit integers from "a" into "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 31 - i := j*8 - IF k[j] - dst[i+7:i] := a[i+7:i] - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Move + + + + + Move packed 8-bit integers from "a" into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Move
- - - - Move packed 8-bit integers from "a" into "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 31 - i := j*8 - IF k[j] - dst[i+7:i] := a[i+7:i] - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Move + + + + Move packed 8-bit integers from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Move
- - - - - Move packed 8-bit integers from "a" into "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*8 - IF k[j] - dst[i+7:i] := a[i+7:i] - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Move + + + + + Move packed 8-bit integers from "a" into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Move
- - - - Move packed 8-bit integers from "a" into "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*8 - IF k[j] - dst[i+7:i] := a[i+7:i] - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Move + + + + Move packed 8-bit integers from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := a[i+7:i] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Move
- - - - - Store packed 16-bit integers from "a" into memory using writemask "k". - "mem_addr" does not need to be aligned on any particular boundary. - - FOR j := 0 to 15 - i := j*16 - IF k[j] - MEM[mem_addr+i+15:mem_addr+i] := a[i+15:i] - FI - ENDFOR - - - AVX512BW - AVX512VL -
immintrin.h
- Store + + + + + Store packed 16-bit integers from "a" into memory using writemask "k". + "mem_addr" does not need to be aligned on any particular boundary. + +FOR j := 0 to 15 + i := j*16 + IF k[j] + MEM[mem_addr+i+15:mem_addr+i] := a[i+15:i] + FI +ENDFOR + + + AVX512BW + AVX512VL +
immintrin.h
+ Store
- - - - - Store packed 16-bit integers from "a" into memory using writemask "k". - "mem_addr" does not need to be aligned on any particular boundary. - - FOR j := 0 to 7 - i := j*16 - IF k[j] - MEM[mem_addr+i+15:mem_addr+i] := a[i+15:i] - FI - ENDFOR - - - AVX512BW - AVX512VL -
immintrin.h
- Store + + + + + Store packed 16-bit integers from "a" into memory using writemask "k". + "mem_addr" does not need to be aligned on any particular boundary. + +FOR j := 0 to 7 + i := j*16 + IF k[j] + MEM[mem_addr+i+15:mem_addr+i] := a[i+15:i] + FI +ENDFOR + + + AVX512BW + AVX512VL +
immintrin.h
+ Store
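The masked stores are the write-side counterpart: lanes with a clear mask bit leave memory untouched rather than writing a merged value. Scalar model (illustrative names):

/// Element-granular model of a masked unaligned 16-bit store.
unsafe fn mask_storeu_epi16(mem_addr: *mut i16, k: u8, a: [i16; 8]) {
    for j in 0..8 {
        if (k >> j) & 1 == 1 {
            mem_addr.add(j).write_unaligned(a[j]);
        }
    }
}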
- - - - - Store packed 8-bit integers from "a" into memory using writemask "k". - "mem_addr" does not need to be aligned on any particular boundary. - - FOR j := 0 to 31 - i := j*8 - IF k[j] - MEM[mem_addr+i+7:mem_addr+i] := a[i+7:i] - FI - ENDFOR - - - AVX512BW - AVX512VL -
immintrin.h
- Store + + + + + Store packed 8-bit integers from "a" into memory using writemask "k". + "mem_addr" does not need to be aligned on any particular boundary. + +FOR j := 0 to 31 + i := j*8 + IF k[j] + MEM[mem_addr+i+7:mem_addr+i] := a[i+7:i] + FI +ENDFOR + + + AVX512BW + AVX512VL +
immintrin.h
+ Store
- - - - - Store packed 8-bit integers from "a" into memory using writemask "k". - "mem_addr" does not need to be aligned on any particular boundary. - - FOR j := 0 to 15 - i := j*8 - IF k[j] - MEM[mem_addr+i+7:mem_addr+i] := a[i+7:i] - FI - ENDFOR - - - AVX512BW - AVX512VL -
immintrin.h
- Store + + + + + Store packed 8-bit integers from "a" into memory using writemask "k". + "mem_addr" does not need to be aligned on any particular boundary. + +FOR j := 0 to 15 + i := j*8 + IF k[j] + MEM[mem_addr+i+7:mem_addr+i] := a[i+7:i] + FI +ENDFOR + + + AVX512BW + AVX512VL +
immintrin.h
+ Store
- - - - Store 256-bits (composed of 16 packed 16-bit integers) from "a" into memory. - "mem_addr" does not need to be aligned on any particular boundary. - - MEM[mem_addr+255:mem_addr] := a[255:0] - - - AVX512BW - AVX512VL -
immintrin.h
- Store + + + + Store 256-bits (composed of 16 packed 16-bit integers) from "a" into memory. + "mem_addr" does not need to be aligned on any particular boundary. + +MEM[mem_addr+255:mem_addr] := a[255:0] + + + AVX512BW + AVX512VL +
immintrin.h
+ Store
- - - - Store 256-bits (composed of 32 packed 8-bit integers) from "a" into memory. - "mem_addr" does not need to be aligned on any particular boundary. - - MEM[mem_addr+255:mem_addr] := a[255:0] - - - AVX512BW - AVX512VL -
immintrin.h
- Store + + + + Store 256-bits (composed of 32 packed 8-bit integers) from "a" into memory. + "mem_addr" does not need to be aligned on any particular boundary. + +MEM[mem_addr+255:mem_addr] := a[255:0] + + + AVX512BW + AVX512VL +
immintrin.h
+ Store
- - - - Store 128-bits (composed of 8 packed 16-bit integers) from "a" into memory. - "mem_addr" does not need to be aligned on any particular boundary. - - MEM[mem_addr+127:mem_addr] := a[127:0] - - - AVX512BW - AVX512VL -
immintrin.h
- Store + + + + Store 128-bits (composed of 8 packed 16-bit integers) from "a" into memory. + "mem_addr" does not need to be aligned on any particular boundary. + +MEM[mem_addr+127:mem_addr] := a[127:0] + + + AVX512BW + AVX512VL +
immintrin.h
+ Store
- - - - Store 128-bits (composed of 16 packed 8-bit integers) from "a" into memory. - "mem_addr" does not need to be aligned on any particular boundary. - - MEM[mem_addr+127:mem_addr] := a[127:0] - - - AVX512BW - AVX512VL -
immintrin.h
- Store + + + + Store 128-bits (composed of 16 packed 8-bit integers) from "a" into memory. + "mem_addr" does not need to be aligned on any particular boundary. + +MEM[mem_addr+127:mem_addr] := a[127:0] + + + AVX512BW + AVX512VL +
immintrin.h
+ Store
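And the unmasked stores write the whole vector with no alignment requirement, mirroring the loads. Sketch (illustrative names):

/// Unaligned 128-bit store.
unsafe fn storeu_128(mem_addr: *mut u8, a: [u8; 16]) {
    mem_addr.cast::<[u8; 16]>().write_unaligned(a);
}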
- - - - - Compute the absolute value of packed signed 8-bit integers in "a", and store - the unsigned results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 31 - i := j*8 - IF k[j] - dst[i+7:i] := ABS(a[i+7:i]) - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + Compute the absolute value of packed signed 8-bit integers in "a", and store the unsigned results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := ABS(a[i+7:i]) + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - Compute the absolute value of packed signed 8-bit integers in "a", and store - the unsigned results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 31 - i := j*8 - IF k[j] - dst[i+7:i] := ABS(a[i+7:i]) - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + Compute the absolute value of packed signed 8-bit integers in "a", and store the unsigned results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := ABS(a[i+7:i]) + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
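One subtlety in the ABS pseudocode above: the result is stored as an unsigned value, so ABS(-128) is 128 (bit pattern 0x80), i.e. two's-complement wrapping rather than a checked absolute value. Scalar model (illustrative names):

/// Masked byte absolute value; `wrapping_abs` matches the hardware's
/// treatment of -128 (the result keeps the 0x80 bit pattern).
fn mask_abs_epi8(src: [i8; 32], k: u32, a: [i8; 32]) -> [i8; 32] {
    core::array::from_fn(|j| if (k >> j) & 1 == 1 { a[j].wrapping_abs() } else { src[j] })
}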
- - - - - Compute the absolute value of packed signed 8-bit integers in "a", and store - the unsigned results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*8 - IF k[j] - dst[i+7:i] := ABS(a[i+7:i]) - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + Compute the absolute value of packed signed 8-bit integers in "a", and store the unsigned results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := ABS(a[i+7:i]) + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - Compute the absolute value of packed signed 8-bit integers in "a", and store - the unsigned results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*8 - IF k[j] - dst[i+7:i] := ABS(a[i+7:i]) - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + Compute the absolute value of packed signed 8-bit integers in "a", and store the unsigned results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := ABS(a[i+7:i]) + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Compute the absolute value of packed signed 16-bit integers in "a", and store - the unsigned results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := ABS(a[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + Compute the absolute value of packed signed 16-bit integers in "a", and store the unsigned results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := ABS(a[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - Compute the absolute value of packed signed 16-bit integers in "a", and store - the unsigned results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := ABS(a[i+15:i]) - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + Compute the absolute value of packed signed 16-bit integers in "a", and store the unsigned results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := ABS(a[i+15:i]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Compute the absolute value of packed signed 16-bit integers in "a", and store - the unsigned results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := ABS(a[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + Compute the absolute value of packed signed 16-bit integers in "a", and store the unsigned results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := ABS(a[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - Compute the absolute value of packed signed 16-bit integers in "a", and store - the unsigned results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := ABS(a[i+15:i]) - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + Compute the absolute value of packed signed 16-bit integers in "a", and store the unsigned results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := ABS(a[i+15:i]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Add packed 8-bit integers in "a" and "b", and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - FOR j := 0 to 31 - i := j*8 - IF k[j] - dst[i+7:i] := a[i+7:i] + b[i+7:i] - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Add packed 8-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := a[i+7:i] + b[i+7:i] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Add packed 8-bit integers in "a" and "b", and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 31 - i := j*8 - IF k[j] - dst[i+7:i] := a[i+7:i] + b[i+7:i] - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + Add packed 8-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := a[i+7:i] + b[i+7:i] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
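The plain masked adds above wrap modulo 2^8, in contrast to the saturating variants that follow. Scalar model (illustrative names):

/// Masked wrapping byte add.
fn mask_add_epi8(src: [i8; 32], k: u32, a: [i8; 32], b: [i8; 32]) -> [i8; 32] {
    core::array::from_fn(|j| if (k >> j) & 1 == 1 { a[j].wrapping_add(b[j]) } else { src[j] })
}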
- - - - - - Add packed 8-bit integers in "a" and "b", and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - FOR j := 0 to 15 - i := j*8 - IF k[j] - dst[i+7:i] := a[i+7:i] + b[i+7:i] - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Add packed 8-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := a[i+7:i] + b[i+7:i] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Add packed 8-bit integers in "a" and "b", and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*8 - IF k[j] - dst[i+7:i] := a[i+7:i] + b[i+7:i] - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + Add packed 8-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := a[i+7:i] + b[i+7:i] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Add packed signed 8-bit integers in "a" and "b" using saturation, and store the - results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 31 - i := j*8 - IF k[j] - dst[i+7:i] := Saturate8( a[i+7:i] + b[i+7:i] ) - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Add packed signed 8-bit integers in "a" and "b" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := Saturate8( a[i+7:i] + b[i+7:i] ) + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Add packed signed 8-bit integers in "a" and "b" using saturation, and store the - results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask - bit is not set). - - FOR j := 0 to 31 - i := j*8 - IF k[j] - dst[i+7:i] := Saturate8( a[i+7:i] + b[i+7:i] ) - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + Add packed signed 8-bit integers in "a" and "b" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := Saturate8( a[i+7:i] + b[i+7:i] ) + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Add packed signed 8-bit integers in "a" and "b" using saturation, and store the - results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*8 - IF k[j] - dst[i+7:i] := Saturate8( a[i+7:i] + b[i+7:i] ) - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Add packed signed 8-bit integers in "a" and "b" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := Saturate8( a[i+7:i] + b[i+7:i] ) + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Add packed signed 8-bit integers in "a" and "b" using saturation, and store the - results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask - bit is not set). - - FOR j := 0 to 15 - i := j*8 - IF k[j] - dst[i+7:i] := Saturate8( a[i+7:i] + b[i+7:i] ) - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + Add packed signed 8-bit integers in "a" and "b" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := Saturate8( a[i+7:i] + b[i+7:i] ) + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Add packed signed 16-bit integers in "a" and "b" using saturation, and store - the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := Saturate16( a[i+15:i] + b[i+15:i] ) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Add packed signed 16-bit integers in "a" and "b" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := Saturate16( a[i+15:i] + b[i+15:i] ) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Add packed signed 16-bit integers in "a" and "b" using saturation, and store - the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding - mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := Saturate16( a[i+15:i] + b[i+15:i] ) - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + Add packed signed 16-bit integers in "a" and "b" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := Saturate16( a[i+15:i] + b[i+15:i] ) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Add packed signed 16-bit integers in "a" and "b" using saturation, and store - the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := Saturate16( a[i+15:i] + b[i+15:i] ) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Add packed signed 16-bit integers in "a" and "b" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := Saturate16( a[i+15:i] + b[i+15:i] ) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Add packed signed 16-bit integers in "a" and "b" using saturation, and store - the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding - mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := Saturate16( a[i+15:i] + b[i+15:i] ) - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + Add packed signed 16-bit integers in "a" and "b" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := Saturate16( a[i+15:i] + b[i+15:i] ) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
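Saturate8 and Saturate16 in the hunks above map directly onto Rust's saturating integer addition, so the clamping behaviour can be checked with a plain unit test (a hypothetical test for illustration, not part of this patch):

    #[test]
    fn saturate8_and_saturate16_match_pseudocode() {
        // Saturate8 / Saturate16 clamp to the signed range instead of wrapping.
        assert_eq!(100i8.saturating_add(100), i8::MAX); // 200 clamps to 127
        assert_eq!((-100i8).saturating_add(-100), i8::MIN); // -200 clamps to -128
        assert_eq!(30_000i16.saturating_add(10_000), i16::MAX); // 40000 clamps to 32767
    }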
- - - - - - Add packed unsigned 8-bit integers in "a" and "b" using saturation, and store - the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 31 - i := j*8 - IF k[j] - dst[i+7:i] := SaturateU8( a[i+7:i] + b[i+7:i] ) - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Add packed unsigned 8-bit integers in "a" and "b" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := SaturateU8( a[i+7:i] + b[i+7:i] ) + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Add packed unsigned 8-bit integers in "a" and "b" using saturation, and store - the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding - mask bit is not set). - - FOR j := 0 to 31 - i := j*8 - IF k[j] - dst[i+7:i] := SaturateU8( a[i+7:i] + b[i+7:i] ) - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + Add packed unsigned 8-bit integers in "a" and "b" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := SaturateU8( a[i+7:i] + b[i+7:i] ) + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Add packed unsigned 8-bit integers in "a" and "b" using saturation, and store - the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*8 - IF k[j] - dst[i+7:i] := SaturateU8( a[i+7:i] + b[i+7:i] ) - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Add packed unsigned 8-bit integers in "a" and "b" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := SaturateU8( a[i+7:i] + b[i+7:i] ) + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Add packed unsigned 8-bit integers in "a" and "b" using saturation, and store - the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding - mask bit is not set). - - FOR j := 0 to 15 - i := j*8 - IF k[j] - dst[i+7:i] := SaturateU8( a[i+7:i] + b[i+7:i] ) - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + Add packed unsigned 8-bit integers in "a" and "b" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := SaturateU8( a[i+7:i] + b[i+7:i] ) + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Add packed unsigned 16-bit integers in "a" and "b" using saturation, and store - the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := SaturateU16( a[i+15:i] + b[i+15:i] ) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Add packed unsigned 16-bit integers in "a" and "b" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := SaturateU16( a[i+15:i] + b[i+15:i] ) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Add packed unsigned 16-bit integers in "a" and "b" using saturation, and store - the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding - mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := SaturateU16( a[i+15:i] + b[i+15:i] ) - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + Add packed unsigned 16-bit integers in "a" and "b" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := SaturateU16( a[i+15:i] + b[i+15:i] ) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Add packed unsigned 16-bit integers in "a" and "b" using saturation, and store - the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := SaturateU16( a[i+15:i] + b[i+15:i] ) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Add packed unsigned 16-bit integers in "a" and "b" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := SaturateU16( a[i+15:i] + b[i+15:i] ) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Add packed unsigned 16-bit integers in "a" and "b" using saturation, and store - the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding - mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := SaturateU16( a[i+15:i] + b[i+15:i] ) - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + Add packed unsigned 16-bit integers in "a" and "b" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := SaturateU16( a[i+15:i] + b[i+15:i] ) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
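SaturateU8 and SaturateU16 are the unsigned counterparts; since the operands are unsigned, only the upper bound can be hit (again a hypothetical test, for illustration only):

    #[test]
    fn saturate_u8_and_u16_match_pseudocode() {
        assert_eq!(200u8.saturating_add(100), u8::MAX); // 300 clamps to 255
        assert_eq!(60_000u16.saturating_add(10_000), u16::MAX); // 70000 clamps to 65535
    }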
- - - - - - Add packed 16-bit integers in "a" and "b", and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := a[i+15:i] + b[i+15:i] - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Add packed 16-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := a[i+15:i] + b[i+15:i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Add packed 16-bit integers in "a" and "b", and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := a[i+15:i] + b[i+15:i] - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + Add packed 16-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := a[i+15:i] + b[i+15:i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Add packed 16-bit integers in "a" and "b", and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := a[i+15:i] + b[i+15:i] - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Add packed 16-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := a[i+15:i] + b[i+15:i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Add packed 16-bit integers in "a" and "b", and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := a[i+15:i] + b[i+15:i] - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + Add packed 16-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := a[i+15:i] + b[i+15:i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
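All of the add variants above differ only in lane width, lane count, and the per-lane operation; the masking skeleton never changes. A generic scalar sketch makes that explicit (the name and signature are illustrative only):

    use core::array;

    /// Generic model of the writemask pattern: apply `op` per lane where
    /// the mask bit is set, otherwise keep the lane from `src`.
    fn mask_op_model<T: Copy, const N: usize>(
        src: [T; N],
        k: u64,
        a: [T; N],
        b: [T; N],
        op: impl Fn(T, T) -> T,
    ) -> [T; N] {
        array::from_fn(|j| if (k >> j) & 1 == 1 { op(a[j], b[j]) } else { src[j] })
    }

For example, `mask_op_model(src, k, a, b, i16::wrapping_add)` models the 16-bit writemask add immediately above.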
- - - - - - Average packed unsigned 8-bit integers in "a" and "b", and store the results in - "dst" using writemask "k" (elements are copied from "src" when the corresponding mask - bit is not set). - - FOR j := 0 to 31 - i := j*8 - IF k[j] - dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) >> 1 - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Average packed unsigned 8-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) >> 1 + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Average packed unsigned 8-bit integers in "a" and "b", and store the results in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 31 - i := j*8 - IF k[j] - dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) >> 1 - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + Average packed unsigned 8-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) >> 1 + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Average packed unsigned 8-bit integers in "a" and "b", and store the results in - "dst" using writemask "k" (elements are copied from "src" when the corresponding mask - bit is not set). - - FOR j := 0 to 15 - i := j*8 - IF k[j] - dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) >> 1 - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Average packed unsigned 8-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) >> 1 + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Average packed unsigned 8-bit integers in "a" and "b", and store the results in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 15 - i := j*8 - IF k[j] - dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) >> 1 - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + Average packed unsigned 8-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) >> 1 + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Average packed unsigned 16-bit integers in "a" and "b", and store the results - in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask - bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) >> 1 - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Average packed unsigned 16-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) >> 1 + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Average packed unsigned 16-bit integers in "a" and "b", and store the results - in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is - not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) >> 1 - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + Average packed unsigned 16-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) >> 1 + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Average packed unsigned 16-bit integers in "a" and "b", and store the results - in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask - bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) >> 1 - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Average packed unsigned 16-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) >> 1 + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Average packed unsigned 16-bit integers in "a" and "b", and store the results - in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is - not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) >> 1 - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + Average packed unsigned 16-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) >> 1 + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
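The unsigned averages above compute (a + b + 1) >> 1, which overflows the lane type if done naively; widening first, as the pseudocode implies, is the standard trick (illustrative helpers, not from this patch):

    /// Rounding average of two u8 lanes, widened to u16 so `a + b + 1`
    /// cannot overflow before the shift.
    fn avg_epu8_lane(a: u8, b: u8) -> u8 {
        ((a as u16 + b as u16 + 1) >> 1) as u8
    }

    /// The 16-bit form is identical with u16 widened to u32.
    fn avg_epu16_lane(a: u16, b: u16) -> u16 {
        ((a as u32 + b as u32 + 1) >> 1) as u16
    }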
- - - - - - Multiply packed unsigned 8-bit integers in "a" by packed signed 8-bit integers - in "b", producing intermediate signed 16-bit integers. Horizontally add adjacent pairs - of intermediate signed 16-bit integers, and pack the saturated results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := Saturate16( a[i+15:i+8]*b[i+15:i+8] + a[i+7:i]*b[i+7:i] ) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed unsigned 8-bit integers in "a" by packed signed 8-bit integers in "b", producing intermediate signed 16-bit integers. Horizontally add adjacent pairs of intermediate signed 16-bit integers, and pack the saturated results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := Saturate16( a[i+15:i+8]*b[i+15:i+8] + a[i+7:i]*b[i+7:i] ) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Multiply packed unsigned 8-bit integers in "a" by packed signed 8-bit integers - in "b", producing intermediate signed 16-bit integers. Horizontally add adjacent pairs - of intermediate signed 16-bit integers, and pack the saturated results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := Saturate16( a[i+15:i+8]*b[i+15:i+8] + a[i+7:i]*b[i+7:i] ) - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + Multiply packed unsigned 8-bit integers in "a" by packed signed 8-bit integers in "b", producing intermediate signed 16-bit integers. Horizontally add adjacent pairs of intermediate signed 16-bit integers, and pack the saturated results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := Saturate16( a[i+15:i+8]*b[i+15:i+8] + a[i+7:i]*b[i+7:i] ) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed unsigned 8-bit integers in "a" by packed signed 8-bit integers - in "b", producing intermediate signed 16-bit integers. Horizontally add adjacent pairs - of intermediate signed 16-bit integers, and pack the saturated results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := Saturate16( a[i+15:i+8]*b[i+15:i+8] + a[i+7:i]*b[i+7:i] ) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed unsigned 8-bit integers in "a" by packed signed 8-bit integers in "b", producing intermediate signed 16-bit integers. Horizontally add adjacent pairs of intermediate signed 16-bit integers, and pack the saturated results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := Saturate16( a[i+15:i+8]*b[i+15:i+8] + a[i+7:i]*b[i+7:i] ) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Multiply packed unsigned 8-bit integers in "a" by packed signed 8-bit integers - in "b", producing intermediate signed 16-bit integers. Horizontally add adjacent pairs - of intermediate signed 16-bit integers, and pack the saturated results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := Saturate16( a[i+15:i+8]*b[i+15:i+8] + a[i+7:i]*b[i+7:i] ) - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + Multiply packed unsigned 8-bit integers in "a" by packed signed 8-bit integers in "b", producing intermediate signed 16-bit integers. Horizontally add adjacent pairs of intermediate signed 16-bit integers, and pack the saturated results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := Saturate16( a[i+15:i+8]*b[i+15:i+8] + a[i+7:i]*b[i+7:i] ) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
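The pmaddubsw-style hunks above mix signedness: unsigned bytes from "a", signed bytes from "b", pairwise products summed and saturated to i16. A scalar sketch matching the 128-bit form, with the masking omitted since it follows the same pattern as earlier (hypothetical helper):

    use core::array;

    fn maddubs_epi16_model(a: [u8; 16], b: [i8; 16]) -> [i16; 8] {
        array::from_fn(|j| {
            // Each product fits comfortably in i32: |255 * -128| * 2 < 2^17.
            let lo = a[2 * j] as i32 * b[2 * j] as i32;
            let hi = a[2 * j + 1] as i32 * b[2 * j + 1] as i32;
            (lo + hi).clamp(i16::MIN as i32, i16::MAX as i32) as i16
        })
    }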
- - - - - - Multiply packed signed 16-bit integers in "a" and "b", producing intermediate - signed 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, - and pack the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := SignExtend32(a[i+31:i+16]*b[i+31:i+16]) + - SignExtend32(a[i+15:i]*b[i+15:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, and pack the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := SignExtend32(a[i+31:i+16]*b[i+31:i+16]) + SignExtend32(a[i+15:i]*b[i+15:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Multiply packed signed 16-bit integers in "a" and "b", producing intermediate - signed 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, - and pack the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := SignExtend32(a[i+31:i+16]*b[i+31:i+16]) + - SignExtend32(a[i+15:i]*b[i+15:i]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, and pack the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := SignExtend32(a[i+31:i+16]*b[i+31:i+16]) + SignExtend32(a[i+15:i]*b[i+15:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed signed 16-bit integers in "a" and "b", producing intermediate - signed 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, - and pack the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := SignExtend32(a[i+31:i+16]*b[i+31:i+16]) + - SignExtend32(a[i+15:i]*b[i+15:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, and pack the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := SignExtend32(a[i+31:i+16]*b[i+31:i+16]) + SignExtend32(a[i+15:i]*b[i+15:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Multiply packed signed 16-bit integers in "a" and "b", producing intermediate - signed 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, - and pack the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := SignExtend32(a[i+31:i+16]*b[i+31:i+16]) + - SignExtend32(a[i+15:i]*b[i+15:i]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, and pack the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := SignExtend32(a[i+31:i+16]*b[i+31:i+16]) + SignExtend32(a[i+15:i]*b[i+15:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
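The pmaddwd-style hunks multiply signed 16-bit pairs and sum them into 32-bit lanes with no saturation. The one subtlety is that the sum can wrap in 32 bits, but only when both pairs are -32768 * -32768; wrapping_add reproduces that hardware behaviour (hypothetical helper matching the 128-bit form):

    use core::array;

    fn madd_epi16_model(a: [i16; 8], b: [i16; 8]) -> [i32; 4] {
        array::from_fn(|j| {
            let lo = a[2 * j] as i32 * b[2 * j] as i32;
            let hi = a[2 * j + 1] as i32 * b[2 * j + 1] as i32;
            // Each product fits in i32 (at most 2^30); only the sum can wrap.
            lo.wrapping_add(hi)
        })
    }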
- - - - - - Compare packed signed 8-bit integers in "a" and "b", and store packed maximum - values in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 31 - i := j*8 - IF k[j] - dst[i+7:i] := MAX(a[i+7:i], b[i+7:i]) - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Compare packed signed 8-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := MAX(a[i+7:i], b[i+7:i]) + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Compare packed signed 8-bit integers in "a" and "b", and store packed maximum - values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask - bit is not set). - - FOR j := 0 to 31 - i := j*8 - IF k[j] - dst[i+7:i] := MAX(a[i+7:i], b[i+7:i]) - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + Compare packed signed 8-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := MAX(a[i+7:i], b[i+7:i]) + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Compare packed signed 8-bit integers in "a" and "b", and store packed maximum - values in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*8 - IF k[j] - dst[i+7:i] := MAX(a[i+7:i], b[i+7:i]) - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Compare packed signed 8-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := MAX(a[i+7:i], b[i+7:i]) + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Compare packed signed 8-bit integers in "a" and "b", and store packed maximum - values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask - bit is not set). - - FOR j := 0 to 15 - i := j*8 - IF k[j] - dst[i+7:i] := MAX(a[i+7:i], b[i+7:i]) - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + Compare packed signed 8-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := MAX(a[i+7:i], b[i+7:i]) + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Compare packed signed 16-bit integers in "a" and "b", and store packed maximum - values in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := MAX(a[i+15:i], b[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Compare packed signed 16-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := MAX(a[i+15:i], b[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Compare packed signed 16-bit integers in "a" and "b", and store packed maximum - values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask - bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := MAX(a[i+15:i], b[i+15:i]) - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + Compare packed signed 16-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := MAX(a[i+15:i], b[i+15:i]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Compare packed signed 16-bit integers in "a" and "b", and store packed maximum - values in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := MAX(a[i+15:i], b[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Compare packed signed 16-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := MAX(a[i+15:i], b[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Compare packed signed 16-bit integers in "a" and "b", and store packed maximum - values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask - bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := MAX(a[i+15:i], b[i+15:i]) - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + Compare packed signed 16-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := MAX(a[i+15:i], b[i+15:i]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Compare packed unsigned 8-bit integers in "a" and "b", and store packed maximum - values in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 31 - i := j*8 - IF k[j] - dst[i+7:i] := MAX(a[i+7:i], b[i+7:i]) - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Compare packed unsigned 8-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := MAX(a[i+7:i], b[i+7:i]) + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Compare packed unsigned 8-bit integers in "a" and "b", and store packed maximum - values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask - bit is not set). - - FOR j := 0 to 31 - i := j*8 - IF k[j] - dst[i+7:i] := MAX(a[i+7:i], b[i+7:i]) - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + Compare packed unsigned 8-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := MAX(a[i+7:i], b[i+7:i]) + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Compare packed unsigned 8-bit integers in "a" and "b", and store packed maximum - values in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*8 - IF k[j] - dst[i+7:i] := MAX(a[i+7:i], b[i+7:i]) - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Compare packed unsigned 8-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := MAX(a[i+7:i], b[i+7:i]) + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Compare packed unsigned 8-bit integers in "a" and "b", and store packed maximum - values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask - bit is not set). - - FOR j := 0 to 15 - i := j*8 - IF k[j] - dst[i+7:i] := MAX(a[i+7:i], b[i+7:i]) - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + Compare packed unsigned 8-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := MAX(a[i+7:i], b[i+7:i]) + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Compare packed unsigned 16-bit integers in "a" and "b", and store packed - maximum values in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := MAX(a[i+15:i], b[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Compare packed unsigned 16-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := MAX(a[i+15:i], b[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Compare packed unsigned 16-bit integers in "a" and "b", and store packed - maximum values in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := MAX(a[i+15:i], b[i+15:i]) - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + Compare packed unsigned 16-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := MAX(a[i+15:i], b[i+15:i]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Compare packed unsigned 16-bit integers in "a" and "b", and store packed - maximum values in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := MAX(a[i+15:i], b[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Compare packed unsigned 16-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := MAX(a[i+15:i], b[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Compare packed unsigned 16-bit integers in "a" and "b", and store packed - maximum values in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := MAX(a[i+15:i], b[i+15:i]) - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + Compare packed unsigned 16-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := MAX(a[i+15:i], b[i+15:i]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
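The pseudocode writes MAX for both the signed and the unsigned comparisons above, but the element type changes the result for the same bit patterns; a two-line check makes the difference concrete (hypothetical test):

    #[test]
    fn signed_vs_unsigned_max() {
        assert_eq!(i8::max(-1, 1), 1); // epi8: 0xFF is -1, so 1 wins
        assert_eq!(u8::max(0xFF, 1), 0xFF); // epu8: the same bits read as 255
    }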
- - - - - - Compare packed signed 8-bit integers in "a" and "b", and store packed minimum - values in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 31 - i := j*8 - IF k[j] - dst[i+7:i] := MIN(a[i+7:i], b[i+7:i]) - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Compare packed signed 8-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := MIN(a[i+7:i], b[i+7:i]) + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Compare packed signed 8-bit integers in "a" and "b", and store packed minimum - values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask - bit is not set). - - FOR j := 0 to 31 - i := j*8 - IF k[j] - dst[i+7:i] := MIN(a[i+7:i], b[i+7:i]) - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + Compare packed signed 8-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := MIN(a[i+7:i], b[i+7:i]) + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Compare packed signed 8-bit integers in "a" and "b", and store packed minimum - values in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*8 - IF k[j] - dst[i+7:i] := MIN(a[i+7:i], b[i+7:i]) - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Compare packed signed 8-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := MIN(a[i+7:i], b[i+7:i]) + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Compare packed signed 8-bit integers in "a" and "b", and store packed minimum - values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask - bit is not set). - - FOR j := 0 to 15 - i := j*8 - IF k[j] - dst[i+7:i] := MIN(a[i+7:i], b[i+7:i]) - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + Compare packed signed 8-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := MIN(a[i+7:i], b[i+7:i]) + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Compare packed signed 16-bit integers in "a" and "b", and store packed minimum - values in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := MIN(a[i+15:i], b[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Compare packed signed 16-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := MIN(a[i+15:i], b[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Compare packed signed 16-bit integers in "a" and "b", and store packed minimum - values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask - bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := MIN(a[i+15:i], b[i+15:i]) - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + Compare packed signed 16-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := MIN(a[i+15:i], b[i+15:i]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Compare packed signed 16-bit integers in "a" and "b", and store packed minimum - values in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := MIN(a[i+15:i], b[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Compare packed signed 16-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := MIN(a[i+15:i], b[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Compare packed signed 16-bit integers in "a" and "b", and store packed minimum - values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask - bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := MIN(a[i+15:i], b[i+15:i]) - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + Compare packed signed 16-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := MIN(a[i+15:i], b[i+15:i]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Compare packed unsigned 8-bit integers in "a" and "b", and store packed minimum - values in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 31 - i := j*8 - IF k[j] - dst[i+7:i] := MIN(a[i+7:i], b[i+7:i]) - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Compare packed unsigned 8-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := MIN(a[i+7:i], b[i+7:i]) + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Compare packed unsigned 8-bit integers in "a" and "b", and store packed minimum - values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask - bit is not set). - - FOR j := 0 to 31 - i := j*8 - IF k[j] - dst[i+7:i] := MIN(a[i+7:i], b[i+7:i]) - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + Compare packed unsigned 8-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := MIN(a[i+7:i], b[i+7:i]) + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Compare packed unsigned 8-bit integers in "a" and "b", and store packed minimum - values in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*8 - IF k[j] - dst[i+7:i] := MIN(a[i+7:i], b[i+7:i]) - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Compare packed unsigned 8-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := MIN(a[i+7:i], b[i+7:i]) + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Compare packed unsigned 8-bit integers in "a" and "b", and store packed minimum - values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask - bit is not set). - - FOR j := 0 to 15 - i := j*8 - IF k[j] - dst[i+7:i] := MIN(a[i+7:i], b[i+7:i]) - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + Compare packed unsigned 8-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := MIN(a[i+7:i], b[i+7:i]) + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Compare packed unsigned 16-bit integers in "a" and "b", and store packed - minimum values in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := MIN(a[i+15:i], b[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Compare packed unsigned 16-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := MIN(a[i+15:i], b[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Compare packed unsigned 16-bit integers in "a" and "b", and store packed - minimum values in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := MIN(a[i+15:i], b[i+15:i]) - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + Compare packed unsigned 16-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := MIN(a[i+15:i], b[i+15:i]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Compare packed unsigned 16-bit integers in "a" and "b", and store packed - minimum values in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := MIN(a[i+15:i], b[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Compare packed unsigned 16-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := MIN(a[i+15:i], b[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Compare packed unsigned 16-bit integers in "a" and "b", and store packed - minimum values in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := MIN(a[i+15:i], b[i+15:i]) - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + + Compare packed unsigned 16-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := MIN(a[i+15:i], b[i+15:i]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
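MIN follows the same scheme; combined with a zeromask it reproduces the final group of hunks, for example the 128-bit unsigned 16-bit form (illustrative model, not an intrinsic from this patch):

    use core::array;

    /// Zeromask min over eight u16 lanes, straight from the pseudocode:
    /// masked-off lanes become 0 rather than copying from a source vector.
    fn maskz_min_epu16_model(k: u8, a: [u16; 8], b: [u16; 8]) -> [u16; 8] {
        array::from_fn(|j| if (k >> j) & 1 == 1 { a[j].min(b[j]) } else { 0 })
    }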
Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits [16:1] to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*16
    IF k[j]
        tmp[31:0] := ((SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])) >> 14) + 1
        dst[i+15:i] := tmp[16:1]
    ELSE
        dst[i+15:i] := src[i+15:i]
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Arithmetic.

Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits [16:1] to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*16
    IF k[j]
        tmp[31:0] := ((SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])) >> 14) + 1
        dst[i+15:i] := tmp[16:1]
    ELSE
        dst[i+15:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Arithmetic.

Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits [16:1] to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := j*16
    IF k[j]
        tmp[31:0] := ((SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])) >> 14) + 1
        dst[i+15:i] := tmp[16:1]
    ELSE
        dst[i+15:i] := src[i+15:i]
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Arithmetic.

Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits [16:1] to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := j*16
    IF k[j]
        tmp[31:0] := ((SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])) >> 14) + 1
        dst[i+15:i] := tmp[16:1]
    ELSE
        dst[i+15:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Arithmetic.
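The ">> 14, + 1, keep bits [16:1]" sequence above is a round-to-nearest fixed-point multiply. A single-lane Rust sketch (function name illustrative):

// tmp = ((a * b) >> 14) + 1, then bits [16:1] of tmp: multiply two Q15
// values and round the high half to nearest. `(tmp >> 1) as i16`
// extracts exactly tmp[16:1].
fn mulhrs_lane(a: i16, b: i16) -> i16 {
    let tmp = ((a as i32 * b as i32) >> 14) + 1;
    (tmp >> 1) as i16
}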
Multiply the packed unsigned 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*16
    IF k[j]
        tmp[31:0] := a[i+15:i] * b[i+15:i]
        dst[i+15:i] := tmp[31:16]
    ELSE
        dst[i+15:i] := src[i+15:i]
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Arithmetic.

Multiply the packed unsigned 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*16
    IF k[j]
        tmp[31:0] := a[i+15:i] * b[i+15:i]
        dst[i+15:i] := tmp[31:16]
    ELSE
        dst[i+15:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Arithmetic.

Multiply the packed unsigned 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := j*16
    IF k[j]
        tmp[31:0] := a[i+15:i] * b[i+15:i]
        dst[i+15:i] := tmp[31:16]
    ELSE
        dst[i+15:i] := src[i+15:i]
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Arithmetic.

Multiply the packed unsigned 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := j*16
    IF k[j]
        tmp[31:0] := a[i+15:i] * b[i+15:i]
        dst[i+15:i] := tmp[31:16]
    ELSE
        dst[i+15:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Arithmetic.
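A one-lane Rust model of the unsigned high multiply above (sketch, name ours):

// Widen to u32, multiply, keep the upper 16 bits of the 32-bit product.
fn mulhi_u16_lane(a: u16, b: u16) -> u16 {
    ((a as u32 * b as u32) >> 16) as u16
}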
Multiply the packed signed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*16
    IF k[j]
        tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])
        dst[i+15:i] := tmp[31:16]
    ELSE
        dst[i+15:i] := src[i+15:i]
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Arithmetic.

Multiply the packed signed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*16
    IF k[j]
        tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])
        dst[i+15:i] := tmp[31:16]
    ELSE
        dst[i+15:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Arithmetic.

Multiply the packed signed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := j*16
    IF k[j]
        tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])
        dst[i+15:i] := tmp[31:16]
    ELSE
        dst[i+15:i] := src[i+15:i]
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Arithmetic.

Multiply the packed signed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := j*16
    IF k[j]
        tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])
        dst[i+15:i] := tmp[31:16]
    ELSE
        dst[i+15:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Arithmetic.
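The signed variant differs from the unsigned one only in sign-extending before the multiply (sketch):

// Sign-extend to i32 and multiply; the arithmetic shift keeps the
// signed upper half of the product.
fn mulhi_i16_lane(a: i16, b: i16) -> i16 {
    ((a as i32 * b as i32) >> 16) as i16
}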
Multiply the packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*16
    IF k[j]
        tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])
        dst[i+15:i] := tmp[15:0]
    ELSE
        dst[i+15:i] := src[i+15:i]
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Arithmetic.

Multiply the packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*16
    IF k[j]
        tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])
        dst[i+15:i] := tmp[15:0]
    ELSE
        dst[i+15:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Arithmetic.

Multiply the packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := j*16
    IF k[j]
        tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])
        dst[i+15:i] := tmp[15:0]
    ELSE
        dst[i+15:i] := src[i+15:i]
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Arithmetic.

Multiply the packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := j*16
    IF k[j]
        tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])
        dst[i+15:i] := tmp[15:0]
    ELSE
        dst[i+15:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Arithmetic.
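Because only tmp[15:0] is kept, the low multiply gives the same bits for signed and unsigned inputs; in Rust it is simply a wrapping multiply (sketch):

// The low 16 bits of the 32-bit product, i.e. modular 16-bit multiplication.
fn mullo_i16_lane(a: i16, b: i16) -> i16 {
    a.wrapping_mul(b)
}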
Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 31
    i := j*8
    IF k[j]
        dst[i+7:i] := a[i+7:i] - b[i+7:i]
    ELSE
        dst[i+7:i] := src[i+7:i]
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Arithmetic.

Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 31
    i := j*8
    IF k[j]
        dst[i+7:i] := a[i+7:i] - b[i+7:i]
    ELSE
        dst[i+7:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Arithmetic.

Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*8
    IF k[j]
        dst[i+7:i] := a[i+7:i] - b[i+7:i]
    ELSE
        dst[i+7:i] := src[i+7:i]
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Arithmetic.

Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*8
    IF k[j]
        dst[i+7:i] := a[i+7:i] - b[i+7:i]
    ELSE
        dst[i+7:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Arithmetic.
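Unlike the saturating forms that follow, these subtractions are modular (sketch):

// dst[i+7:i] := a[i+7:i] - b[i+7:i] with two's-complement wraparound,
// e.g. sub_i8_lane(i8::MIN, 1) == i8::MAX.
fn sub_i8_lane(a: i8, b: i8) -> i8 {
    a.wrapping_sub(b)
}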
Subtract packed signed 8-bit integers in "b" from packed 8-bit integers in "a" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 31
    i := j*8
    IF k[j]
        dst[i+7:i] := Saturate8(a[i+7:i] - b[i+7:i])
    ELSE
        dst[i+7:i] := src[i+7:i]
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Arithmetic.

Subtract packed signed 8-bit integers in "b" from packed 8-bit integers in "a" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 31
    i := j*8
    IF k[j]
        dst[i+7:i] := Saturate8(a[i+7:i] - b[i+7:i])
    ELSE
        dst[i+7:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Arithmetic.

Subtract packed signed 8-bit integers in "b" from packed 8-bit integers in "a" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*8
    IF k[j]
        dst[i+7:i] := Saturate8(a[i+7:i] - b[i+7:i])
    ELSE
        dst[i+7:i] := src[i+7:i]
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Arithmetic.

Subtract packed signed 8-bit integers in "b" from packed 8-bit integers in "a" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*8
    IF k[j]
        dst[i+7:i] := Saturate8(a[i+7:i] - b[i+7:i])
    ELSE
        dst[i+7:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Arithmetic.
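Saturate8(a - b) clamps the 9-bit difference to the i8 range, which is exactly Rust's saturating subtraction (sketch):

// e.g. subs_i8_lane(-100, 100) == -128, where the wrapping form would give 56.
fn subs_i8_lane(a: i8, b: i8) -> i8 {
    a.saturating_sub(b)
}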
Subtract packed signed 16-bit integers in "b" from packed 16-bit integers in "a" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*16
    IF k[j]
        dst[i+15:i] := Saturate16(a[i+15:i] - b[i+15:i])
    ELSE
        dst[i+15:i] := src[i+15:i]
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Arithmetic.

Subtract packed signed 16-bit integers in "b" from packed 16-bit integers in "a" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*16
    IF k[j]
        dst[i+15:i] := Saturate16(a[i+15:i] - b[i+15:i])
    ELSE
        dst[i+15:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Arithmetic.

Subtract packed signed 16-bit integers in "b" from packed 16-bit integers in "a" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := j*16
    IF k[j]
        dst[i+15:i] := Saturate16(a[i+15:i] - b[i+15:i])
    ELSE
        dst[i+15:i] := src[i+15:i]
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Arithmetic.

Subtract packed signed 16-bit integers in "b" from packed 16-bit integers in "a" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := j*16
    IF k[j]
        dst[i+15:i] := Saturate16(a[i+15:i] - b[i+15:i])
    ELSE
        dst[i+15:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Arithmetic.
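The 16-bit form is the same clamp at the i16 range (sketch):

// e.g. 28672 - (-28672) = 57344, which clamps to i16::MAX (32767).
fn subs_i16_lane(a: i16, b: i16) -> i16 {
    a.saturating_sub(b)
}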
Subtract packed unsigned 8-bit integers in "b" from packed unsigned 8-bit integers in "a" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 31
    i := j*8
    IF k[j]
        dst[i+7:i] := SaturateU8(a[i+7:i] - b[i+7:i])
    ELSE
        dst[i+7:i] := src[i+7:i]
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Arithmetic.

Subtract packed unsigned 8-bit integers in "b" from packed unsigned 8-bit integers in "a" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 31
    i := j*8
    IF k[j]
        dst[i+7:i] := SaturateU8(a[i+7:i] - b[i+7:i])
    ELSE
        dst[i+7:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Arithmetic.

Subtract packed unsigned 8-bit integers in "b" from packed unsigned 8-bit integers in "a" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*8
    IF k[j]
        dst[i+7:i] := SaturateU8(a[i+7:i] - b[i+7:i])
    ELSE
        dst[i+7:i] := src[i+7:i]
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Arithmetic.

Subtract packed unsigned 8-bit integers in "b" from packed unsigned 8-bit integers in "a" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*8
    IF k[j]
        dst[i+7:i] := SaturateU8(a[i+7:i] - b[i+7:i])
    ELSE
        dst[i+7:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Arithmetic.
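For unsigned inputs, SaturateU8 floors the difference at zero (sketch):

// e.g. subs_u8_lane(10, 20) == 0 rather than wrapping to 246.
fn subs_u8_lane(a: u8, b: u8) -> u8 {
    a.saturating_sub(b)
}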
Subtract packed unsigned 16-bit integers in "b" from packed unsigned 16-bit integers in "a" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*16
    IF k[j]
        dst[i+15:i] := SaturateU16(a[i+15:i] - b[i+15:i])
    ELSE
        dst[i+15:i] := src[i+15:i]
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Arithmetic.

Subtract packed unsigned 16-bit integers in "b" from packed unsigned 16-bit integers in "a" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*16
    IF k[j]
        dst[i+15:i] := SaturateU16(a[i+15:i] - b[i+15:i])
    ELSE
        dst[i+15:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Arithmetic.

Subtract packed unsigned 16-bit integers in "b" from packed unsigned 16-bit integers in "a" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := j*16
    IF k[j]
        dst[i+15:i] := SaturateU16(a[i+15:i] - b[i+15:i])
    ELSE
        dst[i+15:i] := src[i+15:i]
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Arithmetic.

Subtract packed unsigned 16-bit integers in "b" from packed unsigned 16-bit integers in "a" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := j*16
    IF k[j]
        dst[i+15:i] := SaturateU16(a[i+15:i] - b[i+15:i])
    ELSE
        dst[i+15:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Arithmetic.
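A sketch of the 128-bit zeromask form, showing how the mask interacts with the saturating lanes (helper name ours):

// Lanes with a clear mask bit become 0 rather than being copied from a source.
fn maskz_subs_u16x8(k: u8, a: [u16; 8], b: [u16; 8]) -> [u16; 8] {
    let mut dst = [0u16; 8];
    for j in 0..8 {
        if (k >> j) & 1 == 1 {
            dst[j] = a[j].saturating_sub(b[j]);
        }
    }
    dst
}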
Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*16
    IF k[j]
        dst[i+15:i] := a[i+15:i] - b[i+15:i]
    ELSE
        dst[i+15:i] := src[i+15:i]
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Arithmetic.

Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*16
    IF k[j]
        dst[i+15:i] := a[i+15:i] - b[i+15:i]
    ELSE
        dst[i+15:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Arithmetic.

Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := j*16
    IF k[j]
        dst[i+15:i] := a[i+15:i] - b[i+15:i]
    ELSE
        dst[i+15:i] := src[i+15:i]
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Arithmetic.

Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := j*16
    IF k[j]
        dst[i+15:i] := a[i+15:i] - b[i+15:i]
    ELSE
        dst[i+15:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Arithmetic.
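As with the 8-bit case, the plain subtract wraps (sketch):

// e.g. sub_i16_lane(i16::MIN, 1) == i16::MAX.
fn sub_i16_lane(a: i16, b: i16) -> i16 {
    a.wrapping_sub(b)
}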
Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit integers using signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

tmp_dst[15:0] := Saturate16(a[31:0])
tmp_dst[31:16] := Saturate16(a[63:32])
tmp_dst[47:32] := Saturate16(a[95:64])
tmp_dst[63:48] := Saturate16(a[127:96])
tmp_dst[79:64] := Saturate16(b[31:0])
tmp_dst[95:80] := Saturate16(b[63:32])
tmp_dst[111:96] := Saturate16(b[95:64])
tmp_dst[127:112] := Saturate16(b[127:96])
tmp_dst[143:128] := Saturate16(a[159:128])
tmp_dst[159:144] := Saturate16(a[191:160])
tmp_dst[175:160] := Saturate16(a[223:192])
tmp_dst[191:176] := Saturate16(a[255:224])
tmp_dst[207:192] := Saturate16(b[159:128])
tmp_dst[223:208] := Saturate16(b[191:160])
tmp_dst[239:224] := Saturate16(b[223:192])
tmp_dst[255:240] := Saturate16(b[255:224])
FOR j := 0 to 15
    i := j*16
    IF k[j]
        dst[i+15:i] := tmp_dst[i+15:i]
    ELSE
        dst[i+15:i] := src[i+15:i]
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Miscellaneous, Convert.

Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit integers using signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

tmp_dst[15:0] := Saturate16(a[31:0])
tmp_dst[31:16] := Saturate16(a[63:32])
tmp_dst[47:32] := Saturate16(a[95:64])
tmp_dst[63:48] := Saturate16(a[127:96])
tmp_dst[79:64] := Saturate16(b[31:0])
tmp_dst[95:80] := Saturate16(b[63:32])
tmp_dst[111:96] := Saturate16(b[95:64])
tmp_dst[127:112] := Saturate16(b[127:96])
tmp_dst[143:128] := Saturate16(a[159:128])
tmp_dst[159:144] := Saturate16(a[191:160])
tmp_dst[175:160] := Saturate16(a[223:192])
tmp_dst[191:176] := Saturate16(a[255:224])
tmp_dst[207:192] := Saturate16(b[159:128])
tmp_dst[223:208] := Saturate16(b[191:160])
tmp_dst[239:224] := Saturate16(b[223:192])
tmp_dst[255:240] := Saturate16(b[255:224])
FOR j := 0 to 15
    i := j*16
    IF k[j]
        dst[i+15:i] := tmp_dst[i+15:i]
    ELSE
        dst[i+15:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Miscellaneous, Convert.

Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit integers using signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

tmp_dst[15:0] := Saturate16(a[31:0])
tmp_dst[31:16] := Saturate16(a[63:32])
tmp_dst[47:32] := Saturate16(a[95:64])
tmp_dst[63:48] := Saturate16(a[127:96])
tmp_dst[79:64] := Saturate16(b[31:0])
tmp_dst[95:80] := Saturate16(b[63:32])
tmp_dst[111:96] := Saturate16(b[95:64])
tmp_dst[127:112] := Saturate16(b[127:96])
FOR j := 0 to 7
    i := j*16
    IF k[j]
        dst[i+15:i] := tmp_dst[i+15:i]
    ELSE
        dst[i+15:i] := src[i+15:i]
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Miscellaneous, Convert.

Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit integers using signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

tmp_dst[15:0] := Saturate16(a[31:0])
tmp_dst[31:16] := Saturate16(a[63:32])
tmp_dst[47:32] := Saturate16(a[95:64])
tmp_dst[63:48] := Saturate16(a[127:96])
tmp_dst[79:64] := Saturate16(b[31:0])
tmp_dst[95:80] := Saturate16(b[63:32])
tmp_dst[111:96] := Saturate16(b[95:64])
tmp_dst[127:112] := Saturate16(b[127:96])
FOR j := 0 to 7
    i := j*16
    IF k[j]
        dst[i+15:i] := tmp_dst[i+15:i]
    ELSE
        dst[i+15:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Miscellaneous, Convert.
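The tmp_dst layout above interleaves per 128-bit lane: within each lane the low four words come from "a" and the high four from "b". A Rust sketch of the unmasked 256-bit pack (function name ours):

// Pack eight i32s from `a` and eight from `b` into sixteen saturated i16s,
// interleaving the a/b halves within each 128-bit lane as the pseudocode does.
fn packs_i32x8(a: [i32; 8], b: [i32; 8]) -> [i16; 16] {
    let sat = |x: i32| x.clamp(i16::MIN as i32, i16::MAX as i32) as i16;
    let mut dst = [0i16; 16];
    for lane in 0..2 {
        for j in 0..4 {
            dst[lane * 8 + j] = sat(a[lane * 4 + j]);
            dst[lane * 8 + 4 + j] = sat(b[lane * 4 + j]);
        }
    }
    dst
}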
Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

tmp_dst[7:0] := Saturate8(a[15:0])
tmp_dst[15:8] := Saturate8(a[31:16])
tmp_dst[23:16] := Saturate8(a[47:32])
tmp_dst[31:24] := Saturate8(a[63:48])
tmp_dst[39:32] := Saturate8(a[79:64])
tmp_dst[47:40] := Saturate8(a[95:80])
tmp_dst[55:48] := Saturate8(a[111:96])
tmp_dst[63:56] := Saturate8(a[127:112])
tmp_dst[71:64] := Saturate8(b[15:0])
tmp_dst[79:72] := Saturate8(b[31:16])
tmp_dst[87:80] := Saturate8(b[47:32])
tmp_dst[95:88] := Saturate8(b[63:48])
tmp_dst[103:96] := Saturate8(b[79:64])
tmp_dst[111:104] := Saturate8(b[95:80])
tmp_dst[119:112] := Saturate8(b[111:96])
tmp_dst[127:120] := Saturate8(b[127:112])
tmp_dst[135:128] := Saturate8(a[143:128])
tmp_dst[143:136] := Saturate8(a[159:144])
tmp_dst[151:144] := Saturate8(a[175:160])
tmp_dst[159:152] := Saturate8(a[191:176])
tmp_dst[167:160] := Saturate8(a[207:192])
tmp_dst[175:168] := Saturate8(a[223:208])
tmp_dst[183:176] := Saturate8(a[239:224])
tmp_dst[191:184] := Saturate8(a[255:240])
tmp_dst[199:192] := Saturate8(b[143:128])
tmp_dst[207:200] := Saturate8(b[159:144])
tmp_dst[215:208] := Saturate8(b[175:160])
tmp_dst[223:216] := Saturate8(b[191:176])
tmp_dst[231:224] := Saturate8(b[207:192])
tmp_dst[239:232] := Saturate8(b[223:208])
tmp_dst[247:240] := Saturate8(b[239:224])
tmp_dst[255:248] := Saturate8(b[255:240])
FOR j := 0 to 31
    i := j*8
    IF k[j]
        dst[i+7:i] := tmp_dst[i+7:i]
    ELSE
        dst[i+7:i] := src[i+7:i]
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Miscellaneous, Convert.

Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

tmp_dst[7:0] := Saturate8(a[15:0])
tmp_dst[15:8] := Saturate8(a[31:16])
tmp_dst[23:16] := Saturate8(a[47:32])
tmp_dst[31:24] := Saturate8(a[63:48])
tmp_dst[39:32] := Saturate8(a[79:64])
tmp_dst[47:40] := Saturate8(a[95:80])
tmp_dst[55:48] := Saturate8(a[111:96])
tmp_dst[63:56] := Saturate8(a[127:112])
tmp_dst[71:64] := Saturate8(b[15:0])
tmp_dst[79:72] := Saturate8(b[31:16])
tmp_dst[87:80] := Saturate8(b[47:32])
tmp_dst[95:88] := Saturate8(b[63:48])
tmp_dst[103:96] := Saturate8(b[79:64])
tmp_dst[111:104] := Saturate8(b[95:80])
tmp_dst[119:112] := Saturate8(b[111:96])
tmp_dst[127:120] := Saturate8(b[127:112])
tmp_dst[135:128] := Saturate8(a[143:128])
tmp_dst[143:136] := Saturate8(a[159:144])
tmp_dst[151:144] := Saturate8(a[175:160])
tmp_dst[159:152] := Saturate8(a[191:176])
tmp_dst[167:160] := Saturate8(a[207:192])
tmp_dst[175:168] := Saturate8(a[223:208])
tmp_dst[183:176] := Saturate8(a[239:224])
tmp_dst[191:184] := Saturate8(a[255:240])
tmp_dst[199:192] := Saturate8(b[143:128])
tmp_dst[207:200] := Saturate8(b[159:144])
tmp_dst[215:208] := Saturate8(b[175:160])
tmp_dst[223:216] := Saturate8(b[191:176])
tmp_dst[231:224] := Saturate8(b[207:192])
tmp_dst[239:232] := Saturate8(b[223:208])
tmp_dst[247:240] := Saturate8(b[239:224])
tmp_dst[255:248] := Saturate8(b[255:240])
FOR j := 0 to 31
    i := j*8
    IF k[j]
        dst[i+7:i] := tmp_dst[i+7:i]
    ELSE
        dst[i+7:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Miscellaneous, Convert.

Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

tmp_dst[7:0] := Saturate8(a[15:0])
tmp_dst[15:8] := Saturate8(a[31:16])
tmp_dst[23:16] := Saturate8(a[47:32])
tmp_dst[31:24] := Saturate8(a[63:48])
tmp_dst[39:32] := Saturate8(a[79:64])
tmp_dst[47:40] := Saturate8(a[95:80])
tmp_dst[55:48] := Saturate8(a[111:96])
tmp_dst[63:56] := Saturate8(a[127:112])
tmp_dst[71:64] := Saturate8(b[15:0])
tmp_dst[79:72] := Saturate8(b[31:16])
tmp_dst[87:80] := Saturate8(b[47:32])
tmp_dst[95:88] := Saturate8(b[63:48])
tmp_dst[103:96] := Saturate8(b[79:64])
tmp_dst[111:104] := Saturate8(b[95:80])
tmp_dst[119:112] := Saturate8(b[111:96])
tmp_dst[127:120] := Saturate8(b[127:112])
FOR j := 0 to 15
    i := j*8
    IF k[j]
        dst[i+7:i] := tmp_dst[i+7:i]
    ELSE
        dst[i+7:i] := src[i+7:i]
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Miscellaneous, Convert.

Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

tmp_dst[7:0] := Saturate8(a[15:0])
tmp_dst[15:8] := Saturate8(a[31:16])
tmp_dst[23:16] := Saturate8(a[47:32])
tmp_dst[31:24] := Saturate8(a[63:48])
tmp_dst[39:32] := Saturate8(a[79:64])
tmp_dst[47:40] := Saturate8(a[95:80])
tmp_dst[55:48] := Saturate8(a[111:96])
tmp_dst[63:56] := Saturate8(a[127:112])
tmp_dst[71:64] := Saturate8(b[15:0])
tmp_dst[79:72] := Saturate8(b[31:16])
tmp_dst[87:80] := Saturate8(b[47:32])
tmp_dst[95:88] := Saturate8(b[63:48])
tmp_dst[103:96] := Saturate8(b[79:64])
tmp_dst[111:104] := Saturate8(b[95:80])
tmp_dst[119:112] := Saturate8(b[111:96])
tmp_dst[127:120] := Saturate8(b[127:112])
FOR j := 0 to 15
    i := j*8
    IF k[j]
        dst[i+7:i] := tmp_dst[i+7:i]
    ELSE
        dst[i+7:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512BW, AVX512VL. Header: immintrin.h. Category: Miscellaneous, Convert.
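The byte pack follows the same lane-interleaved pattern one level down; the 128-bit form is the simplest to read (sketch, name ours):

// Sixteen saturated i8 lanes: the low eight from `a`, the high eight from `b`.
fn packs_i16x8(a: [i16; 8], b: [i16; 8]) -> [i8; 16] {
    let sat = |x: i16| x.clamp(i8::MIN as i16, i8::MAX as i16) as i8;
    let mut dst = [0i8; 16];
    for j in 0..8 {
        dst[j] = sat(a[j]);
        dst[8 + j] = sat(b[j]);
    }
    dst
}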
[AVX512BW, AVX512VL; category: Miscellaneous (was Convert); header: immintrin.h]
Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit integers using unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

tmp_dst[15:0] := SaturateU16(a[31:0])
tmp_dst[31:16] := SaturateU16(a[63:32])
tmp_dst[47:32] := SaturateU16(a[95:64])
tmp_dst[63:48] := SaturateU16(a[127:96])
tmp_dst[79:64] := SaturateU16(b[31:0])
tmp_dst[95:80] := SaturateU16(b[63:32])
tmp_dst[111:96] := SaturateU16(b[95:64])
tmp_dst[127:112] := SaturateU16(b[127:96])
tmp_dst[143:128] := SaturateU16(a[159:128])
tmp_dst[159:144] := SaturateU16(a[191:160])
tmp_dst[175:160] := SaturateU16(a[223:192])
tmp_dst[191:176] := SaturateU16(a[255:224])
tmp_dst[207:192] := SaturateU16(b[159:128])
tmp_dst[223:208] := SaturateU16(b[191:160])
tmp_dst[239:224] := SaturateU16(b[223:192])
tmp_dst[255:240] := SaturateU16(b[255:224])
FOR j := 0 to 15
    i := j*16
    IF k[j]
        dst[i+15:i] := tmp_dst[i+15:i]
    ELSE
        dst[i+15:i] := src[i+15:i]
    FI
ENDFOR
dst[MAX:256] := 0

[AVX512BW, AVX512VL; category: Miscellaneous (was Convert); header: immintrin.h]
Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit integers using unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Operation as above, with dst[i+15:i] := 0 in the ELSE branch.

[AVX512BW, AVX512VL; category: Miscellaneous (was Convert); header: immintrin.h]
Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit integers using unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

tmp_dst[15:0] := SaturateU16(a[31:0])
tmp_dst[31:16] := SaturateU16(a[63:32])
tmp_dst[47:32] := SaturateU16(a[95:64])
tmp_dst[63:48] := SaturateU16(a[127:96])
tmp_dst[79:64] := SaturateU16(b[31:0])
tmp_dst[95:80] := SaturateU16(b[63:32])
tmp_dst[111:96] := SaturateU16(b[95:64])
tmp_dst[127:112] := SaturateU16(b[127:96])
FOR j := 0 to 7
    i := j*16
    IF k[j]
        dst[i+15:i] := tmp_dst[i+15:i]
    ELSE
        dst[i+15:i] := src[i+15:i]
    FI
ENDFOR
dst[MAX:128] := 0

[AVX512BW, AVX512VL; category: Miscellaneous (was Convert); header: immintrin.h]
Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit integers using unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Operation as above, with dst[i+15:i] := 0 in the ELSE branch.
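Note the lane structure in the 256-bit form above: the result interleaves per 128-bit lane as a[0..4], b[0..4], a[4..8], b[4..8], rather than taking all of "a" before "b". A minimal scalar model, with hypothetical names that are not stdarch's:

    // Scalar model of the 256-bit unsigned-saturating pack above.
    fn saturate_u16(x: i32) -> u16 {
        x.clamp(0, u16::MAX as i32) as u16
    }

    fn packus_epi32_256(a: [i32; 8], b: [i32; 8]) -> [u16; 16] {
        let mut dst = [0u16; 16];
        for lane in 0..2 {
            for j in 0..4 {
                // Each 128-bit lane packs four elements of `a`, then four of `b`.
                dst[lane * 8 + j] = saturate_u16(a[lane * 4 + j]);
                dst[lane * 8 + j + 4] = saturate_u16(b[lane * 4 + j]);
            }
        }
        dst
    }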
[AVX512BW, AVX512VL; category: Miscellaneous (was Convert); header: immintrin.h]
Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

tmp_dst[7:0] := SaturateU8(a[15:0])
tmp_dst[15:8] := SaturateU8(a[31:16])
tmp_dst[23:16] := SaturateU8(a[47:32])
tmp_dst[31:24] := SaturateU8(a[63:48])
tmp_dst[39:32] := SaturateU8(a[79:64])
tmp_dst[47:40] := SaturateU8(a[95:80])
tmp_dst[55:48] := SaturateU8(a[111:96])
tmp_dst[63:56] := SaturateU8(a[127:112])
tmp_dst[71:64] := SaturateU8(b[15:0])
tmp_dst[79:72] := SaturateU8(b[31:16])
tmp_dst[87:80] := SaturateU8(b[47:32])
tmp_dst[95:88] := SaturateU8(b[63:48])
tmp_dst[103:96] := SaturateU8(b[79:64])
tmp_dst[111:104] := SaturateU8(b[95:80])
tmp_dst[119:112] := SaturateU8(b[111:96])
tmp_dst[127:120] := SaturateU8(b[127:112])
tmp_dst[135:128] := SaturateU8(a[143:128])
tmp_dst[143:136] := SaturateU8(a[159:144])
tmp_dst[151:144] := SaturateU8(a[175:160])
tmp_dst[159:152] := SaturateU8(a[191:176])
tmp_dst[167:160] := SaturateU8(a[207:192])
tmp_dst[175:168] := SaturateU8(a[223:208])
tmp_dst[183:176] := SaturateU8(a[239:224])
tmp_dst[191:184] := SaturateU8(a[255:240])
tmp_dst[199:192] := SaturateU8(b[143:128])
tmp_dst[207:200] := SaturateU8(b[159:144])
tmp_dst[215:208] := SaturateU8(b[175:160])
tmp_dst[223:216] := SaturateU8(b[191:176])
tmp_dst[231:224] := SaturateU8(b[207:192])
tmp_dst[239:232] := SaturateU8(b[223:208])
tmp_dst[247:240] := SaturateU8(b[239:224])
tmp_dst[255:248] := SaturateU8(b[255:240])
FOR j := 0 to 31
    i := j*8
    IF k[j]
        dst[i+7:i] := tmp_dst[i+7:i]
    ELSE
        dst[i+7:i] := src[i+7:i]
    FI
ENDFOR
dst[MAX:256] := 0

[AVX512BW, AVX512VL; category: Miscellaneous (was Convert); header: immintrin.h]
Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Operation as above, with dst[i+7:i] := 0 in the ELSE branch.

[AVX512BW, AVX512VL; category: Miscellaneous (was Convert); header: immintrin.h]
Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). tmp_dst[127:0] is formed exactly as the low 128-bit lane above (SaturateU8 of a[127:0], then b[127:0]), followed by:

FOR j := 0 to 15
    i := j*8
    IF k[j]
        dst[i+7:i] := tmp_dst[i+7:i]
    ELSE
        dst[i+7:i] := src[i+7:i]
    FI
ENDFOR
dst[MAX:128] := 0

[AVX512BW, AVX512VL; category: Miscellaneous (was Convert); header: immintrin.h]
Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Operation as above, with dst[i+7:i] := 0 in the ELSE branch.
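SaturateU8 here takes a signed 16-bit input and clamps it into 0..=255, so negative inputs become 0. A one-function model of that corner case (illustrative name, not stdarch's):

    // SaturateU8 of a *signed* 16-bit value: negatives clamp to 0.
    fn saturate_u8(x: i16) -> u8 {
        x.clamp(0, u8::MAX as i16) as u8
    }

    fn main() {
        assert_eq!(saturate_u8(-5), 0);
        assert_eq!(saturate_u8(300), 255);
        assert_eq!(saturate_u8(42), 42);
    }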
[AVX512BW, AVX512VL; category: Convert; header: immintrin.h]
Convert packed signed 16-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst".

FOR j := 0 to 15
    i := 16*j
    l := 8*j
    dst[l+7:l] := Saturate8(a[i+15:i])
ENDFOR
dst[MAX:128] := 0

[AVX512BW, AVX512VL; category: Convert; header: immintrin.h]
Convert packed signed 16-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := 16*j
    l := 8*j
    IF k[j]
        dst[l+7:l] := Saturate8(a[i+15:i])
    ELSE
        dst[l+7:l] := src[l+7:l]
    FI
ENDFOR
dst[MAX:128] := 0

[AVX512BW, AVX512VL; category: Store; header: immintrin.h]
Convert packed signed 16-bit integers in "a" to packed 8-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".

FOR j := 0 to 15
    i := 16*j
    l := 8*j
    IF k[j]
        MEM[base_addr+l+7:base_addr+l] := Saturate8(a[i+15:i])
    FI
ENDFOR

[AVX512BW, AVX512VL; category: Convert; header: immintrin.h]
Convert packed signed 16-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Operation as the writemask form above, with dst[l+7:l] := 0 in the ELSE branch.

The same four variants (plain, writemask, masked store, zeromask) also exist for 128-bit "a", looping FOR j := 0 to 7 and clearing dst[MAX:64] in the register forms.
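The "store active results" form is worth calling out: unlike the register variants, it performs a byte-granular masked write, leaving unselected destination bytes untouched. A minimal scalar model with an illustrative name (not stdarch's):

    // Masked narrowing store: only bytes whose mask bit is set are written.
    fn mask_cvtsepi16_storeu_epi8(mem: &mut [i8; 16], k: u16, a: [i16; 16]) {
        for j in 0..16 {
            if (k >> j) & 1 == 1 {
                mem[j] = a[j].clamp(i8::MIN as i16, i8::MAX as i16) as i8;
            }
        }
    }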
[AVX512BW, AVX512VL; category: Convert; header: immintrin.h]
Sign extend packed 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*8
    l := j*16
    IF k[j]
        dst[l+15:l] := SignExtend16(a[i+7:i])
    ELSE
        dst[l+15:l] := src[l+15:l]
    FI
ENDFOR
dst[MAX:256] := 0

[AVX512BW, AVX512VL; category: Convert; header: immintrin.h]
Sign extend packed 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Operation as above, with dst[l+15:l] := 0 in the ELSE branch.

Both masked forms also exist with 128-bit "dst", looping FOR j := 0 to 7 and clearing dst[MAX:128].
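In Rust, SignExtend16 of a signed byte is exactly what `as i16` does on an `i8`. A minimal scalar model of the writemask form, with an illustrative name:

    // Masked sign extension: `as i16` on an i8 sign-extends, matching
    // SignExtend16 in the pseudocode.
    fn mask_cvtepi8_epi16(src: [i16; 16], k: u16, a: [i8; 16]) -> [i16; 16] {
        let mut dst = src; // ELSE branch: lanes keep src where the bit is clear
        for j in 0..16 {
            if (k >> j) & 1 == 1 {
                dst[j] = a[j] as i16;
            }
        }
        dst
    }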
[AVX512BW, AVX512VL; category: Convert; header: immintrin.h]
Convert packed unsigned 16-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst".

FOR j := 0 to 15
    i := 16*j
    l := 8*j
    dst[l+7:l] := SaturateU8(a[i+15:i])
ENDFOR
dst[MAX:128] := 0

[AVX512BW, AVX512VL; category: Convert; header: immintrin.h]
Convert packed unsigned 16-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := 16*j
    l := 8*j
    IF k[j]
        dst[l+7:l] := SaturateU8(a[i+15:i])
    ELSE
        dst[l+7:l] := src[l+7:l]
    FI
ENDFOR
dst[MAX:128] := 0

[AVX512BW, AVX512VL; category: Store; header: immintrin.h]
Convert packed unsigned 16-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".

FOR j := 0 to 15
    i := 16*j
    l := 8*j
    IF k[j]
        MEM[base_addr+l+7:base_addr+l] := SaturateU8(a[i+15:i])
    FI
ENDFOR

[AVX512BW, AVX512VL; category: Convert; header: immintrin.h]
Convert packed unsigned 16-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Operation as the writemask form above, with dst[l+7:l] := 0 in the ELSE branch.

The same four variants (plain, writemask, masked store, zeromask) also exist for 128-bit "a", looping FOR j := 0 to 7 and clearing dst[MAX:64] in the register forms.
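For unsigned inputs, unlike the signed case above, SaturateU8 reduces to a plain minimum with 255. A small self-checking model (illustrative name):

    // Unsigned-to-unsigned saturation is just min(x, 255).
    fn cvtusepi16_epi8(a: [u16; 8]) -> [u8; 8] {
        a.map(|x| x.min(u8::MAX as u16) as u8)
    }

    fn main() {
        assert_eq!(
            cvtusepi16_epi8([0, 1, 255, 256, 1000, 65535, 7, 8]),
            [0, 1, 255, 255, 255, 255, 7, 8]
        );
    }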
[AVX512BW, AVX512VL; category: Convert; header: immintrin.h]
Convert packed 16-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst".

FOR j := 0 to 15
    i := 16*j
    l := 8*j
    dst[l+7:l] := Truncate8(a[i+15:i])
ENDFOR
dst[MAX:128] := 0

[AVX512BW, AVX512VL; category: Convert; header: immintrin.h]
Convert packed 16-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := 16*j
    l := 8*j
    IF k[j]
        dst[l+7:l] := Truncate8(a[i+15:i])
    ELSE
        dst[l+7:l] := src[l+7:l]
    FI
ENDFOR
dst[MAX:128] := 0

[AVX512BW, AVX512VL; category: Store; header: immintrin.h]
Convert packed 16-bit integers in "a" to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".

FOR j := 0 to 15
    i := 16*j
    l := 8*j
    IF k[j]
        MEM[base_addr+l+7:base_addr+l] := Truncate8(a[i+15:i])
    FI
ENDFOR

[AVX512BW, AVX512VL; category: Convert; header: immintrin.h]
Convert packed 16-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Operation as the writemask form above, with dst[l+7:l] := 0 in the ELSE branch.

The same four variants (plain, writemask, masked store, zeromask) also exist for 128-bit "a", looping FOR j := 0 to 7 and clearing dst[MAX:64] in the register forms.
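Truncate8 simply keeps the low 8 bits of each element, which is what an `as` cast to an 8-bit type does in Rust. A tiny model (illustrative name):

    // Truncation keeps the low byte: 0x1FF becomes 0xFF, i.e. -1 as i8.
    fn cvtepi16_epi8(a: [i16; 8]) -> [i8; 8] {
        a.map(|x| x as i8)
    }

    fn main() {
        assert_eq!(cvtepi16_epi8([0x1FF; 8])[0], -1);
    }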
[AVX512BW, AVX512VL; category: Convert; header: immintrin.h]
Zero extend packed unsigned 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*8
    l := j*16
    IF k[j]
        dst[l+15:l] := ZeroExtend16(a[i+7:i])
    ELSE
        dst[l+15:l] := src[l+15:l]
    FI
ENDFOR
dst[MAX:256] := 0

[AVX512BW, AVX512VL; category: Convert; header: immintrin.h]
Zero extend packed unsigned 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Operation as above, with dst[l+15:l] := 0 in the ELSE branch.

Both masked forms also exist with 128-bit "dst", looping FOR j := 0 to 7 and clearing dst[MAX:128].
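ZeroExtend16 of an unsigned byte is `as u16` in Rust. A minimal model of the zeromask form, with an illustrative name:

    // Zeromasked zero extension: unselected lanes default to 0.
    fn maskz_cvtepu8_epi16(k: u16, a: [u8; 16]) -> [u16; 16] {
        let mut dst = [0u16; 16];
        for j in 0..16 {
            if (k >> j) & 1 == 1 {
                dst[j] = a[j] as u16;
            }
        }
        dst
    }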
[AVX512BW, AVX512VL; category: Set; header: immintrin.h]
Broadcast 8-bit integer "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 31
    i := j*8
    IF k[j]
        dst[i+7:i] := a[7:0]
    ELSE
        dst[i+7:i] := src[i+7:i]
    FI
ENDFOR
dst[MAX:256] := 0

The remaining broadcast forms follow the same pattern:
- the zeromask variants ("elements are zeroed out when the corresponding mask bit is not set") replace the ELSE branch with dst[i+7:i] := 0;
- the 128-bit byte forms (writemask and zeromask) loop FOR j := 0 to 15 and clear dst[MAX:128];
- the word forms ("Broadcast the low packed 16-bit integer from "a" to all elements of "dst"", writemask and zeromask) use i := j*16 and dst[i+15:i] := a[15:0], looping FOR j := 0 to 15 with dst[MAX:256] := 0 at 256 bits and FOR j := 0 to 7 with dst[MAX:128] := 0 at 128 bits.
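All eight broadcast variants are one loop: every selected lane receives "a", and the rest keep "src" (writemask) or become 0 (zeromask). A scalar model of the 256-bit byte form, with an illustrative name:

    // Masked broadcast: selected lanes get `a`, others keep `src`.
    fn mask_set1_epi8(src: [i8; 32], k: u32, a: i8) -> [i8; 32] {
        let mut dst = src;
        for j in 0..32 {
            if (k >> j) & 1 == 1 {
                dst[j] = a;
            }
        }
        dst
    }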
[AVX512BW, AVX512VL; category: Compare; header: immintrin.h]
Compare packed signed 8-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".

CASE (imm8[2:0]) OF
0: OP := _MM_CMPINT_EQ
1: OP := _MM_CMPINT_LT
2: OP := _MM_CMPINT_LE
3: OP := _MM_CMPINT_FALSE
4: OP := _MM_CMPINT_NE
5: OP := _MM_CMPINT_NLT
6: OP := _MM_CMPINT_NLE
7: OP := _MM_CMPINT_TRUE
ESAC
FOR j := 0 to 31
    i := j*8
    k[j] := ( a[i+7:i] OP b[i+7:i] ) ? 1 : 0
ENDFOR
k[MAX:32] := 0

Six fixed-predicate forms (for equality, greater-than-or-equal, greater-than, less-than-or-equal, less-than, and not-equal) specialize OP to ==, >=, >, <=, < and != respectively, with the same loop and k[MAX:32] := 0. Each carries the same CPUID flags, category, and header.
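A scalar model of the "imm8" dispatch, following the _MM_CMPINT_* table above (names illustrative, not stdarch's; in the real intrinsic imm8 is a const generic checked at compile time):

    // Byte compare-to-mask with a 3-bit predicate selector.
    fn cmp_epi8_mask(a: [i8; 32], b: [i8; 32], imm8: u8) -> u32 {
        let mut k = 0u32;
        for j in 0..32 {
            let hit = match imm8 & 0b111 {
                0 => a[j] == b[j], // _MM_CMPINT_EQ
                1 => a[j] < b[j],  // _MM_CMPINT_LT
                2 => a[j] <= b[j], // _MM_CMPINT_LE
                3 => false,        // _MM_CMPINT_FALSE
                4 => a[j] != b[j], // _MM_CMPINT_NE
                5 => a[j] >= b[j], // _MM_CMPINT_NLT
                6 => a[j] > b[j],  // _MM_CMPINT_NLE
                _ => true,         // _MM_CMPINT_TRUE
            };
            if hit {
                k |= 1 << j;
            }
        }
        k
    }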
Compare packed signed 8-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

CASE (imm8[2:0]) OF
0: OP := _MM_CMPINT_EQ
1: OP := _MM_CMPINT_LT
2: OP := _MM_CMPINT_LE
3: OP := _MM_CMPINT_FALSE
4: OP := _MM_CMPINT_NE
5: OP := _MM_CMPINT_NLT
6: OP := _MM_CMPINT_NLE
7: OP := _MM_CMPINT_TRUE
ESAC
FOR j := 0 to 31
  i := j*8
  IF k1[j]
    k[j] := ( a[i+7:i] OP b[i+7:i] ) ? 1 : 0
  ELSE
    k[j] := 0
  FI
ENDFOR
k[MAX:32] := 0

Six companion entries compare packed signed 8-bit integers in "a" and "b" for equality, greater-than-or-equal, greater-than, less-than-or-equal, less-than, and not-equal, storing the results in mask vector "k" under the same zeromask "k1"; each uses the loop above with OP fixed to ==, >=, >, <=, <, or != and no CASE selection.

CPUID: AVX512BW, AVX512VL
Header: immintrin.h
Category: Compare
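To make the CASE table concrete, here is a minimal scalar model of the masked byte compare in Rust. This is an illustrative sketch of the pseudocode only, not the stdarch implementation, and the function name is invented for this example.

// Scalar model of the masked 8-bit compare pseudocode above.
// `IMM8 & 7` selects the predicate exactly as the CASE table does.
fn mask_cmp_epi8_model<const IMM8: i32>(k1: u32, a: [i8; 32], b: [i8; 32]) -> u32 {
    let mut k = 0u32;
    for j in 0..32 {
        if k1 & (1 << j) != 0 {
            let hit = match IMM8 & 7 {
                0 => a[j] == b[j], // _MM_CMPINT_EQ
                1 => a[j] < b[j],  // _MM_CMPINT_LT
                2 => a[j] <= b[j], // _MM_CMPINT_LE
                3 => false,        // _MM_CMPINT_FALSE
                4 => a[j] != b[j], // _MM_CMPINT_NE
                5 => a[j] >= b[j], // _MM_CMPINT_NLT (not less than)
                6 => a[j] > b[j],  // _MM_CMPINT_NLE (not less than or equal)
                _ => true,         // _MM_CMPINT_TRUE
            };
            if hit {
                k |= 1 << j;
            }
        } // else: zeromask semantics leave k[j] = 0
    }
    k // the k[MAX:32] := 0 step is implicit in returning a u32
}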
Compare packed signed 8-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k"; OP is selected from imm8[2:0] by the same CASE table as above.

FOR j := 0 to 15
  i := j*8
  k[j] := ( a[i+7:i] OP b[i+7:i] ) ? 1 : 0
ENDFOR
k[MAX:16] := 0

Six companion entries compare packed signed 8-bit integers in "a" and "b" for equality, greater-than-or-equal, greater-than, less-than-or-equal, less-than, and not-equal, using the loop above with OP fixed to ==, >=, >, <=, <, or !=.

CPUID: AVX512BW, AVX512VL
Header: immintrin.h
Category: Compare
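The unmasked 128-bit forms drop the k1 test: every lane contributes a bit, and only the low 16 mask bits are defined. A sketch under the same scalar model (hypothetical name, not stdarch code):

// Unmasked 128-bit equality compare: one mask bit per byte lane.
fn cmpeq_epi8_model(a: [i8; 16], b: [i8; 16]) -> u16 {
    let mut k = 0u16;
    for j in 0..16 {
        if a[j] == b[j] {
            k |= 1 << j;
        }
    }
    k // k[MAX:16] := 0 corresponds to the u16 result type
}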
Compare packed signed 8-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set); OP is selected from imm8[2:0] as above.

FOR j := 0 to 15
  i := j*8
  IF k1[j]
    k[j] := ( a[i+7:i] OP b[i+7:i] ) ? 1 : 0
  ELSE
    k[j] := 0
  FI
ENDFOR
k[MAX:16] := 0

Six companion entries cover the fixed comparisons (equality, greater-than-or-equal, greater-than, less-than-or-equal, less-than, not-equal) under the same zeromask, with OP fixed to ==, >=, >, <=, <, or !=.

CPUID: AVX512BW, AVX512VL
Header: immintrin.h
Category: Compare
Compare packed unsigned 8-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k"; OP is selected from imm8[2:0] as above.

FOR j := 0 to 31
  i := j*8
  k[j] := ( a[i+7:i] OP b[i+7:i] ) ? 1 : 0
ENDFOR
k[MAX:32] := 0

Six companion entries compare packed unsigned 8-bit integers in "a" and "b" for equality, greater-than-or-equal, greater-than, less-than-or-equal, less-than, and not-equal, with OP fixed accordingly.

CPUID: AVX512BW, AVX512VL
Header: immintrin.h
Category: Compare
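The epu8 entries differ from the epi8 ones only in how the byte patterns are ordered; the loop structure is identical. A one-liner showing where the two orders diverge (illustrative only):

// Signed vs unsigned greater-than on the same bit patterns:
// 0x80 is 128 unsigned but -128 signed, so the two orders disagree.
fn gt_unsigned_vs_signed(a: u8, b: u8) -> (bool, bool) {
    (a > b, (a as i8) > (b as i8))
}

// gt_unsigned_vs_signed(0x80, 0x01) == (true, false)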
Compare packed unsigned 8-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set); OP is selected from imm8[2:0] as above.

FOR j := 0 to 31
  i := j*8
  IF k1[j]
    k[j] := ( a[i+7:i] OP b[i+7:i] ) ? 1 : 0
  ELSE
    k[j] := 0
  FI
ENDFOR
k[MAX:32] := 0

Six companion entries cover the fixed comparisons (equality, greater-than-or-equal, greater-than, less-than-or-equal, less-than, not-equal) under the same zeromask, with OP fixed to ==, >=, >, <=, <, or !=.

CPUID: AVX512BW, AVX512VL
Header: immintrin.h
Category: Compare
Compare packed unsigned 8-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k"; OP is selected from imm8[2:0] as above.

FOR j := 0 to 15
  i := j*8
  k[j] := ( a[i+7:i] OP b[i+7:i] ) ? 1 : 0
ENDFOR
k[MAX:16] := 0

Six companion entries compare packed unsigned 8-bit integers in "a" and "b" for equality, greater-than-or-equal, greater-than, less-than-or-equal, less-than, and not-equal, with OP fixed accordingly.

CPUID: AVX512BW, AVX512VL
Header: immintrin.h
Category: Compare
Compare packed unsigned 8-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set); OP is selected from imm8[2:0] as above.

FOR j := 0 to 15
  i := j*8
  IF k1[j]
    k[j] := ( a[i+7:i] OP b[i+7:i] ) ? 1 : 0
  ELSE
    k[j] := 0
  FI
ENDFOR
k[MAX:16] := 0

Six companion entries cover the fixed comparisons (equality, greater-than-or-equal, greater-than, less-than-or-equal, less-than, not-equal) under the same zeromask, with OP fixed to ==, >=, >, <=, <, or !=.

CPUID: AVX512BW, AVX512VL
Header: immintrin.h
Category: Compare
Compare packed unsigned 16-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k"; OP is selected from imm8[2:0] as above.

FOR j := 0 to 15
  i := j*16
  k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0
ENDFOR
k[MAX:16] := 0

Six companion entries compare packed unsigned 16-bit integers in "a" and "b" for equality, greater-than-or-equal, greater-than, less-than-or-equal, less-than, and not-equal, with OP fixed accordingly.

CPUID: AVX512BW, AVX512VL
Header: immintrin.h
Category: Compare
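Moving from 8-bit to 16-bit elements changes only the stride (i := j*16) and the lane count. A sketch of the unmasked unsigned word compare under the same scalar model (hypothetical name):

// 256-bit vector of u16: 16 lanes, one mask bit per word.
fn cmplt_epu16_model(a: [u16; 16], b: [u16; 16]) -> u16 {
    let mut k = 0u16;
    for j in 0..16 {
        if a[j] < b[j] {
            k |= 1 << j;
        }
    }
    k
}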
Compare packed unsigned 16-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set); OP is selected from imm8[2:0] as above.

FOR j := 0 to 15
  i := j*16
  IF k1[j]
    k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0
  ELSE
    k[j] := 0
  FI
ENDFOR
k[MAX:16] := 0

Six companion entries cover the fixed comparisons (equality, greater-than-or-equal, greater-than, less-than-or-equal, less-than, not-equal) under the same zeromask, with OP fixed to ==, >=, >, <=, <, or !=.

CPUID: AVX512BW, AVX512VL
Header: immintrin.h
Category: Compare
Compare packed unsigned 16-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k"; OP is selected from imm8[2:0] as above.

FOR j := 0 to 7
  i := j*16
  k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0
ENDFOR
k[MAX:8] := 0

Six companion entries compare packed unsigned 16-bit integers in "a" and "b" for equality, greater-than-or-equal, greater-than, less-than-or-equal, less-than, and not-equal, with OP fixed accordingly.

CPUID: AVX512BW, AVX512VL
Header: immintrin.h
Category: Compare
Compare packed unsigned 16-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set); OP is selected from imm8[2:0] as above.

FOR j := 0 to 7
  i := j*16
  IF k1[j]
    k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0
  ELSE
    k[j] := 0
  FI
ENDFOR
k[MAX:8] := 0

Six companion entries cover the fixed comparisons (equality, greater-than-or-equal, greater-than, less-than-or-equal, less-than, not-equal) under the same zeromask, with OP fixed to ==, >=, >, <=, <, or !=.

CPUID: AVX512BW, AVX512VL
Header: immintrin.h
Category: Compare
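At 128 bits the 16-bit lanes number only eight, so the defined mask narrows to 8 bits. A masked sketch in the same style (hypothetical name, scalar model of the pseudocode rather than stdarch code):

// Masked 128-bit unsigned word compare: eight u16 lanes, 8-bit mask.
fn mask_cmple_epu16_model(k1: u8, a: [u16; 8], b: [u16; 8]) -> u8 {
    let mut k = 0u8;
    for j in 0..8 {
        if (k1 >> j) & 1 == 1 && a[j] <= b[j] {
            k |= 1 << j;
        }
    }
    k // k[MAX:8] := 0 corresponds to the u8 result type
}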
- - - - - Compare packed signed 16-bit integers in "a" and "b" based on the comparison - operand specified by "imm8", and store the results in mask vector "k". - CASE (imm8[2:0]) OF - 0: OP := _MM_CMPINT_EQ - 1: OP := _MM_CMPINT_LT - 2: OP := _MM_CMPINT_LE - 3: OP := _MM_CMPINT_FALSE - 4: OP := _MM_CMPINT_NE - 5: OP := _MM_CMPINT_NLT - 6: OP := _MM_CMPINT_NLE - 7: OP := _MM_CMPINT_TRUE - ESAC - FOR j := 0 to 15 - i := j*16 - k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0 - ENDFOR - k[MAX:16] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed signed 16-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + CASE (imm8[2:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 15 + i := j*16 + k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
- - - - Compare packed signed 16-bit integers in "a" and "b" for equality, and store - the results in mask vector "k". - - FOR j := 0 to 15 - i := j*16 - k[j] := ( a[i+15:i] == b[i+15:i] ) ? 1 : 0 - ENDFOR - k[MAX:16] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + Compare packed signed 16-bit integers in "a" and "b" for equality, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*16 + k[j] := ( a[i+15:i] == b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
- - - - Compare packed signed 16-bit integers in "a" and "b" for greater-than-or-equal, - and store the results in mask vector "k". - - FOR j := 0 to 15 - i := j*16 - k[j] := ( a[i+15:i] >= b[i+15:i] ) ? 1 : 0 - ENDFOR - k[MAX:16] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + Compare packed signed 16-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*16 + k[j] := ( a[i+15:i] >= b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
- - - - Compare packed signed 16-bit integers in "a" and "b" for greater-than, and - store the results in mask vector "k". - - FOR j := 0 to 15 - i := j*16 - k[j] := ( a[i+15:i] > b[i+15:i] ) ? 1 : 0 - ENDFOR - k[MAX:16] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + Compare packed signed 16-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*16 + k[j] := ( a[i+15:i] > b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
- - - - Compare packed signed 16-bit integers in "a" and "b" for less-than-or-equal, - and store the results in mask vector "k". - - FOR j := 0 to 15 - i := j*16 - k[j] := ( a[i+15:i] <= b[i+15:i] ) ? 1 : 0 - ENDFOR - k[MAX:16] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + Compare packed signed 16-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*16 + k[j] := ( a[i+15:i] <= b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
- - - - Compare packed signed 16-bit integers in "a" and "b" for less-than, and store - the results in mask vector "k". - - FOR j := 0 to 15 - i := j*16 - k[j] := ( a[i+15:i] < b[i+15:i] ) ? 1 : 0 - ENDFOR - k[MAX:16] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + Compare packed signed 16-bit integers in "a" and "b" for less-than, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*16 + k[j] := ( a[i+15:i] < b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
- - - - Compare packed signed 16-bit integers in "a" and "b" for not-equal, and store - the results in mask vector "k". - - FOR j := 0 to 15 - i := j*16 - k[j] := ( a[i+15:i] != b[i+15:i] ) ? 1 : 0 - ENDFOR - k[MAX:16] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + Compare packed signed 16-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*16 + k[j] := ( a[i+15:i] != b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
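Each of the six fixed-predicate entries above (eq, ge, gt, le, lt, neq) is the same per-lane fold with the operator baked in; only the predicate changes. Shown for greater-than, in plain Rust with illustrative names:

    /// Model of the 16-lane greater-than compare above: a[j] > b[j] per lane.
    fn cmpgt_epi16_mask(a: [i16; 16], b: [i16; 16]) -> u16 {
        a.iter()
            .zip(b.iter())
            .enumerate()
            .fold(0u16, |k, (j, (&x, &y))| k | (((x > y) as u16) << j))
    }

    fn main() {
        let a: [i16; 16] = core::array::from_fn(|i| i as i16 - 8); // -8..=7
        let b = [0i16; 16];
        // lanes 9..=15 hold 1..=7, the only values greater than zero
        assert_eq!(cmpgt_epi16_mask(a, b), 0b1111_1110_0000_0000);
    }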
- - - - - - Compare packed signed 16-bit integers in "a" and "b" based on the comparison - operand specified by "imm8", and store the results in mask vector "k" using zeromask - "k1" (elements are zeroed out when the corresponding mask bit is not set). - CASE (imm8[2:0]) OF - 0: OP := _MM_CMPINT_EQ - 1: OP := _MM_CMPINT_LT - 2: OP := _MM_CMPINT_LE - 3: OP := _MM_CMPINT_FALSE - 4: OP := _MM_CMPINT_NE - 5: OP := _MM_CMPINT_NLT - 6: OP := _MM_CMPINT_NLE - 7: OP := _MM_CMPINT_TRUE - ESAC - FOR j := 0 to 15 - i := j*16 - IF k1[j] - k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:16] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + + + Compare packed signed 16-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + CASE (imm8[2:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 15 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed signed 16-bit integers in "a" and "b" for equality, and store - the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k1[j] - k[j] := ( a[i+15:i] == b[i+15:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:16] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed signed 16-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] == b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed signed 16-bit integers in "a" and "b" for greater-than-or-equal, - and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out - when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k1[j] - k[j] := ( a[i+15:i] >= b[i+15:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:16] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed signed 16-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] >= b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed signed 16-bit integers in "a" and "b" for greater-than, and - store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when - the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k1[j] - k[j] := ( a[i+15:i] > b[i+15:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:16] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed signed 16-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] > b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed signed 16-bit integers in "a" and "b" for less-than-or-equal, - and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out - when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k1[j] - k[j] := ( a[i+15:i] <= b[i+15:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:16] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed signed 16-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] <= b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed signed 16-bit integers in "a" and "b" for less-than, and store - the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k1[j] - k[j] := ( a[i+15:i] < b[i+15:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:16] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed signed 16-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] < b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed signed 16-bit integers in "a" and "b" for not-equal, and store - the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k1[j] - k[j] := ( a[i+15:i] != b[i+15:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:16] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed signed 16-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] != b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
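The zeromasked variants above are equivalent to the unmasked compare followed by an AND with `k1`: the `IF k1[j] ... ELSE k[j] := 0` branch never sets a bit the unmasked form would not. A quick check of that equivalence (illustrative code, not stdarch API):

    fn cmp_mask16<F: Fn(i16, i16) -> bool>(a: [i16; 16], b: [i16; 16], op: F) -> u16 {
        (0..16).fold(0u16, |k, j| k | ((op(a[j], b[j]) as u16) << j))
    }

    fn main() {
        let a: [i16; 16] = core::array::from_fn(|i| (i as i16) % 5);
        let b: [i16; 16] = core::array::from_fn(|i| (i as i16) % 3);
        let k1 = 0b1010_1010_1010_1010u16;
        // zeromasked form: lane enabled AND predicate true
        let masked = (0..16).fold(0u16, |k, j| {
            let bit = (k1 >> j) & 1 == 1 && a[j] == b[j];
            k | ((bit as u16) << j)
        });
        assert_eq!(masked, k1 & cmp_mask16(a, b, |x, y| x == y));
    }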
- - - - - Compare packed signed 16-bit integers in "a" and "b" based on the comparison - operand specified by "imm8", and store the results in mask vector "k". - CASE (imm8[2:0]) OF - 0: OP := _MM_CMPINT_EQ - 1: OP := _MM_CMPINT_LT - 2: OP := _MM_CMPINT_LE - 3: OP := _MM_CMPINT_FALSE - 4: OP := _MM_CMPINT_NE - 5: OP := _MM_CMPINT_NLT - 6: OP := _MM_CMPINT_NLE - 7: OP := _MM_CMPINT_TRUE - ESAC - FOR j := 0 to 7 - i := j*16 - k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0 - ENDFOR - k[MAX:8] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed signed 16-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + CASE (imm8[2:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 7 + i := j*16 + k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
- - - - Compare packed signed 16-bit integers in "a" and "b" for equality, and store - the results in mask vector "k". - - FOR j := 0 to 7 - i := j*16 - k[j] := ( a[i+15:i] == b[i+15:i] ) ? 1 : 0 - ENDFOR - k[MAX:8] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + Compare packed signed 16-bit integers in "a" and "b" for equality, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*16 + k[j] := ( a[i+15:i] == b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
- - - - Compare packed signed 16-bit integers in "a" and "b" for greater-than-or-equal, - and store the results in mask vector "k". - - FOR j := 0 to 7 - i := j*16 - k[j] := ( a[i+15:i] >= b[i+15:i] ) ? 1 : 0 - ENDFOR - k[MAX:8] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + Compare packed signed 16-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*16 + k[j] := ( a[i+15:i] >= b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
- - - - Compare packed signed 16-bit integers in "a" and "b" for greater-than, and - store the results in mask vector "k". - - FOR j := 0 to 7 - i := j*16 - k[j] := ( a[i+15:i] > b[i+15:i] ) ? 1 : 0 - ENDFOR - k[MAX:8] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + Compare packed signed 16-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*16 + k[j] := ( a[i+15:i] > b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
- - - - Compare packed signed 16-bit integers in "a" and "b" for less-than-or-equal, - and store the results in mask vector "k". - - FOR j := 0 to 7 - i := j*16 - k[j] := ( a[i+15:i] <= b[i+15:i] ) ? 1 : 0 - ENDFOR - k[MAX:8] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + Compare packed signed 16-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*16 + k[j] := ( a[i+15:i] <= b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
- - - - Compare packed signed 16-bit integers in "a" and "b" for less-than, and store - the results in mask vector "k". - - FOR j := 0 to 7 - i := j*16 - k[j] := ( a[i+15:i] < b[i+15:i] ) ? 1 : 0 - ENDFOR - k[MAX:8] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + Compare packed signed 16-bit integers in "a" and "b" for less-than, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*16 + k[j] := ( a[i+15:i] < b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
- - - - Compare packed signed 16-bit integers in "a" and "b" for not-equal, and store - the results in mask vector "k". - - FOR j := 0 to 7 - i := j*16 - k[j] := ( a[i+15:i] != b[i+15:i] ) ? 1 : 0 - ENDFOR - k[MAX:8] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + Compare packed signed 16-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*16 + k[j] := ( a[i+15:i] != b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
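The 128-bit entries repeat the 256-bit semantics over 8 lanes, with `k[MAX:8] := 0` clearing the upper mask bits so the result fits a `__mmask8` (an alias for `u8` in core_arch). A hedged usage sketch against the corresponding stdarch intrinsics, which at the time of this patch are nightly-only behind the `stdarch_x86_avx512` feature and may change:

    #[cfg(target_arch = "x86_64")]
    #[target_feature(enable = "avx512bw,avx512vl")]
    unsafe fn lanes_below_four() -> u8 {
        use core::arch::x86_64::*;
        // lane 0 is the *last* argument of _mm_set_epi16
        let a = _mm_set_epi16(7, 6, 5, 4, 3, 2, 1, 0);
        let b = _mm_set1_epi16(4);
        // bit j of the __mmask8 is set iff lane j of `a` is < 4 -> 0b0000_1111
        _mm_cmplt_epi16_mask(a, b)
    }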
- - - - - - Compare packed signed 16-bit integers in "a" and "b" based on the comparison - operand specified by "imm8", and store the results in mask vector "k" using zeromask - "k1" (elements are zeroed out when the corresponding mask bit is not set). - CASE (imm8[2:0]) OF - 0: OP := _MM_CMPINT_EQ - 1: OP := _MM_CMPINT_LT - 2: OP := _MM_CMPINT_LE - 3: OP := _MM_CMPINT_FALSE - 4: OP := _MM_CMPINT_NE - 5: OP := _MM_CMPINT_NLT - 6: OP := _MM_CMPINT_NLE - 7: OP := _MM_CMPINT_TRUE - ESAC - FOR j := 0 to 7 - i := j*16 - IF k1[j] - k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:8] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + + + Compare packed signed 16-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + CASE (imm8[2:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 7 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed signed 16-bit integers in "a" and "b" for equality, and store - the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k1[j] - k[j] := ( a[i+15:i] == b[i+15:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:8] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed signed 16-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] == b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed signed 16-bit integers in "a" and "b" for greater-than-or-equal, - and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out - when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k1[j] - k[j] := ( a[i+15:i] >= b[i+15:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:8] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed signed 16-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] >= b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed signed 16-bit integers in "a" and "b" for greater-than, and - store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when - the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k1[j] - k[j] := ( a[i+15:i] > b[i+15:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:8] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed signed 16-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] > b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed signed 16-bit integers in "a" and "b" for less-than-or-equal, - and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out - when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k1[j] - k[j] := ( a[i+15:i] <= b[i+15:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:8] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed signed 16-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] <= b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed signed 16-bit integers in "a" and "b" for less-than, and store - the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k1[j] - k[j] := ( a[i+15:i] < b[i+15:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:8] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed signed 16-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] < b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed signed 16-bit integers in "a" and "b" for not-equal, and store - the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k1[j] - k[j] := ( a[i+15:i] != b[i+15:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:8] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed signed 16-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] != b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
- - - - - Compute the bitwise AND of packed 8-bit integers in "a" and "b", producing - intermediate 8-bit values, and set the corresponding bit in result mask "k" (subject to - writemask "k") if the intermediate value is non-zero. - - FOR j := 0 to 31 - i := j*8 - IF k1[j] - k[j] := ((a[i+7:i] AND b[i+7:i]) != 0) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:32] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + + Compute the bitwise AND of packed 8-bit integers in "a" and "b", producing intermediate 8-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is non-zero. + +FOR j := 0 to 31 + i := j*8 + IF k1[j] + k[j] := ((a[i+7:i] AND b[i+7:i]) != 0) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
- - - - Compute the bitwise AND of packed 8-bit integers in "a" and "b", producing - intermediate 8-bit values, and set the corresponding bit in result mask "k" if the - intermediate value is non-zero. - - FOR j := 0 to 31 - i := j*8 - k[j] := ((a[i+7:i] AND b[i+7:i]) != 0) ? 1 : 0 - ENDFOR - k[MAX:32] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + Compute the bitwise AND of packed 8-bit integers in "a" and "b", producing intermediate 8-bit values, and set the corresponding bit in result mask "k" if the intermediate value is non-zero. + +FOR j := 0 to 31 + i := j*8 + k[j] := ((a[i+7:i] AND b[i+7:i]) != 0) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
- - - - - Compute the bitwise AND of packed 8-bit integers in "a" and "b", producing - intermediate 8-bit values, and set the corresponding bit in result mask "k" (subject to - writemask "k") if the intermediate value is non-zero. - - FOR j := 0 to 15 - i := j*8 - IF k1[j] - k[j] := ((a[i+7:i] AND b[i+7:i]) != 0) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:16] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + + Compute the bitwise AND of packed 8-bit integers in "a" and "b", producing intermediate 8-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is non-zero. + +FOR j := 0 to 15 + i := j*8 + IF k1[j] + k[j] := ((a[i+7:i] AND b[i+7:i]) != 0) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
- - - - Compute the bitwise AND of packed 8-bit integers in "a" and "b", producing - intermediate 8-bit values, and set the corresponding bit in result mask "k" if the - intermediate value is non-zero. - - FOR j := 0 to 15 - i := j*8 - k[j] := ((a[i+7:i] AND b[i+7:i]) != 0) ? 1 : 0 - ENDFOR - k[MAX:16] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + Compute the bitwise AND of packed 8-bit integers in "a" and "b", producing intermediate 8-bit values, and set the corresponding bit in result mask "k" if the intermediate value is non-zero. + +FOR j := 0 to 15 + i := j*8 + k[j] := ((a[i+7:i] AND b[i+7:i]) != 0) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
- - - - - Compute the bitwise AND of packed 16-bit integers in "a" and "b", producing - intermediate 16-bit values, and set the corresponding bit in result mask "k" (subject to - writemask "k") if the intermediate value is non-zero. - - FOR j := 0 to 15 - i := j*16 - IF k1[j] - k[j] := ((a[i+15:i] AND b[i+15:i]) != 0) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:16] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + + Compute the bitwise AND of packed 16-bit integers in "a" and "b", producing intermediate 16-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is non-zero. + +FOR j := 0 to 15 + i := j*16 + IF k1[j] + k[j] := ((a[i+15:i] AND b[i+15:i]) != 0) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
- - - - Compute the bitwise AND of packed 16-bit integers in "a" and "b", producing - intermediate 16-bit values, and set the corresponding bit in result mask "k" if the - intermediate value is non-zero. - - FOR j := 0 to 15 - i := j*16 - k[j] := ((a[i+15:i] AND b[i+15:i]) != 0) ? 1 : 0 - ENDFOR - k[MAX:16] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + Compute the bitwise AND of packed 16-bit integers in "a" and "b", producing intermediate 16-bit values, and set the corresponding bit in result mask "k" if the intermediate value is non-zero. + +FOR j := 0 to 15 + i := j*16 + k[j] := ((a[i+15:i] AND b[i+15:i]) != 0) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
- - - - - Compute the bitwise AND of packed 16-bit integers in "a" and "b", producing - intermediate 16-bit values, and set the corresponding bit in result mask "k" (subject to - writemask "k") if the intermediate value is non-zero. - - FOR j := 0 to 7 - i := j*16 - IF k1[j] - k[j] := ((a[i+15:i] AND b[i+15:i]) != 0) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:8] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + + Compute the bitwise AND of packed 16-bit integers in "a" and "b", producing intermediate 16-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is non-zero. + +FOR j := 0 to 7 + i := j*16 + IF k1[j] + k[j] := ((a[i+15:i] AND b[i+15:i]) != 0) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
- - - - Compute the bitwise AND of packed 16-bit integers in "a" and "b", producing - intermediate 16-bit values, and set the corresponding bit in result mask "k" if the - intermediate value is non-zero. - - FOR j := 0 to 7 - i := j*16 - k[j] := ((a[i+15:i] AND b[i+15:i]) != 0) ? 1 : 0 - ENDFOR - k[MAX:8] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + Compute the bitwise AND of packed 16-bit integers in "a" and "b", producing intermediate 16-bit values, and set the corresponding bit in result mask "k" if the intermediate value is non-zero. + +FOR j := 0 to 7 + i := j*16 + k[j] := ((a[i+15:i] AND b[i+15:i]) != 0) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
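All of the `test` entries above mask-reduce a bitwise AND: bit j of `k` is set when the AND of lane j is non-zero; the masked forms additionally gate on `k1`, which is what the prose calls the writemask "k". A scalar model of the 8-lane 16-bit form (illustrative names):

    /// Bit j is set iff k1[j] is set and (a[j] AND b[j]) != 0; a writemask
    /// of 0xFF reproduces the unmasked entry.
    fn mask_test_epi16_mask(k1: u8, a: [u16; 8], b: [u16; 8]) -> u8 {
        (0..8).fold(0u8, |k, j| {
            let hit = (k1 >> j) & 1 == 1 && (a[j] & b[j]) != 0;
            k | ((hit as u8) << j)
        })
    }

    fn main() {
        let a = [0b0011u16; 8];
        let mut b = [0b1100u16; 8]; // disjoint bits: the AND is zero everywhere...
        b[5] = 0b0010;              // ...except lane 5
        assert_eq!(mask_test_epi16_mask(0xFF, a, b), 1 << 5);
    }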
- - - - - Compute the bitwise NAND of packed 8-bit integers in "a" and "b", producing - intermediate 8-bit values, and set the corresponding bit in result mask "k" (subject to - writemask "k") if the intermediate value is zero. - - FOR j := 0 to 31 - i := j*8 - IF k1[j] - k[j] := ((a[i+7:i] AND b[i+7:i]) == 0) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:32] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + + Compute the bitwise NAND of packed 8-bit integers in "a" and "b", producing intermediate 8-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is zero. + +FOR j := 0 to 31 + i := j*8 + IF k1[j] + k[j] := ((a[i+7:i] AND b[i+7:i]) == 0) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
- - - - Compute the bitwise NAND of packed 8-bit integers in "a" and "b", producing - intermediate 8-bit values, and set the corresponding bit in result mask "k" if the - intermediate value is zero. - - FOR j := 0 to 31 - i := j*8 - k[j] := ((a[i+7:i] AND b[i+7:i]) == 0) ? 1 : 0 - ENDFOR - k[MAX:32] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + Compute the bitwise NAND of packed 8-bit integers in "a" and "b", producing intermediate 8-bit values, and set the corresponding bit in result mask "k" if the intermediate value is zero. + +FOR j := 0 to 31 + i := j*8 + k[j] := ((a[i+7:i] AND b[i+7:i]) == 0) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
- - - - - Compute the bitwise NAND of packed 8-bit integers in "a" and "b", producing - intermediate 8-bit values, and set the corresponding bit in result mask "k" (subject to - writemask "k") if the intermediate value is zero. - - FOR j := 0 to 15 - i := j*8 - IF k1[j] - k[j] := ((a[i+7:i] AND b[i+7:i]) == 0) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:16] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + + Compute the bitwise NAND of packed 8-bit integers in "a" and "b", producing intermediate 8-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is zero. + +FOR j := 0 to 15 + i := j*8 + IF k1[j] + k[j] := ((a[i+7:i] AND b[i+7:i]) == 0) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
- - - - Compute the bitwise NAND of packed 8-bit integers in "a" and "b", producing - intermediate 8-bit values, and set the corresponding bit in result mask "k" if the - intermediate value is zero. - - FOR j := 0 to 15 - i := j*8 - k[j] := ((a[i+7:i] AND b[i+7:i]) == 0) ? 1 : 0 - ENDFOR - k[MAX:16] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + Compute the bitwise NAND of packed 8-bit integers in "a" and "b", producing intermediate 8-bit values, and set the corresponding bit in result mask "k" if the intermediate value is zero. + +FOR j := 0 to 15 + i := j*8 + k[j] := ((a[i+7:i] AND b[i+7:i]) == 0) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
- - - - - Compute the bitwise NAND of packed 16-bit integers in "a" and "b", producing - intermediate 16-bit values, and set the corresponding bit in result mask "k" (subject to - writemask "k") if the intermediate value is zero. - - FOR j := 0 to 15 - i := j*16 - IF k1[j] - k[j] := ((a[i+15:i] AND b[i+15:i]) == 0) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:16] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + + Compute the bitwise NAND of packed 16-bit integers in "a" and "b", producing intermediate 16-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is zero. + +FOR j := 0 to 15 + i := j*16 + IF k1[j] + k[j] := ((a[i+15:i] AND b[i+15:i]) == 0) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
- - - - Compute the bitwise NAND of packed 16-bit integers in "a" and "b", producing - intermediate 16-bit values, and set the corresponding bit in result mask "k" if the - intermediate value is zero. - - FOR j := 0 to 15 - i := j*16 - k[j] := ((a[i+15:i] AND b[i+15:i]) == 0) ? 1 : 0 - ENDFOR - k[MAX:16] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + Compute the bitwise NAND of packed 16-bit integers in "a" and "b", producing intermediate 16-bit values, and set the corresponding bit in result mask "k" if the intermediate value is zero. + +FOR j := 0 to 15 + i := j*16 + k[j] := ((a[i+15:i] AND b[i+15:i]) == 0) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
- - - - - Compute the bitwise NAND of packed 16-bit integers in "a" and "b", producing - intermediate 16-bit values, and set the corresponding bit in result mask "k" (subject to - writemask "k") if the intermediate value is zero. - - FOR j := 0 to 7 - i := j*16 - IF k1[j] - k[j] := ((a[i+15:i] AND b[i+15:i]) == 0) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:8] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + + Compute the bitwise NAND of packed 16-bit integers in "a" and "b", producing intermediate 16-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is zero. + +FOR j := 0 to 7 + i := j*16 + IF k1[j] + k[j] := ((a[i+15:i] AND b[i+15:i]) == 0) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
- - - - Compute the bitwise NAND of packed 16-bit integers in "a" and "b", producing - intermediate 16-bit values, and set the corresponding bit in result mask "k" if the - intermediate value is zero. - - FOR j := 0 to 7 - i := j*16 - k[j] := ((a[i+15:i] AND b[i+15:i]) == 0) ? 1 : 0 - ENDFOR - k[MAX:8] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Compare + + + + Compute the bitwise NAND of packed 16-bit integers in "a" and "b", producing intermediate 16-bit values, and set the corresponding bit in result mask "k" if the intermediate value is zero. + +FOR j := 0 to 7 + i := j*16 + k[j] := ((a[i+15:i] AND b[i+15:i]) == 0) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Compare
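`testn` is the same reduction with the condition inverted: bit j is set when the lane AND is zero, so within the enabled lanes `testn` is the bitwise complement of `test`. A scalar model (illustrative names):

    fn mask_testn_epi16_mask(k1: u8, a: [u16; 8], b: [u16; 8]) -> u8 {
        (0..8).fold(0u8, |k, j| {
            let hit = (k1 >> j) & 1 == 1 && (a[j] & b[j]) == 0;
            k | ((hit as u8) << j)
        })
    }

    fn main() {
        let a: [u16; 8] = core::array::from_fn(|j| j as u16); // lanes hold 0..=7
        let b = [1u16; 8];                                    // probe the low bit
        let k1 = 0b1111_0000u8;
        // odd lane values have the low bit set, so among the enabled lanes
        // testn flags exactly the even ones (lanes 4 and 6)
        assert_eq!(mask_testn_epi16_mask(k1, a, b), 0b0101_0000);
    }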
- - - - - - Shift packed 16-bit integers in "a" left by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - IF count[i+15:i] < 16 + + + + + + Shift packed 16-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + IF count[i+15:i] < 16 dst[i+15:i] := ZeroExtend16(a[i+15:i] << count[i+15:i]) - ELSE + ELSE dst[i+15:i] := 0 - FI - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512BW + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 16-bit integers in "a" left by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in "dst" - using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - IF count[i+15:i] < 16 + + + + + Shift packed 16-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + IF count[i+15:i] < 16 dst[i+15:i] := ZeroExtend16(a[i+15:i] << count[i+15:i]) - ELSE - dst[i+15:i] := 0 - FI - ELSE + ELSE dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512BW + AVX512VL +
immintrin.h
+ Shift
- - - - Shift packed 16-bit integers in "a" left by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in - "dst". - - FOR j := 0 to 15 - i := j*16 - IF count[i+15:i] < 16 - dst[i+15:i] := ZeroExtend16(a[i+15:i] << count[i+15:i]) - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Shift + + + + Shift packed 16-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 15 + i := j*16 + IF count[i+15:i] < 16 + dst[i+15:i] := ZeroExtend16(a[i+15:i] << count[i+15:i]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Shift
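The variable shifts above read a per-lane count from `count[i+15:i]` and zero the lane once the count reaches 16. Rust's `<<` panics on an oversized shift in debug builds, so the guard from the pseudocode must stay explicit. A scalar model of the unmasked 16-lane form (the masked variants blend with `src` or zero exactly as in the compare entries):

    /// Lane j becomes a[j] << count[j] when count[j] < 16, else 0.
    fn sllv_epi16(a: [u16; 16], count: [u16; 16]) -> [u16; 16] {
        core::array::from_fn(|j| if count[j] < 16 { a[j] << count[j] } else { 0 })
    }

    fn main() {
        let a = [1u16; 16];
        let count: [u16; 16] = core::array::from_fn(|j| j as u16 + 8); // 8..=23
        let r = sllv_epi16(a, count);
        assert_eq!(r[7], 1 << 15); // count 15: still in range
        assert_eq!(r[8], 0);       // count 16: the lane is shifted out entirely
    }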
- - - - - - Shift packed 16-bit integers in "a" left by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - IF count[i+15:i] < 16 + + + + + + Shift packed 16-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + IF count[i+15:i] < 16 dst[i+15:i] := ZeroExtend16(a[i+15:i] << count[i+15:i]) - ELSE + ELSE dst[i+15:i] := 0 - FI - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512BW + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 16-bit integers in "a" left by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in "dst" - using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - IF count[i+15:i] < 16 + + + + + Shift packed 16-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + IF count[i+15:i] < 16 dst[i+15:i] := ZeroExtend16(a[i+15:i] << count[i+15:i]) - ELSE + ELSE dst[i+15:i] := 0 - FI - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512BW + AVX512VL +
immintrin.h
+ Shift
- - - - Shift packed 16-bit integers in "a" left by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in - "dst". - - FOR j := 0 to 7 - i := j*16 - IF count[i+15:i] < 16 - dst[i+15:i] := ZeroExtend16(a[i+15:i] << count[i+15:i]) - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Shift + + + + Shift packed 16-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 7 + i := j*16 + IF count[i+15:i] < 16 + dst[i+15:i] := ZeroExtend16(a[i+15:i] << count[i+15:i]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Shift
- - - - - - Shift packed 16-bit integers in "a" left by "count" while shifting in zeros, - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - IF count[63:0] > 15 + + + + + + Shift packed 16-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + IF count[63:0] > 15 dst[i+15:i] := 0 - ELSE + ELSE dst[i+15:i] := ZeroExtend16(a[i+15:i] << count[63:0]) - FI - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512BW + AVX512VL +
immintrin.h
+ Shift
- - - - - - Shift packed 16-bit integers in "a" left by "imm8" while shifting in zeros, and - store the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - IF imm8[7:0] > 15 + + + + + + Shift packed 16-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + IF imm8[7:0] > 15 dst[i+15:i] := 0 - ELSE + ELSE dst[i+15:i] := ZeroExtend16(a[i+15:i] << imm8[7:0]) - FI - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512BW + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 16-bit integers in "a" left by "count" while shifting in zeros, - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - IF count[63:0] > 15 + + + + + Shift packed 16-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + IF count[63:0] > 15 dst[i+15:i] := 0 - ELSE + ELSE dst[i+15:i] := ZeroExtend16(a[i+15:i] << count[63:0]) - FI - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512BW + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 16-bit integers in "a" left by "imm8" while shifting in zeros, and - store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - IF imm8[7:0] > 15 + + + + + Shift packed 16-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + IF imm8[7:0] > 15 dst[i+15:i] := 0 - ELSE + ELSE dst[i+15:i] := ZeroExtend16(a[i+15:i] << imm8[7:0]) - FI - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512BW + AVX512VL +
immintrin.h
+ Shift
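The `sll`/`slli` forms use one count for every lane, taken from the low 64 bits of a vector or from `imm8`, and write the guard the other way around: the lane is zeroed when the count is greater than 15, which is the same set of counts as the not-less-than-16 test in the variable forms. A model of the zeromasked 16-lane case (illustrative names):

    /// Every enabled lane shifts by the same count; counts above 15 zero it.
    fn maskz_sll_epi16(k: u16, a: [u16; 16], count: u64) -> [u16; 16] {
        core::array::from_fn(|j| {
            if (k >> j) & 1 == 1 && count <= 15 {
                a[j] << count
            } else {
                0 // lane disabled, or the whole value shifted out
            }
        })
    }

    fn main() {
        let a = [0x00FFu16; 16];
        assert_eq!(maskz_sll_epi16(0b11, a, 4)[0], 0x0FF0);
        assert_eq!(maskz_sll_epi16(0b11, a, 4)[2], 0);  // lane 2 not in the mask
        assert_eq!(maskz_sll_epi16(0b11, a, 16)[0], 0); // count > 15
    }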
- - - - - - Shift packed 16-bit integers in "a" left by "count" while shifting in zeros, - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - IF count[63:0] > 15 + + + + + + Shift packed 16-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + IF count[63:0] > 15 dst[i+15:i] := 0 - ELSE + ELSE dst[i+15:i] := ZeroExtend16(a[i+15:i] << count[63:0]) - FI - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512BW + AVX512VL +
immintrin.h
+ Shift
- - - - - - Shift packed 16-bit integers in "a" left by "imm8" while shifting in zeros, and - store the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - IF imm8[7:0] > 15 + + + + + + Shift packed 16-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + IF imm8[7:0] > 15 dst[i+15:i] := 0 - ELSE + ELSE dst[i+15:i] := ZeroExtend16(a[i+15:i] << imm8[7:0]) - FI - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512BW + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 16-bit integers in "a" left by "count" while shifting in zeros, - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - IF count[63:0] > 15 + + + + + Shift packed 16-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + IF count[63:0] > 15 dst[i+15:i] := 0 - ELSE + ELSE dst[i+15:i] := ZeroExtend16(a[i+15:i] << count[63:0]) - FI - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512BW + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 16-bit integers in "a" left by "imm8" while shifting in zeros, and - store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - IF imm8[7:0] > 15 + + + + + Shift packed 16-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + IF imm8[7:0] > 15 dst[i+15:i] := 0 - ELSE + ELSE dst[i+15:i] := ZeroExtend16(a[i+15:i] << imm8[7:0]) - FI - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512BW + AVX512VL +
immintrin.h
+ Shift
- - - - - - Shift packed 16-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in sign bits, and store the results in - "dst" using writemask "k" (elements are copied from "src" when the corresponding mask - bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - IF count[i+15:i] < 16 + + + + + + Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + IF count[i+15:i] < 16 dst[i+15:i] := SignExtend16(a[i+15:i] >> count[i+15:i]) - ELSE + ELSE dst[i+15:i] := (a[i+15] ? 0xFFFF : 0) - FI - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512BW + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 16-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in sign bits, and store the results in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - IF count[i+15:i] < 16 + + + + + Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + IF count[i+15:i] < 16 dst[i+15:i] := SignExtend16(a[i+15:i] >> count[i+15:i]) - ELSE + ELSE dst[i+15:i] := (a[i+15] ? 0xFFFF : 0) - FI - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512BW + AVX512VL +
immintrin.h
+ Shift
- - - - Shift packed 16-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in sign bits, and store the results in - "dst". - - FOR j := 0 to 15 - i := j*16 - IF count[i+15:i] < 16 - dst[i+15:i] := SignExtend16(a[i+15:i] >> count[i+15:i]) - ELSE - dst[i+15:i] := (a[i+15] ? 0xFFFF : 0) - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Shift + + + + Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 15 + i := j*16 + IF count[i+15:i] < 16 + dst[i+15:i] := SignExtend16(a[i+15:i] >> count[i+15:i]) + ELSE + dst[i+15:i] := (a[i+15] ? 0xFFFF : 0) + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Shift
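For the arithmetic right shifts the overflow case fills with the sign bit, `(a[i+15] ? 0xFFFF : 0)`, rather than zero. On an `i16` that is exactly what shifting by 15 produces, so clamping the count gives a compact model of the unmasked 16-lane form:

    /// Counts of 16 or more yield all sign bits, i.e. the same as >> 15 on i16.
    fn srav_epi16(a: [i16; 16], count: [u16; 16]) -> [i16; 16] {
        core::array::from_fn(|j| a[j] >> count[j].min(15))
    }

    fn main() {
        let a = [-2i16; 16];
        let count: [u16; 16] = core::array::from_fn(|j| j as u16 * 4); // 0, 4, 8, ...
        let r = srav_epi16(a, count);
        assert_eq!(r[0], -2);
        assert_eq!(r[4], -1); // count 16: all sign bits (0xFFFF as i16)
    }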
- - - - - - Shift packed 16-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in sign bits, and store the results in - "dst" using writemask "k" (elements are copied from "src" when the corresponding mask - bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - IF count[i+15:i] < 16 + + + + + + Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + IF count[i+15:i] < 16 dst[i+15:i] := SignExtend16(a[i+15:i] >> count[i+15:i]) - ELSE + ELSE dst[i+15:i] := (a[i+15] ? 0xFFFF : 0) - FI - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512BW + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 16-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in sign bits, and store the results in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - IF count[i+15:i] < 16 + + + + + Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + IF count[i+15:i] < 16 dst[i+15:i] := SignExtend16(a[i+15:i] >> count[i+15:i]) - ELSE + ELSE dst[i+15:i] := (a[i+15] ? 0xFFFF : 0) - FI - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512BW + AVX512VL +
immintrin.h
+ Shift
- - - - Shift packed 16-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in sign bits, and store the results in - "dst". - - FOR j := 0 to 7 - i := j*16 - IF count[i+15:i] < 16 - dst[i+15:i] := SignExtend16(a[i+15:i] >> count[i+15:i]) - ELSE - dst[i+15:i] := (a[i+15] ? 0xFFFF : 0) - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Shift + + + + Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 7 + i := j*16 + IF count[i+15:i] < 16 + dst[i+15:i] := SignExtend16(a[i+15:i] >> count[i+15:i]) + ELSE + dst[i+15:i] := (a[i+15] ? 0xFFFF : 0) + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Shift
- - - - - - Shift packed 16-bit integers in "a" right by "count" while shifting in sign - bits, and store the results in "dst" using writemask "k" (elements are copied from "src" - when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - IF count[63:0] > 15 + + + + + + Shift packed 16-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + IF count[63:0] > 15 dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0) - ELSE + ELSE dst[i+15:i] := SignExtend16(a[i+15:i] >> count[63:0]) - FI - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512BW + AVX512VL +
immintrin.h
+ Shift
- - - - - - Shift packed 16-bit integers in "a" right by "imm8" while shifting in sign - bits, and store the results in "dst" using writemask "k" (elements are copied from "src" - when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - IF imm8[7:0] > 15 + + + + + + Shift packed 16-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + IF imm8[7:0] > 15 dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0) - ELSE + ELSE dst[i+15:i] := SignExtend16(a[i+15:i] >> imm8[7:0]) - FI - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512BW + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 16-bit integers in "a" right by "count" while shifting in sign - bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when - the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - IF count[63:0] > 15 + + + + + Shift packed 16-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + IF count[63:0] > 15 dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0) - ELSE + ELSE dst[i+15:i] := SignExtend16(a[i+15:i] >> count[63:0]) - FI - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512BW + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 16-bit integers in "a" right by "imm8" while shifting in sign - bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when - the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - IF imm8[7:0] > 15 + + + + + Shift packed 16-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + IF imm8[7:0] > 15 dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0) - ELSE + ELSE dst[i+15:i] := SignExtend16(a[i+15:i] >> imm8[7:0]) - FI - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512BW + AVX512VL +
immintrin.h
+ Shift
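The writemasked arithmetic shifts blend with `src` on disabled lanes; enabled lanes shift with the same sign-fill rule for counts above 15. A model of the 16-lane `imm8` form (illustrative names):

    /// Enabled lanes take a[j] >> imm8 (sign-filling, clamped at 15);
    /// disabled lanes keep src[j].
    fn mask_srai_epi16(src: [i16; 16], k: u16, a: [i16; 16], imm8: u8) -> [i16; 16] {
        let n = u32::from(imm8).min(15);
        core::array::from_fn(|j| if (k >> j) & 1 == 1 { a[j] >> n } else { src[j] })
    }

    fn main() {
        let src = [7i16; 16];
        let a = [-128i16; 16];
        let r = mask_srai_epi16(src, 0b01, a, 3);
        assert_eq!(r[0], -16); // -128 >> 3, sign preserved
        assert_eq!(r[1], 7);   // disabled lane copied from src
    }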
- - - - - - Shift packed 16-bit integers in "a" right by "count" while shifting in sign - bits, and store the results in "dst" using writemask "k" (elements are copied from "src" - when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - IF count[63:0] > 15 + + + + + + Shift packed 16-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + IF count[63:0] > 15 dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0) - ELSE + ELSE dst[i+15:i] := SignExtend16(a[i+15:i] >> count[63:0]) - FI - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512BW + AVX512VL +
immintrin.h
+ Shift
- - - - - - Shift packed 16-bit integers in "a" right by "imm8" while shifting in sign - bits, and store the results in "dst" using writemask "k" (elements are copied from "src" - when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - IF imm8[7:0] > 15 + + + + + + Shift packed 16-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + IF imm8[7:0] > 15 dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0) - ELSE + ELSE dst[i+15:i] := SignExtend16(a[i+15:i] >> imm8[7:0]) - FI - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512BW + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 16-bit integers in "a" right by "count" while shifting in sign - bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when - the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - IF count[63:0] > 15 + + + + + Shift packed 16-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + IF count[63:0] > 15 dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0) - ELSE + ELSE dst[i+15:i] := SignExtend16(a[i+15:i] >> count[63:0]) - FI - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512BW + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 16-bit integers in "a" right by "imm8" while shifting in sign - bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when - the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - IF imm8[7:0] > 15 + + + + + Shift packed 16-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + IF imm8[7:0] > 15 dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0) - ELSE + ELSE dst[i+15:i] := SignExtend16(a[i+15:i] >> imm8[7:0]) - FI - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512BW + AVX512VL +
immintrin.h
+ Shift
- - - - - - Shift packed 16-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - IF count[i+15:i] < 16 + + + + + + Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + IF count[i+15:i] < 16 dst[i+15:i] := ZeroExtend16(a[i+15:i] >> count[i+15:i]) - ELSE + ELSE dst[i+15:i] := 0 - FI - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512BW + AVX512VL +
immintrin.h
+ Shift
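
Note: the variable-shift ("srlv") family shifts each lane by its own per-lane count, and any count of 16 or more zeroes the lane rather than being reduced modulo the element width. A scalar sketch under the same assumptions as the model above:

    // Scalar model of the masked per-element 16-bit logical right shift.
    fn mask_srlv_epi16_model(src: [u16; 16], k: u16, a: [u16; 16], count: [u16; 16]) -> [u16; 16] {
        let mut dst = src; // writemask: inactive lanes keep src
        for j in 0..16 {
            if (k >> j) & 1 != 0 {
                // Counts >= 16 zero the lane, matching the pseudocode.
                dst[j] = if count[j] < 16 { a[j] >> count[j] } else { 0 };
            }
        }
        dst
    }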
- - - - - Shift packed 16-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in "dst" - using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - IF count[i+15:i] < 16 + + + + + Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + IF count[i+15:i] < 16 dst[i+15:i] := ZeroExtend16(a[i+15:i] >> count[i+15:i]) - ELSE - dst[i+15:i] := 0 - FI - ELSE + ELSE dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512BW + AVX512VL +
immintrin.h
+ Shift
- - - - Shift packed 16-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in - "dst". - - FOR j := 0 to 15 - i := j*16 - IF count[i+15:i] < 16 - dst[i+15:i] := ZeroExtend16(a[i+15:i] >> count[i+15:i]) - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Shift + + + + Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 15 + i := j*16 + IF count[i+15:i] < 16 + dst[i+15:i] := ZeroExtend16(a[i+15:i] >> count[i+15:i]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Shift
- - - - - - Shift packed 16-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - IF count[i+15:i] < 16 + + + + + + Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + IF count[i+15:i] < 16 dst[i+15:i] := ZeroExtend16(a[i+15:i] >> count[i+15:i]) - ELSE + ELSE dst[i+15:i] := 0 - FI - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512BW + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 16-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in "dst" - using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - IF count[i+15:i] < 16 + + + + + Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + IF count[i+15:i] < 16 dst[i+15:i] := ZeroExtend16(a[i+15:i] >> count[i+15:i]) - ELSE + ELSE dst[i+15:i] := 0 - FI - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512BW + AVX512VL +
immintrin.h
+ Shift
- - - - Shift packed 16-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in - "dst". - - FOR j := 0 to 7 - i := j*16 - IF count[i+15:i] < 16 - dst[i+15:i] := ZeroExtend16(a[i+15:i] >> count[i+15:i]) - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Shift + + + + Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 7 + i := j*16 + IF count[i+15:i] < 16 + dst[i+15:i] := ZeroExtend16(a[i+15:i] >> count[i+15:i]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512BW + AVX512VL +
immintrin.h
+ Shift
- - - - - - Shift packed 16-bit integers in "a" right by "count" while shifting in zeros, - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - IF count[63:0] > 15 + + + + + + Shift packed 16-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + IF count[63:0] > 15 dst[i+15:i] := 0 - ELSE + ELSE dst[i+15:i] := ZeroExtend16(a[i+15:i] >> count[63:0]) - FI - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512BW + AVX512VL +
immintrin.h
+ Shift
- - - - - - Shift packed 16-bit integers in "a" right by "imm8" while shifting in zeros, - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - IF imm8[7:0] > 15 + + + + + + Shift packed 16-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + IF imm8[7:0] > 15 dst[i+15:i] := 0 - ELSE + ELSE dst[i+15:i] := ZeroExtend16(a[i+15:i] >> imm8[7:0]) - FI - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512BW + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 16-bit integers in "a" right by "count" while shifting in zeros, - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - IF count[63:0] > 15 + + + + + Shift packed 16-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + IF count[63:0] > 15 dst[i+15:i] := 0 - ELSE + ELSE dst[i+15:i] := ZeroExtend16(a[i+15:i] >> count[63:0]) - FI - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512BW + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 16-bit integers in "a" right by "imm8" while shifting in zeros, - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - IF imm8[7:0] > 15 + + + + + Shift packed 16-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + IF imm8[7:0] > 15 dst[i+15:i] := 0 - ELSE + ELSE dst[i+15:i] := ZeroExtend16(a[i+15:i] >> imm8[7:0]) - FI - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512BW + AVX512VL +
immintrin.h
+ Shift
- - - - - - Shift packed 16-bit integers in "a" right by "count" while shifting in zeros, - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - IF count[63:0] > 15 + + + + + + Shift packed 16-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + IF count[63:0] > 15 dst[i+15:i] := 0 - ELSE + ELSE dst[i+15:i] := ZeroExtend16(a[i+15:i] >> count[63:0]) - FI - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512BW + AVX512VL +
immintrin.h
+ Shift
- - - - - - Shift packed 16-bit integers in "a" right by "imm8" while shifting in zeros, - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - IF imm8[7:0] > 15 + + + + + + Shift packed 16-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + IF imm8[7:0] > 15 dst[i+15:i] := 0 - ELSE + ELSE dst[i+15:i] := ZeroExtend16(a[i+15:i] >> imm8[7:0]) - FI - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512BW + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 16-bit integers in "a" right by "count" while shifting in zeros, - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - IF count[63:0] > 15 + + + + + Shift packed 16-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + IF count[63:0] > 15 dst[i+15:i] := 0 - ELSE + ELSE dst[i+15:i] := ZeroExtend16(a[i+15:i] >> count[63:0]) - FI - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512BW + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 16-bit integers in "a" right by "imm8" while shifting in zeros, - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - IF imm8[7:0] > 15 + + + + + Shift packed 16-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + IF imm8[7:0] > 15 dst[i+15:i] := 0 - ELSE + ELSE dst[i+15:i] := ZeroExtend16(a[i+15:i] >> imm8[7:0]) - FI - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512BW - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512BW + AVX512VL +
immintrin.h
+ Shift
- - - Reduce the packed 16-bit integers in "a" by addition. Returns the sum of
- all elements in "a". - -
- DEFINE REDUCE_ADD(src, len) { - IF len == 2 - RETURN src[15:0] + src[31:16] - FI
- len := len / 2 - FOR j:= 0 to (len-1) - i := j*16
- src[i+15:i] := src[i+15:i] + src[i+16*len+15:i+16*len] - ENDFOR
- RETURN REDUCE_ADD(src[16*len-1:0], len) - } - dst[15:0] := REDUCE_ADD(a, 8) -
- - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + Reduce the packed 16-bit integers in "a" by addition. Returns the sum of all elements in "a". +
+DEFINE REDUCE_ADD(src, len) {
+ IF len == 2
+ RETURN src[15:0] + src[31:16]
+ FI
+ len := len / 2
+ FOR j:= 0 to (len-1)
+ i := j*16
+ src[i+15:i] := src[i+15:i] + src[i+16*len+15:i+16*len]
+ ENDFOR
+ RETURN REDUCE_ADD(src[16*len-1:0], len)
+}
+dst[15:0] := REDUCE_ADD(a, 8)
+
+ AVX512BW
+ AVX512VL
+
immintrin.h
+ Arithmetic
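
Note: the REDUCE_ADD helper halves the vector and adds the two halves until a single element remains; since wrapping integer addition is associative and commutative, this is equivalent to a plain left-to-right fold. A scalar sketch in Rust (function name ours):

    // Fold equivalent of the REDUCE_ADD tree reduction over 8 x i16.
    fn reduce_add_epi16_model(a: [i16; 8]) -> i16 {
        let mut acc = 0i16;
        for x in a {
            acc = acc.wrapping_add(x); // wraps like the hardware reduction
        }
        acc
    }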
- - - - Reduce the packed 16-bit integers in "a" by addition using mask "k". Returns - the sum of all active elements in "a". - - DEFINE REDUCE_ADD(src, len) { - IF len == 2 - RETURN src[15:0] + src[31:16] - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*16 - src[i+15:i] := src[i+15:i] + src[i+16*len+15:i+16*len] - ENDFOR - RETURN REDUCE_ADD(src[16*len-1:0], len) - } - tmp := a - FOR j := 0 to 7 - i := j*16 - IF k[j] - tmp[i+15:i] := a[i+15:i] - ELSE - tmp[i+15:i] := 0 - FI - ENDFOR - dst[15:0] := REDUCE_ADD(tmp, 8) - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + Reduce the packed 16-bit integers in "a" by addition using mask "k". Returns the sum of all active elements in "a". + +DEFINE REDUCE_ADD(src, len) { + IF len == 2 + RETURN src[15:0] + src[31:16] + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*16 + src[i+15:i] := src[i+15:i] + src[i+16*len+15:i+16*len] + ENDFOR + RETURN REDUCE_ADD(src[16*len-1:0], len) +} +tmp := a +FOR j := 0 to 7 + i := j*16 + IF k[j] + tmp[i+15:i] := a[i+15:i] + ELSE + tmp[i+15:i] := 0 + FI +ENDFOR +dst[15:0] := REDUCE_ADD(tmp, 8) + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
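
Note: the masked variants substitute the operation's identity element into inactive lanes before reducing, so inactive lanes cannot affect the result; for addition that identity is 0, as the tmp loop above shows. A sketch, assuming mask bit j gates lane j:

    // Masked add-reduction: inactive lanes contribute the identity 0.
    fn mask_reduce_add_epi16_model(k: u8, a: [i16; 8]) -> i16 {
        let mut acc = 0i16;
        for j in 0..8 {
            if (k >> j) & 1 != 0 {
                acc = acc.wrapping_add(a[j]);
            }
        }
        acc
    }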
- - - Reduce the packed 16-bit integers in "a" by addition. Returns the sum of
- all elements in "a". - -
- DEFINE REDUCE_ADD(src, len) { - IF len == 2 - RETURN src[15:0] + src[31:16] - FI
- len := len / 2 - FOR j:= 0 to (len-1) - i := j*16
- src[i+15:i] := src[i+15:i] + src[i+16*len+15:i+16*len] - ENDFOR
- RETURN REDUCE_ADD(src[16*len-1:0], len) - } - dst[15:0] := REDUCE_ADD(a, 16) -
- - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + Reduce the packed 16-bit integers in "a" by addition. Returns the sum of all elements in "a". +
+DEFINE REDUCE_ADD(src, len) {
+ IF len == 2
+ RETURN src[15:0] + src[31:16]
+ FI
+ len := len / 2
+ FOR j:= 0 to (len-1)
+ i := j*16
+ src[i+15:i] := src[i+15:i] + src[i+16*len+15:i+16*len]
+ ENDFOR
+ RETURN REDUCE_ADD(src[16*len-1:0], len)
+}
+dst[15:0] := REDUCE_ADD(a, 16)
+
+ AVX512BW
+ AVX512VL
+
immintrin.h
+ Arithmetic
- - - - Reduce the packed 16-bit integers in "a" by addition using mask "k". Returns - the sum of all active elements in "a". - - DEFINE REDUCE_ADD(src, len) { - IF len == 2 - RETURN src[15:0] + src[31:16] - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*16 - src[i+15:i] := src[i+15:i] + src[i+16*len+15:i+16*len] - ENDFOR - RETURN REDUCE_ADD(src[16*len-1:0], len) - } - tmp := a - FOR j := 0 to 15 - i := j*16 - IF k[j] - tmp[i+15:i] := a[i+15:i] - ELSE - tmp[i+15:i] := 0 - FI - ENDFOR - dst[15:0] := REDUCE_ADD(tmp, 16) - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + Reduce the packed 16-bit integers in "a" by addition using mask "k". Returns the sum of all active elements in "a". + +DEFINE REDUCE_ADD(src, len) { + IF len == 2 + RETURN src[15:0] + src[31:16] + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*16 + src[i+15:i] := src[i+15:i] + src[i+16*len+15:i+16*len] + ENDFOR + RETURN REDUCE_ADD(src[16*len-1:0], len) +} +tmp := a +FOR j := 0 to 15 + i := j*16 + IF k[j] + tmp[i+15:i] := a[i+15:i] + ELSE + tmp[i+15:i] := 0 + FI +ENDFOR +dst[15:0] := REDUCE_ADD(tmp, 16) + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - Reduce the packed 8-bit integers in "a" by addition. Returns the sum of
- all elements in "a". - -
- DEFINE REDUCE_ADD(src, len) { - IF len == 2 - RETURN src[7:0] + src[15:8] - FI
- len := len / 2 - FOR j:= 0 to (len-1) - i := j*8
- src[i+7:i] := src[i+7:i] + src[i+8*len+7:i+8*len] - ENDFOR
- RETURN REDUCE_ADD(src[8*len-1:0], len) - } - dst[7:0] := REDUCE_ADD(a, 16) -
- - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + Reduce the packed 8-bit integers in "a" by addition. Returns the sum of all elements in "a". +
+DEFINE REDUCE_ADD(src, len) {
+ IF len == 2
+ RETURN src[7:0] + src[15:8]
+ FI
+ len := len / 2
+ FOR j:= 0 to (len-1)
+ i := j*8
+ src[i+7:i] := src[i+7:i] + src[i+8*len+7:i+8*len]
+ ENDFOR
+ RETURN REDUCE_ADD(src[8*len-1:0], len)
+}
+dst[7:0] := REDUCE_ADD(a, 16)
+
+ AVX512BW
+ AVX512VL
+
immintrin.h
+ Arithmetic
- - - - Reduce the packed 8-bit integers in "a" by addition using mask "k". Returns the - sum of all active elements in "a". - - DEFINE REDUCE_ADD(src, len) { - IF len == 2 - RETURN src[7:0] + src[15:8] - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*8 - src[i+7:i] := src[i+7:i] + src[i+8*len+7:i+8*len] - ENDFOR - RETURN REDUCE_ADD(src[8*len-1:0], len) - } - tmp := a - FOR j := 0 to 15 - i := j*8 - IF k[j] - tmp[i+7:i] := a[i+7:i] - ELSE - tmp[i+7:i] := 0 - FI - ENDFOR - dst[7:0] := REDUCE_ADD(tmp, 16) - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + Reduce the packed 8-bit integers in "a" by addition using mask "k". Returns the sum of all active elements in "a". + +DEFINE REDUCE_ADD(src, len) { + IF len == 2 + RETURN src[7:0] + src[15:8] + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*8 + src[i+7:i] := src[i+7:i] + src[i+8*len+7:i+8*len] + ENDFOR + RETURN REDUCE_ADD(src[8*len-1:0], len) +} +tmp := a +FOR j := 0 to 15 + i := j*8 + IF k[j] + tmp[i+7:i] := a[i+7:i] + ELSE + tmp[i+7:i] := 0 + FI +ENDFOR +dst[7:0] := REDUCE_ADD(tmp, 16) + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - Reduce the packed 8-bit integers in "a" by addition. Returns the sum of
- all elements in "a". - -
- DEFINE REDUCE_ADD(src, len) { - IF len == 2 - RETURN src[7:0] + src[15:8] - FI
- len := len / 2 - FOR j:= 0 to (len-1) - i := j*8
- src[i+7:i] := src[i+7:i] + src[i+8*len+7:i+8*len] - ENDFOR
- RETURN REDUCE_ADD(src[8*len-1:0], len) - } - dst[7:0] := REDUCE_ADD(a, 32) -
- - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + Reduce the packed 8-bit integers in "a" by addition. Returns the sum of all elements in "a". +
+DEFINE REDUCE_ADD(src, len) {
+ IF len == 2
+ RETURN src[7:0] + src[15:8]
+ FI
+ len := len / 2
+ FOR j:= 0 to (len-1)
+ i := j*8
+ src[i+7:i] := src[i+7:i] + src[i+8*len+7:i+8*len]
+ ENDFOR
+ RETURN REDUCE_ADD(src[8*len-1:0], len)
+}
+dst[7:0] := REDUCE_ADD(a, 32)
+
+ AVX512BW
+ AVX512VL
+
immintrin.h
+ Arithmetic
- - - - Reduce the packed 8-bit integers in "a" by addition using mask "k". Returns the - sum of all active elements in "a". - - DEFINE REDUCE_ADD(src, len) { - IF len == 2 - RETURN src[7:0] + src[15:8] - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*8 - src[i+7:i] := src[i+7:i] + src[i+8*len+7:i+8*len] - ENDFOR - RETURN REDUCE_ADD(src[8*len-1:0], len) - } - tmp := a - FOR j := 0 to 31 - i := j*8 - IF k[j] - tmp[i+7:i] := a[i+7:i] - ELSE - tmp[i+7:i] := 0 - FI - ENDFOR - dst[7:0] := REDUCE_ADD(tmp, 32) - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + Reduce the packed 8-bit integers in "a" by addition using mask "k". Returns the sum of all active elements in "a". + +DEFINE REDUCE_ADD(src, len) { + IF len == 2 + RETURN src[7:0] + src[15:8] + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*8 + src[i+7:i] := src[i+7:i] + src[i+8*len+7:i+8*len] + ENDFOR + RETURN REDUCE_ADD(src[8*len-1:0], len) +} +tmp := a +FOR j := 0 to 31 + i := j*8 + IF k[j] + tmp[i+7:i] := a[i+7:i] + ELSE + tmp[i+7:i] := 0 + FI +ENDFOR +dst[7:0] := REDUCE_ADD(tmp, 32) + + AVX512BW + AVX512VL +
immintrin.h
+ Arithmetic
- - - Reduce the packed 16-bit integers in "a" by multiplication. Returns the product of
- all elements in "a". - -
- DEFINE REDUCE_MUL(src, len) { - IF len == 2 - RETURN src[15:0] * src[31:16] - FI
- len := len / 2 - FOR j:= 0 to (len-1) - i := j*16
- src[i+15:i] := src[i+15:i] * src[i+16*len+15:i+16*len] - ENDFOR
- RETURN REDUCE_MUL(src[16*len-1:0], len) - } - dst[15:0] := REDUCE_MUL(a, 8) -
- - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + Reduce the packed 16-bit integers in "a" by multiplication. Returns the product of all elements in "a". +
+DEFINE REDUCE_MUL(src, len) {
+ IF len == 2
+ RETURN src[15:0] * src[31:16]
+ FI
+ len := len / 2
+ FOR j:= 0 to (len-1)
+ i := j*16
+ src[i+15:i] := src[i+15:i] * src[i+16*len+15:i+16*len]
+ ENDFOR
+ RETURN REDUCE_MUL(src[16*len-1:0], len)
+}
+dst[15:0] := REDUCE_MUL(a, 8)
+
+ AVX512BW
+ AVX512VL
+
immintrin.h
+ Arithmetic
- - - - Reduce the packed 16-bit integers in "a" by multiplication using mask "k".
- Returns the product of all active elements in "a". - -
- DEFINE REDUCE_MUL(src, len) { - IF len == 2 - RETURN src[15:0] * src[31:16] - FI
- len := len / 2 - FOR j:= 0 to (len-1) - i := j*16
- src[i+15:i] := src[i+15:i] * src[i+16*len+15:i+16*len] - ENDFOR
- RETURN REDUCE_MUL(src[16*len-1:0], len) - } - tmp := a - FOR j := 0 to 7 - i := j*16
- IF k[j] - tmp[i+15:i] := a[i+15:i] - ELSE - tmp[i+15:i] := 1 - FI - ENDFOR
- dst[15:0] := REDUCE_MUL(tmp, 8) - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + Reduce the packed 16-bit integers in "a" by multiplication using mask "k". Returns the product of all active elements in "a". +
+DEFINE REDUCE_MUL(src, len) {
+ IF len == 2
+ RETURN src[15:0] * src[31:16]
+ FI
+ len := len / 2
+ FOR j:= 0 to (len-1)
+ i := j*16
+ src[i+15:i] := src[i+15:i] * src[i+16*len+15:i+16*len]
+ ENDFOR
+ RETURN REDUCE_MUL(src[16*len-1:0], len)
+}
+tmp := a
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ tmp[i+15:i] := a[i+15:i]
+ ELSE
+ tmp[i+15:i] := 1
+ FI
+ENDFOR
+dst[15:0] := REDUCE_MUL(tmp, 8)
+
+ AVX512BW
+ AVX512VL
+
immintrin.h
+ Arithmetic
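
Note: for the multiply reductions the inactive-lane filler is 1, the multiplicative identity, as the tmp loop above shows. A scalar sketch (function name ours):

    // Masked multiply-reduction: inactive lanes contribute the identity 1.
    fn mask_reduce_mul_epi16_model(k: u8, a: [i16; 8]) -> i16 {
        let mut acc = 1i16;
        for j in 0..8 {
            if (k >> j) & 1 != 0 {
                acc = acc.wrapping_mul(a[j]);
            }
        }
        acc
    }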
- - - Reduce the packed 16-bit integers in "a" by multiplication. Returns the product of
- all elements in "a". - -
- DEFINE REDUCE_MUL(src, len) { - IF len == 2 - RETURN src[15:0] * src[31:16] - FI
- len := len / 2 - FOR j:= 0 to (len-1) - i := j*16
- src[i+15:i] := src[i+15:i] * src[i+16*len+15:i+16*len] - ENDFOR
- RETURN REDUCE_MUL(src[16*len-1:0], len) - } - dst[15:0] := REDUCE_MUL(a, 16) -
- - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + Reduce the packed 16-bit integers in "a" by multiplication. Returns the product of all elements in "a". +
+DEFINE REDUCE_MUL(src, len) {
+ IF len == 2
+ RETURN src[15:0] * src[31:16]
+ FI
+ len := len / 2
+ FOR j:= 0 to (len-1)
+ i := j*16
+ src[i+15:i] := src[i+15:i] * src[i+16*len+15:i+16*len]
+ ENDFOR
+ RETURN REDUCE_MUL(src[16*len-1:0], len)
+}
+dst[15:0] := REDUCE_MUL(a, 16)
+
+ AVX512BW
+ AVX512VL
+
immintrin.h
+ Arithmetic
- - - - Reduce the packed 16-bit integers in "a" by multiplication using mask "k".
- Returns the product of all active elements in "a". - -
- DEFINE REDUCE_MUL(src, len) { - IF len == 2 - RETURN src[15:0] * src[31:16] - FI
- len := len / 2 - FOR j:= 0 to (len-1) - i := j*16
- src[i+15:i] := src[i+15:i] * src[i+16*len+15:i+16*len] - ENDFOR
- RETURN REDUCE_MUL(src[16*len-1:0], len) - } - tmp := a - FOR j := 0 to 15 - i := j*16
- IF k[j] - tmp[i+15:i] := a[i+15:i] - ELSE - tmp[i+15:i] := 1 - FI - ENDFOR
- dst[15:0] := REDUCE_MUL(tmp, 16) - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + Reduce the packed 16-bit integers in "a" by multiplication using mask "k". Returns the product of all active elements in "a". +
+DEFINE REDUCE_MUL(src, len) {
+ IF len == 2
+ RETURN src[15:0] * src[31:16]
+ FI
+ len := len / 2
+ FOR j:= 0 to (len-1)
+ i := j*16
+ src[i+15:i] := src[i+15:i] * src[i+16*len+15:i+16*len]
+ ENDFOR
+ RETURN REDUCE_MUL(src[16*len-1:0], len)
+}
+tmp := a
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ tmp[i+15:i] := a[i+15:i]
+ ELSE
+ tmp[i+15:i] := 1
+ FI
+ENDFOR
+dst[15:0] := REDUCE_MUL(tmp, 16)
+
+ AVX512BW
+ AVX512VL
+
immintrin.h
+ Arithmetic
- - - Reduce the packed 8-bit integers in "a" by multiplication. Returns the product of
- all elements in "a". - -
- DEFINE REDUCE_MUL(src, len) { - IF len == 2 - RETURN src[7:0] * src[15:8] - FI
- len := len / 2 - FOR j:= 0 to (len-1) - i := j*8
- src[i+7:i] := src[i+7:i] * src[i+8*len+7:i+8*len] - ENDFOR
- RETURN REDUCE_MUL(src[8*len-1:0], len) - } - dst[7:0] := REDUCE_MUL(a, 16) -
- - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + Reduce the packed 8-bit integers in "a" by multiplication. Returns the product of all elements in "a". +
+DEFINE REDUCE_MUL(src, len) {
+ IF len == 2
+ RETURN src[7:0] * src[15:8]
+ FI
+ len := len / 2
+ FOR j:= 0 to (len-1)
+ i := j*8
+ src[i+7:i] := src[i+7:i] * src[i+8*len+7:i+8*len]
+ ENDFOR
+ RETURN REDUCE_MUL(src[8*len-1:0], len)
+}
+dst[7:0] := REDUCE_MUL(a, 16)
+
+ AVX512BW
+ AVX512VL
+
immintrin.h
+ Arithmetic
- - - - Reduce the packed 8-bit integers in "a" by multiplication using mask "k".
- Returns the product of all active elements in "a". - -
- DEFINE REDUCE_MUL(src, len) { - IF len == 2 - RETURN src[7:0] * src[15:8] - FI
- len := len / 2 - FOR j:= 0 to (len-1) - i := j*8
- src[i+7:i] := src[i+7:i] * src[i+8*len+7:i+8*len] - ENDFOR
- RETURN REDUCE_MUL(src[8*len-1:0], len) - } - tmp := a - FOR j := 0 to 15 - i := j*8
- IF k[j] - tmp[i+7:i] := a[i+7:i] - ELSE - tmp[i+7:i] := 1 - FI - ENDFOR
- dst[7:0] := REDUCE_MUL(tmp, 16) - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + Reduce the packed 8-bit integers in "a" by multiplication using mask "k". Returns the product of all active elements in "a". +
+DEFINE REDUCE_MUL(src, len) {
+ IF len == 2
+ RETURN src[7:0] * src[15:8]
+ FI
+ len := len / 2
+ FOR j:= 0 to (len-1)
+ i := j*8
+ src[i+7:i] := src[i+7:i] * src[i+8*len+7:i+8*len]
+ ENDFOR
+ RETURN REDUCE_MUL(src[8*len-1:0], len)
+}
+tmp := a
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ tmp[i+7:i] := a[i+7:i]
+ ELSE
+ tmp[i+7:i] := 1
+ FI
+ENDFOR
+dst[7:0] := REDUCE_MUL(tmp, 16)
+
+ AVX512BW
+ AVX512VL
+
immintrin.h
+ Arithmetic
- - - Reduce the packed 8-bit integers in "a" by multiplication. Returns the product of
- all elements in "a". - -
- DEFINE REDUCE_MUL(src, len) { - IF len == 2 - RETURN src[7:0] * src[15:8] - FI
- len := len / 2 - FOR j:= 0 to (len-1) - i := j*8
- src[i+7:i] := src[i+7:i] * src[i+8*len+7:i+8*len] - ENDFOR
- RETURN REDUCE_MUL(src[8*len-1:0], len) - } - dst[7:0] := REDUCE_MUL(a, 32) -
- - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + Reduce the packed 8-bit integers in "a" by multiplication. Returns the product of all elements in "a". +
+DEFINE REDUCE_MUL(src, len) {
+ IF len == 2
+ RETURN src[7:0] * src[15:8]
+ FI
+ len := len / 2
+ FOR j:= 0 to (len-1)
+ i := j*8
+ src[i+7:i] := src[i+7:i] * src[i+8*len+7:i+8*len]
+ ENDFOR
+ RETURN REDUCE_MUL(src[8*len-1:0], len)
+}
+dst[7:0] := REDUCE_MUL(a, 32)
+
+ AVX512BW
+ AVX512VL
+
immintrin.h
+ Arithmetic
- - - - Reduce the packed 8-bit integers in "a" by multiplication using mask "k".
- Returns the product of all active elements in "a". - -
- DEFINE REDUCE_MUL(src, len) { - IF len == 2 - RETURN src[7:0] * src[15:8] - FI
- len := len / 2 - FOR j:= 0 to (len-1) - i := j*8
- src[i+7:i] := src[i+7:i] * src[i+8*len+7:i+8*len] - ENDFOR
- RETURN REDUCE_MUL(src[8*len-1:0], len) - } - tmp := a - FOR j := 0 to 31 - i := j*8
- IF k[j] - tmp[i+7:i] := a[i+7:i] - ELSE - tmp[i+7:i] := 1 - FI - ENDFOR
- dst[7:0] := REDUCE_MUL(tmp, 32) - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + Reduce the packed 8-bit integers in "a" by multiplication using mask "k". Returns the product of all active elements in "a". +
+DEFINE REDUCE_MUL(src, len) {
+ IF len == 2
+ RETURN src[7:0] * src[15:8]
+ FI
+ len := len / 2
+ FOR j:= 0 to (len-1)
+ i := j*8
+ src[i+7:i] := src[i+7:i] * src[i+8*len+7:i+8*len]
+ ENDFOR
+ RETURN REDUCE_MUL(src[8*len-1:0], len)
+}
+tmp := a
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ tmp[i+7:i] := a[i+7:i]
+ ELSE
+ tmp[i+7:i] := 1
+ FI
+ENDFOR
+dst[7:0] := REDUCE_MUL(tmp, 32)
+
+ AVX512BW
+ AVX512VL
+
immintrin.h
+ Arithmetic
- - - Reduce the packed 16-bit integers in "a" by bitwise OR. Returns the bitwise OR of
- all elements in "a". - -
- DEFINE REDUCE_OR(src, len) { - IF len == 2 - RETURN src[15:0] OR src[31:16] - FI
- len := len / 2 - FOR j:= 0 to (len-1) - i := j*16
- src[i+15:i] := src[i+15:i] OR src[i+16*len+15:i+16*len] - ENDFOR
- RETURN REDUCE_OR(src[16*len-1:0], len) - } - dst[15:0] := REDUCE_OR(a, 8) -
- - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + Reduce the packed 16-bit integers in "a" by bitwise OR. Returns the bitwise OR of all elements in "a". +
+DEFINE REDUCE_OR(src, len) {
+ IF len == 2
+ RETURN src[15:0] OR src[31:16]
+ FI
+ len := len / 2
+ FOR j:= 0 to (len-1)
+ i := j*16
+ src[i+15:i] := src[i+15:i] OR src[i+16*len+15:i+16*len]
+ ENDFOR
+ RETURN REDUCE_OR(src[16*len-1:0], len)
+}
+dst[15:0] := REDUCE_OR(a, 8)
+
+ AVX512BW
+ AVX512VL
+
immintrin.h
+ Arithmetic
- - - - Reduce the packed 16-bit integers in "a" by bitwise OR using mask "k".
- Returns the bitwise OR of all active elements in "a". - -
- DEFINE REDUCE_OR(src, len) { - IF len == 2 - RETURN src[15:0] OR src[31:16] - FI
- len := len / 2 - FOR j:= 0 to (len-1) - i := j*16
- src[i+15:i] := src[i+15:i] OR src[i+16*len+15:i+16*len] - ENDFOR
- RETURN REDUCE_OR(src[16*len-1:0], len) - } - tmp := a - FOR j := 0 to 7 - i := j*16
- IF k[j] - tmp[i+15:i] := a[i+15:i] - ELSE - tmp[i+15:i] := 0 - FI - ENDFOR
- dst[15:0] := REDUCE_OR(tmp, 8) - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + Reduce the packed 16-bit integers in "a" by bitwise OR using mask "k". Returns the bitwise OR of all active elements in "a". +
+DEFINE REDUCE_OR(src, len) {
+ IF len == 2
+ RETURN src[15:0] OR src[31:16]
+ FI
+ len := len / 2
+ FOR j:= 0 to (len-1)
+ i := j*16
+ src[i+15:i] := src[i+15:i] OR src[i+16*len+15:i+16*len]
+ ENDFOR
+ RETURN REDUCE_OR(src[16*len-1:0], len)
+}
+tmp := a
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ tmp[i+15:i] := a[i+15:i]
+ ELSE
+ tmp[i+15:i] := 0
+ FI
+ENDFOR
+dst[15:0] := REDUCE_OR(tmp, 8)
+
+ AVX512BW
+ AVX512VL
+
immintrin.h
+ Arithmetic
- - - Reduce the packed 16-bit integers in "a" by bitwise OR. Returns the bitwise OR of
- all elements in "a". - -
- DEFINE REDUCE_OR(src, len) { - IF len == 2 - RETURN src[15:0] OR src[31:16] - FI
- len := len / 2 - FOR j:= 0 to (len-1) - i := j*16
- src[i+15:i] := src[i+15:i] OR src[i+16*len+15:i+16*len] - ENDFOR
- RETURN REDUCE_OR(src[16*len-1:0], len) - } - dst[15:0] := REDUCE_OR(a, 16) -
- - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + Reduce the packed 16-bit integers in "a" by bitwise OR. Returns the bitwise OR of all elements in "a". +
+DEFINE REDUCE_OR(src, len) {
+ IF len == 2
+ RETURN src[15:0] OR src[31:16]
+ FI
+ len := len / 2
+ FOR j:= 0 to (len-1)
+ i := j*16
+ src[i+15:i] := src[i+15:i] OR src[i+16*len+15:i+16*len]
+ ENDFOR
+ RETURN REDUCE_OR(src[16*len-1:0], len)
+}
+dst[15:0] := REDUCE_OR(a, 16)
+
+ AVX512BW
+ AVX512VL
+
immintrin.h
+ Arithmetic
- - - - Reduce the packed 16-bit integers in "a" by bitwise OR using mask "k".
- Returns the bitwise OR of all active elements in "a". - -
- DEFINE REDUCE_OR(src, len) { - IF len == 2 - RETURN src[15:0] OR src[31:16] - FI
- len := len / 2 - FOR j:= 0 to (len-1) - i := j*16
- src[i+15:i] := src[i+15:i] OR src[i+16*len+15:i+16*len] - ENDFOR
- RETURN REDUCE_OR(src[16*len-1:0], len) - } - tmp := a - FOR j := 0 to 15 - i := j*16
- IF k[j] - tmp[i+15:i] := a[i+15:i] - ELSE - tmp[i+15:i] := 0 - FI - ENDFOR
- dst[15:0] := REDUCE_OR(tmp, 16) - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + Reduce the packed 16-bit integers in "a" by bitwise OR using mask "k". Returns the bitwise OR of all active elements in "a". +
+DEFINE REDUCE_OR(src, len) {
+ IF len == 2
+ RETURN src[15:0] OR src[31:16]
+ FI
+ len := len / 2
+ FOR j:= 0 to (len-1)
+ i := j*16
+ src[i+15:i] := src[i+15:i] OR src[i+16*len+15:i+16*len]
+ ENDFOR
+ RETURN REDUCE_OR(src[16*len-1:0], len)
+}
+tmp := a
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ tmp[i+15:i] := a[i+15:i]
+ ELSE
+ tmp[i+15:i] := 0
+ FI
+ENDFOR
+dst[15:0] := REDUCE_OR(tmp, 16)
+
+ AVX512BW
+ AVX512VL
+
immintrin.h
+ Arithmetic
- - - Reduce the packed 8-bit integers in "a" by bitwise OR. Returns the bitwise OR of
- all elements in "a". - -
- DEFINE REDUCE_OR(src, len) { - IF len == 2 - RETURN src[7:0] OR src[15:8] - FI
- len := len / 2 - FOR j:= 0 to (len-1) - i := j*8
- src[i+7:i] := src[i+7:i] OR src[i+8*len+7:i+8*len] - ENDFOR
- RETURN REDUCE_OR(src[8*len-1:0], len) - } - dst[7:0] := REDUCE_OR(a, 16) -
- - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + Reduce the packed 8-bit integers in "a" by bitwise OR. Returns the bitwise OR of all elements in "a". +
+DEFINE REDUCE_OR(src, len) {
+ IF len == 2
+ RETURN src[7:0] OR src[15:8]
+ FI
+ len := len / 2
+ FOR j:= 0 to (len-1)
+ i := j*8
+ src[i+7:i] := src[i+7:i] OR src[i+8*len+7:i+8*len]
+ ENDFOR
+ RETURN REDUCE_OR(src[8*len-1:0], len)
+}
+dst[7:0] := REDUCE_OR(a, 16)
+
+ AVX512BW
+ AVX512VL
+
immintrin.h
+ Arithmetic
- - - - Reduce the packed 8-bit integers in "a" by bitwise OR using mask "k".
- Returns the bitwise OR of all active elements in "a". - -
- DEFINE REDUCE_OR(src, len) { - IF len == 2 - RETURN src[7:0] OR src[15:8] - FI
- len := len / 2 - FOR j:= 0 to (len-1) - i := j*8
- src[i+7:i] := src[i+7:i] OR src[i+8*len+7:i+8*len] - ENDFOR
- RETURN REDUCE_OR(src[8*len-1:0], len) - } - tmp := a - FOR j := 0 to 15 - i := j*8
- IF k[j] - tmp[i+7:i] := a[i+7:i] - ELSE - tmp[i+7:i] := 0 - FI - ENDFOR
- dst[7:0] := REDUCE_OR(tmp, 16) - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + Reduce the packed 8-bit integers in "a" by bitwise OR using mask "k". Returns the bitwise OR of all active elements in "a". +
+DEFINE REDUCE_OR(src, len) {
+ IF len == 2
+ RETURN src[7:0] OR src[15:8]
+ FI
+ len := len / 2
+ FOR j:= 0 to (len-1)
+ i := j*8
+ src[i+7:i] := src[i+7:i] OR src[i+8*len+7:i+8*len]
+ ENDFOR
+ RETURN REDUCE_OR(src[8*len-1:0], len)
+}
+tmp := a
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ tmp[i+7:i] := a[i+7:i]
+ ELSE
+ tmp[i+7:i] := 0
+ FI
+ENDFOR
+dst[7:0] := REDUCE_OR(tmp, 16)
+
+ AVX512BW
+ AVX512VL
+
immintrin.h
+ Arithmetic
- - - Reduce the packed 8-bit integers in "a" by bitwise OR. Returns the bitwise OR of
- all elements in "a". - -
- DEFINE REDUCE_OR(src, len) { - IF len == 2 - RETURN src[7:0] OR src[15:8] - FI
- len := len / 2 - FOR j:= 0 to (len-1) - i := j*8
- src[i+7:i] := src[i+7:i] OR src[i+8*len+7:i+8*len] - ENDFOR
- RETURN REDUCE_OR(src[8*len-1:0], len) - } - dst[7:0] := REDUCE_OR(a, 32) -
- - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + Reduce the packed 8-bit integers in "a" by bitwise OR. Returns the bitwise OR of all elements in "a". +
+DEFINE REDUCE_OR(src, len) {
+ IF len == 2
+ RETURN src[7:0] OR src[15:8]
+ FI
+ len := len / 2
+ FOR j:= 0 to (len-1)
+ i := j*8
+ src[i+7:i] := src[i+7:i] OR src[i+8*len+7:i+8*len]
+ ENDFOR
+ RETURN REDUCE_OR(src[8*len-1:0], len)
+}
+dst[7:0] := REDUCE_OR(a, 32)
+
+ AVX512BW
+ AVX512VL
+
immintrin.h
+ Arithmetic
- - - - Reduce the packed 8-bit integers in "a" by bitwise OR using mask "k".
- Returns the bitwise OR of all active elements in "a". - -
- DEFINE REDUCE_OR(src, len) { - IF len == 2 - RETURN src[7:0] OR src[15:8] - FI
- len := len / 2 - FOR j:= 0 to (len-1) - i := j*8
- src[i+7:i] := src[i+7:i] OR src[i+8*len+7:i+8*len] - ENDFOR
- RETURN REDUCE_OR(src[8*len-1:0], len) - } - tmp := a - FOR j := 0 to 31 - i := j*8
- IF k[j] - tmp[i+7:i] := a[i+7:i] - ELSE - tmp[i+7:i] := 0 - FI - ENDFOR
- dst[7:0] := REDUCE_OR(tmp, 32) - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + Reduce the packed 8-bit integers in "a" by bitwise OR using mask "k". Returns the bitwise OR of all active elements in "a". +
+DEFINE REDUCE_OR(src, len) {
+ IF len == 2
+ RETURN src[7:0] OR src[15:8]
+ FI
+ len := len / 2
+ FOR j:= 0 to (len-1)
+ i := j*8
+ src[i+7:i] := src[i+7:i] OR src[i+8*len+7:i+8*len]
+ ENDFOR
+ RETURN REDUCE_OR(src[8*len-1:0], len)
+}
+tmp := a
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ tmp[i+7:i] := a[i+7:i]
+ ELSE
+ tmp[i+7:i] := 0
+ FI
+ENDFOR
+dst[7:0] := REDUCE_OR(tmp, 32)
+
+ AVX512BW
+ AVX512VL
+
immintrin.h
+ Arithmetic
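
Note: for the OR reductions the inactive-lane filler is 0, the identity for bitwise OR, and the tree order of the pseudocode is immaterial because OR is associative and commutative. A scalar sketch (function name ours):

    // Masked OR-reduction: inactive lanes contribute the identity 0.
    fn mask_reduce_or_epi16_model(k: u8, a: [u16; 8]) -> u16 {
        let mut acc = 0u16;
        for j in 0..8 {
            if (k >> j) & 1 != 0 {
                acc |= a[j];
            }
        }
        acc
    }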
- - - Reduce the packed 16-bit integers in "a" by bitwise AND. Returns the bitwise AND of
- all elements in "a". - -
- DEFINE REDUCE_AND(src, len) { - IF len == 2 - RETURN src[15:0] AND src[31:16] - FI
- len := len / 2 - FOR j:= 0 to (len-1) - i := j*16
- src[i+15:i] := src[i+15:i] AND src[i+16*len+15:i+16*len] - ENDFOR
- RETURN REDUCE_AND(src[16*len-1:0], len) - } - dst[15:0] := REDUCE_AND(a, 8) -
- - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + Reduce the packed 16-bit integers in "a" by bitwise AND. Returns the bitwise AND of all elements in "a". +
+DEFINE REDUCE_AND(src, len) {
+ IF len == 2
+ RETURN src[15:0] AND src[31:16]
+ FI
+ len := len / 2
+ FOR j:= 0 to (len-1)
+ i := j*16
+ src[i+15:i] := src[i+15:i] AND src[i+16*len+15:i+16*len]
+ ENDFOR
+ RETURN REDUCE_AND(src[16*len-1:0], len)
+}
+dst[15:0] := REDUCE_AND(a, 8)
+
+ AVX512BW
+ AVX512VL
+
immintrin.h
+ Arithmetic
- - - - Reduce the packed 16-bit integers in "a" by bitwise AND using mask "k".
- Returns the bitwise AND of all active elements in "a". - -
- DEFINE REDUCE_AND(src, len) { - IF len == 2 - RETURN src[15:0] AND src[31:16] - FI
- len := len / 2 - FOR j:= 0 to (len-1) - i := j*16
- src[i+15:i] := src[i+15:i] AND src[i+16*len+15:i+16*len] - ENDFOR
- RETURN REDUCE_AND(src[16*len-1:0], len) - } - tmp := a - FOR j := 0 to 7 - i := j*16
- IF k[j] - tmp[i+15:i] := a[i+15:i] - ELSE - tmp[i+15:i] := 0xFFFF - FI - ENDFOR
- dst[15:0] := REDUCE_AND(tmp, 8) - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + Reduce the packed 16-bit integers in "a" by bitwise AND using mask "k". Returns the bitwise AND of all active elements in "a". +
+DEFINE REDUCE_AND(src, len) {
+ IF len == 2
+ RETURN src[15:0] AND src[31:16]
+ FI
+ len := len / 2
+ FOR j:= 0 to (len-1)
+ i := j*16
+ src[i+15:i] := src[i+15:i] AND src[i+16*len+15:i+16*len]
+ ENDFOR
+ RETURN REDUCE_AND(src[16*len-1:0], len)
+}
+tmp := a
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ tmp[i+15:i] := a[i+15:i]
+ ELSE
+ tmp[i+15:i] := 0xFFFF
+ FI
+ENDFOR
+dst[15:0] := REDUCE_AND(tmp, 8)
+
+ AVX512BW
+ AVX512VL
+
immintrin.h
+ Arithmetic
- - - Reduce the packed 16-bit integers in "a" by bitwise AND. Returns the bitwise AND of
- all elements in "a". - -
- DEFINE REDUCE_AND(src, len) { - IF len == 2 - RETURN src[15:0] AND src[31:16] - FI
- len := len / 2 - FOR j:= 0 to (len-1) - i := j*16
- src[i+15:i] := src[i+15:i] AND src[i+16*len+15:i+16*len] - ENDFOR
- RETURN REDUCE_AND(src[16*len-1:0], len) - } - dst[15:0] := REDUCE_AND(a, 16) -
- - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + Reduce the packed 16-bit integers in "a" by bitwise AND. Returns the bitwise AND of all elements in "a". +
+DEFINE REDUCE_AND(src, len) {
+ IF len == 2
+ RETURN src[15:0] AND src[31:16]
+ FI
+ len := len / 2
+ FOR j:= 0 to (len-1)
+ i := j*16
+ src[i+15:i] := src[i+15:i] AND src[i+16*len+15:i+16*len]
+ ENDFOR
+ RETURN REDUCE_AND(src[16*len-1:0], len)
+}
+dst[15:0] := REDUCE_AND(a, 16)
+
+ AVX512BW
+ AVX512VL
+
immintrin.h
+ Arithmetic
- - - - Reduce the packed 16-bit integers in "a" by bitwise AND using mask "k".
- Returns the bitwise AND of all active elements in "a". - -
- DEFINE REDUCE_AND(src, len) { - IF len == 2 - RETURN src[15:0] AND src[31:16] - FI
- len := len / 2 - FOR j:= 0 to (len-1) - i := j*16
- src[i+15:i] := src[i+15:i] AND src[i+16*len+15:i+16*len] - ENDFOR
- RETURN REDUCE_AND(src[16*len-1:0], len) - } - tmp := a - FOR j := 0 to 15 - i := j*16
- IF k[j] - tmp[i+15:i] := a[i+15:i] - ELSE - tmp[i+15:i] := 0xFFFF - FI - ENDFOR
- dst[15:0] := REDUCE_AND(tmp, 16) - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + Reduce the packed 16-bit integers in "a" by bitwise AND using mask "k". Returns the bitwise AND of all active elements in "a". +
+DEFINE REDUCE_AND(src, len) {
+ IF len == 2
+ RETURN src[15:0] AND src[31:16]
+ FI
+ len := len / 2
+ FOR j:= 0 to (len-1)
+ i := j*16
+ src[i+15:i] := src[i+15:i] AND src[i+16*len+15:i+16*len]
+ ENDFOR
+ RETURN REDUCE_AND(src[16*len-1:0], len)
+}
+tmp := a
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ tmp[i+15:i] := a[i+15:i]
+ ELSE
+ tmp[i+15:i] := 0xFFFF
+ FI
+ENDFOR
+dst[15:0] := REDUCE_AND(tmp, 16)
+
+ AVX512BW
+ AVX512VL
+
immintrin.h
+ Arithmetic
- - - Reduce the packed 8-bit integers in "a" by bitwise AND. Returns the bitwise AND of
- all elements in "a". - -
- DEFINE REDUCE_AND(src, len) { - IF len == 2 - RETURN src[7:0] AND src[15:8] - FI
- len := len / 2 - FOR j:= 0 to (len-1) - i := j*8
- src[i+7:i] := src[i+7:i] AND src[i+8*len+7:i+8*len] - ENDFOR
- RETURN REDUCE_AND(src[8*len-1:0], len) - } - dst[7:0] := REDUCE_AND(a, 16) -
- - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + Reduce the packed 8-bit integers in "a" by bitwise AND. Returns the bitwise AND of all elements in "a". +
+DEFINE REDUCE_AND(src, len) {
+ IF len == 2
+ RETURN src[7:0] AND src[15:8]
+ FI
+ len := len / 2
+ FOR j:= 0 to (len-1)
+ i := j*8
+ src[i+7:i] := src[i+7:i] AND src[i+8*len+7:i+8*len]
+ ENDFOR
+ RETURN REDUCE_AND(src[8*len-1:0], len)
+}
+dst[7:0] := REDUCE_AND(a, 16)
+
+ AVX512BW
+ AVX512VL
+
immintrin.h
+ Arithmetic
- - - - Reduce the packed 8-bit integers in "a" by bitwise AND using mask "k".
- Returns the bitwise AND of all active elements in "a". - -
- DEFINE REDUCE_AND(src, len) { - IF len == 2 - RETURN src[7:0] AND src[15:8] - FI
- len := len / 2 - FOR j:= 0 to (len-1) - i := j*8
- src[i+7:i] := src[i+7:i] AND src[i+8*len+7:i+8*len] - ENDFOR
- RETURN REDUCE_AND(src[8*len-1:0], len) - } - tmp := a - FOR j := 0 to 15 - i := j*8
- IF k[j] - tmp[i+7:i] := a[i+7:i] - ELSE - tmp[i+7:i] := 0xFF - FI - ENDFOR
- dst[7:0] := REDUCE_AND(tmp, 16) - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + Reduce the packed 8-bit integers in "a" by bitwise AND using mask "k". Returns the bitwise AND of all active elements in "a". +
+DEFINE REDUCE_AND(src, len) {
+ IF len == 2
+ RETURN src[7:0] AND src[15:8]
+ FI
+ len := len / 2
+ FOR j:= 0 to (len-1)
+ i := j*8
+ src[i+7:i] := src[i+7:i] AND src[i+8*len+7:i+8*len]
+ ENDFOR
+ RETURN REDUCE_AND(src[8*len-1:0], len)
+}
+tmp := a
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ tmp[i+7:i] := a[i+7:i]
+ ELSE
+ tmp[i+7:i] := 0xFF
+ FI
+ENDFOR
+dst[7:0] := REDUCE_AND(tmp, 16)
+
+ AVX512BW
+ AVX512VL
+
immintrin.h
+ Arithmetic
- - - Reduce the packed 8-bit integers in "a" by bitwise AND. Returns the bitwise AND of
- all elements in "a". - -
- DEFINE REDUCE_AND(src, len) { - IF len == 2 - RETURN src[7:0] AND src[15:8] - FI
- len := len / 2 - FOR j:= 0 to (len-1) - i := j*8
- src[i+7:i] := src[i+7:i] AND src[i+8*len+7:i+8*len] - ENDFOR
- RETURN REDUCE_AND(src[8*len-1:0], len) - } - dst[7:0] := REDUCE_AND(a, 32) -
- - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + Reduce the packed 8-bit integers in "a" by bitwise AND. Returns the bitwise AND of all elements in "a". +
+DEFINE REDUCE_AND(src, len) {
+ IF len == 2
+ RETURN src[7:0] AND src[15:8]
+ FI
+ len := len / 2
+ FOR j:= 0 to (len-1)
+ i := j*8
+ src[i+7:i] := src[i+7:i] AND src[i+8*len+7:i+8*len]
+ ENDFOR
+ RETURN REDUCE_AND(src[8*len-1:0], len)
+}
+dst[7:0] := REDUCE_AND(a, 32)
+
+ AVX512BW
+ AVX512VL
+
immintrin.h
+ Arithmetic
- - - - Reduce the packed 8-bit integers in "a" by bitwise AND using mask "k".
- Returns the bitwise AND of all active elements in "a". - -
- DEFINE REDUCE_AND(src, len) { - IF len == 2 - RETURN src[7:0] AND src[15:8] - FI
- len := len / 2 - FOR j:= 0 to (len-1) - i := j*8
- src[i+7:i] := src[i+7:i] AND src[i+8*len+7:i+8*len] - ENDFOR
- RETURN REDUCE_AND(src[8*len-1:0], len) - } - tmp := a - FOR j := 0 to 31 - i := j*8
- IF k[j] - tmp[i+7:i] := a[i+7:i] - ELSE - tmp[i+7:i] := 0xFF - FI - ENDFOR
- dst[7:0] := REDUCE_AND(tmp, 32) - - AVX512BW - AVX512VL -
immintrin.h
- Arithmetic + + + + Reduce the packed 8-bit integers in "a" by bitwise AND using mask "k". Returns the bitwise AND of all active elements in "a". +
+DEFINE REDUCE_AND(src, len) {
+ IF len == 2
+ RETURN src[7:0] AND src[15:8]
+ FI
+ len := len / 2
+ FOR j:= 0 to (len-1)
+ i := j*8
+ src[i+7:i] := src[i+7:i] AND src[i+8*len+7:i+8*len]
+ ENDFOR
+ RETURN REDUCE_AND(src[8*len-1:0], len)
+}
+tmp := a
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ tmp[i+7:i] := a[i+7:i]
+ ELSE
+ tmp[i+7:i] := 0xFF
+ FI
+ENDFOR
+dst[7:0] := REDUCE_AND(tmp, 32)
+
+ AVX512BW
+ AVX512VL
+
immintrin.h
+ Arithmetic
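
Note: for the AND reductions the inactive-lane filler is all-ones (0xFFFF or 0xFF above), the identity for bitwise AND. A scalar sketch (function name ours):

    // Masked AND-reduction: inactive lanes contribute the identity !0.
    fn mask_reduce_and_epi16_model(k: u8, a: [u16; 8]) -> u16 {
        let mut acc = 0xFFFFu16; // all bits set
        for j in 0..8 {
            if (k >> j) & 1 != 0 {
                acc &= a[j];
            }
        }
        acc
    }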
- - - Reduce the packed signed 16-bit integers in "a" by maximum. Returns the maximum
- of all elements in "a". - -
- DEFINE REDUCE_MAX(src, len) { - IF len == 2 - RETURN (src[15:0] > src[31:16] ? src[15:0] : src[31:16]) - FI
- len := len / 2 - FOR j:= 0 to (len-1) - i := j*16
- src[i+15:i] := (src[i+15:i] > src[i+16*len+15:i+16*len] ? src[i+15:i] : src[i+16*len+15:i+16*len]) - ENDFOR
- RETURN REDUCE_MAX(src[16*len-1:0], len) - } - dst[15:0] := REDUCE_MAX(a, 8) -
- - AVX512BW - AVX512VL -
immintrin.h
- Special Math Functions + + + Reduce the packed signed 16-bit integers in "a" by maximum. Returns the maximum of all elements in "a". +
+DEFINE REDUCE_MAX(src, len) {
+ IF len == 2
+ RETURN (src[15:0] > src[31:16] ? src[15:0] : src[31:16])
+ FI
+ len := len / 2
+ FOR j:= 0 to (len-1)
+ i := j*16
+ src[i+15:i] := (src[i+15:i] > src[i+16*len+15:i+16*len] ? src[i+15:i] : src[i+16*len+15:i+16*len])
+ ENDFOR
+ RETURN REDUCE_MAX(src[16*len-1:0], len)
+}
+dst[15:0] := REDUCE_MAX(a, 8)
+
+ AVX512BW
+ AVX512VL
+
immintrin.h
+ Special Math Functions
- - - - Reduce the packed signed 16-bit integers in "a" by maximum using mask "k". - Returns the maximum of all active elements in "a". - - DEFINE REDUCE_MAX(src, len) { - IF len == 2 - RETURN (src[15:0] > src[31:16] ? src[15:0] : src[31:16]) - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*16 - src[i+15:i] := (src[i+15:i] > src[i+16*len+15:i+16*len] ? src[i+15:i] : - src[i+16*len+15:i+16*len]) - ENDFOR - RETURN REDUCE_MAX(src[16*len-1:0], len) - } - tmp := a - FOR j := 0 to 7 - i := j*16 - IF k[j] - tmp[i+15:i] := a[i+15:i] - ELSE - tmp[i+15:i] := Int16(-0x8000) - FI - ENDFOR - dst[15:0] := REDUCE_MAX(tmp, 8) - - AVX512BW - AVX512VL -
immintrin.h
- Special Math Functions + + + + Reduce the packed signed 16-bit integers in "a" by maximum using mask "k". Returns the maximum of all active elements in "a". + +DEFINE REDUCE_MAX(src, len) { + IF len == 2 + RETURN (src[15:0] > src[31:16] ? src[15:0] : src[31:16]) + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*16 + src[i+15:i] := (src[i+15:i] > src[i+16*len+15:i+16*len] ? src[i+15:i] : src[i+16*len+15:i+16*len]) + ENDFOR + RETURN REDUCE_MAX(src[16*len-1:0], len) +} +tmp := a +FOR j := 0 to 7 + i := j*16 + IF k[j] + tmp[i+15:i] := a[i+15:i] + ELSE + tmp[i+15:i] := Int16(-0x8000) + FI +ENDFOR +dst[15:0] := REDUCE_MAX(tmp, 8) + + AVX512BW + AVX512VL +
immintrin.h
+ Special Math Functions
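
Note: for signed maximum the inactive-lane filler is the smallest representable value (Int16(-0x8000) above), which can never win a comparison against an active lane. A scalar sketch (function name ours):

    // Masked signed max-reduction: inactive lanes contribute i16::MIN.
    fn mask_reduce_max_epi16_model(k: u8, a: [i16; 8]) -> i16 {
        let mut acc = i16::MIN; // matches Int16(-0x8000) in the pseudocode
        for j in 0..8 {
            if (k >> j) & 1 != 0 {
                acc = acc.max(a[j]);
            }
        }
        acc
    }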
- - - Reduce the packed signed 16-bit integers in "a" by maximum. Returns the maximum
- of all elements in "a". - -
- DEFINE REDUCE_MAX(src, len) { - IF len == 2 - RETURN (src[15:0] > src[31:16] ? src[15:0] : src[31:16]) - FI
- len := len / 2 - FOR j:= 0 to (len-1) - i := j*16
- src[i+15:i] := (src[i+15:i] > src[i+16*len+15:i+16*len] ? src[i+15:i] : src[i+16*len+15:i+16*len]) - ENDFOR
- RETURN REDUCE_MAX(src[16*len-1:0], len) - } - dst[15:0] := REDUCE_MAX(a, 16) -
- - AVX512BW - AVX512VL -
immintrin.h
- Special Math Functions + + + Reduce the packed signed 16-bit integers in "a" by maximum. Returns the maximum of all elements in "a". +
+DEFINE REDUCE_MAX(src, len) {
+ IF len == 2
+ RETURN (src[15:0] > src[31:16] ? src[15:0] : src[31:16])
+ FI
+ len := len / 2
+ FOR j:= 0 to (len-1)
+ i := j*16
+ src[i+15:i] := (src[i+15:i] > src[i+16*len+15:i+16*len] ? src[i+15:i] : src[i+16*len+15:i+16*len])
+ ENDFOR
+ RETURN REDUCE_MAX(src[16*len-1:0], len)
+}
+dst[15:0] := REDUCE_MAX(a, 16)
+
+ AVX512BW
+ AVX512VL
+
immintrin.h
+ Special Math Functions
- - - - Reduce the packed signed 16-bit integers in "a" by maximum using mask "k". - Returns the maximum of all active elements in "a". - - DEFINE REDUCE_MAX(src, len) { - IF len == 2 - RETURN (src[15:0] > src[31:16] ? src[15:0] : src[31:16]) - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*16 - src[i+15:i] := (src[i+15:i] > src[i+16*len+15:i+16*len] ? src[i+15:i] : - src[i+16*len+15:i+16*len]) - ENDFOR - RETURN REDUCE_MAX(src[16*len-1:0], len) - } - tmp := a - FOR j := 0 to 15 - i := j*16 - IF k[j] - tmp[i+15:i] := a[i+15:i] - ELSE - tmp[i+15:i] := Int16(-0x8000) - FI - ENDFOR - dst[15:0] := REDUCE_MAX(tmp, 16) - - AVX512BW - AVX512VL -
immintrin.h
- Special Math Functions + + + + Reduce the packed signed 16-bit integers in "a" by maximum using mask "k". Returns the maximum of all active elements in "a". + +DEFINE REDUCE_MAX(src, len) { + IF len == 2 + RETURN (src[15:0] > src[31:16] ? src[15:0] : src[31:16]) + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*16 + src[i+15:i] := (src[i+15:i] > src[i+16*len+15:i+16*len] ? src[i+15:i] : src[i+16*len+15:i+16*len]) + ENDFOR + RETURN REDUCE_MAX(src[16*len-1:0], len) +} +tmp := a +FOR j := 0 to 15 + i := j*16 + IF k[j] + tmp[i+15:i] := a[i+15:i] + ELSE + tmp[i+15:i] := Int16(-0x8000) + FI +ENDFOR +dst[15:0] := REDUCE_MAX(tmp, 16) + + AVX512BW + AVX512VL +
immintrin.h
+ Special Math Functions
- - - Reduce the packed signed 8-bit integers in "a" by maximum. Returns the maximum
- of all elements in "a". - -
- DEFINE REDUCE_MAX(src, len) { - IF len == 2 - RETURN (src[7:0] > src[15:8] ? src[7:0] : src[15:8]) - FI
- len := len / 2 - FOR j:= 0 to (len-1) - i := j*8
- src[i+7:i] := (src[i+7:i] > src[i+8*len+7:i+8*len] ? src[i+7:i] : src[i+8*len+7:i+8*len]) - ENDFOR
- RETURN REDUCE_MAX(src[8*len-1:0], len) - } - dst[7:0] := REDUCE_MAX(a, 16) -
- - AVX512BW - AVX512VL -
immintrin.h
- Special Math Functions + + + Reduce the packed signed 8-bit integers in "a" by maximum. Returns the maximum of all elements in "a". +
+DEFINE REDUCE_MAX(src, len) {
+ IF len == 2
+ RETURN (src[7:0] > src[15:8] ? src[7:0] : src[15:8])
+ FI
+ len := len / 2
+ FOR j:= 0 to (len-1)
+ i := j*8
+ src[i+7:i] := (src[i+7:i] > src[i+8*len+7:i+8*len] ? src[i+7:i] : src[i+8*len+7:i+8*len])
+ ENDFOR
+ RETURN REDUCE_MAX(src[8*len-1:0], len)
+}
+dst[7:0] := REDUCE_MAX(a, 16)
+
+ AVX512BW
+ AVX512VL
+
immintrin.h
+ Special Math Functions
- - - - Reduce the packed signed 8-bit integers in "a" by maximum using mask "k". - Returns the maximum of all active elements in "a". - - DEFINE REDUCE_MAX(src, len) { - IF len == 2 - RETURN (src[7:0] > src[15:8] ? src[7:0] : src[15:8]) - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*8 - src[i+7:i] := (src[i+7:i] > src[i+8*len+7:i+8*len] ? src[i+7:i] : - src[i+8*len+7:i+8*len]) - ENDFOR - RETURN REDUCE_MAX(src[8*len-1:0], len) - } - tmp := a - FOR j := 0 to 15 - i := j*8 - IF k[j] - tmp[i+7:i] := a[i+7:i] - ELSE - tmp[i+7:i] := Int8(-0x80) - FI - ENDFOR - dst[7:0] := REDUCE_MAX(tmp, 16) - - AVX512BW - AVX512VL -
immintrin.h
- Special Math Functions + + + + Reduce the packed signed 8-bit integers in "a" by maximum using mask "k". Returns the maximum of all active elements in "a". + +DEFINE REDUCE_MAX(src, len) { + IF len == 2 + RETURN (src[7:0] > src[15:8] ? src[7:0] : src[15:8]) + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*8 + src[i+7:i] := (src[i+7:i] > src[i+8*len+7:i+8*len] ? src[i+7:i] : src[i+8*len+7:i+8*len]) + ENDFOR + RETURN REDUCE_MAX(src[8*len-1:0], len) +} +tmp := a +FOR j := 0 to 15 + i := j*8 + IF k[j] + tmp[i+7:i] := a[i+7:i] + ELSE + tmp[i+7:i] := Int8(-0x80) + FI +ENDFOR +dst[7:0] := REDUCE_MAX(tmp, 16) + + AVX512BW + AVX512VL +
immintrin.h
+ Special Math Functions
- - - Reduce the packed signed 8-bit integers in "a" by maximum. Returns the maximum
- of all elements in "a". - -
- DEFINE REDUCE_MAX(src, len) { - IF len == 2 - RETURN (src[7:0] > src[15:8] ? src[7:0] : src[15:8]) - FI
- len := len / 2 - FOR j:= 0 to (len-1) - i := j*8
- src[i+7:i] := (src[i+7:i] > src[i+8*len+7:i+8*len] ? src[i+7:i] : src[i+8*len+7:i+8*len]) - ENDFOR
- RETURN REDUCE_MAX(src[8*len-1:0], len) - } - dst[7:0] := REDUCE_MAX(a, 32) -
- - AVX512BW - AVX512VL -
immintrin.h
- Special Math Functions + + + Reduce the packed signed 8-bit integers in "a" by maximum. Returns the maximum of all elements in "a". +
+DEFINE REDUCE_MAX(src, len) {
+ IF len == 2
+ RETURN (src[7:0] > src[15:8] ? src[7:0] : src[15:8])
+ FI
+ len := len / 2
+ FOR j:= 0 to (len-1)
+ i := j*8
+ src[i+7:i] := (src[i+7:i] > src[i+8*len+7:i+8*len] ? src[i+7:i] : src[i+8*len+7:i+8*len])
+ ENDFOR
+ RETURN REDUCE_MAX(src[8*len-1:0], len)
+}
+dst[7:0] := REDUCE_MAX(a, 32)
+
+ AVX512BW
+ AVX512VL
+
immintrin.h
+ Special Math Functions
- - - - Reduce the packed signed 8-bit integers in "a" by maximum using mask "k". - Returns the maximum of all active elements in "a". - - DEFINE REDUCE_MAX(src, len) { - IF len == 2 - RETURN (src[7:0] > src[15:8] ? src[7:0] : src[15:8]) - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*8 - src[i+7:i] := (src[i+7:i] > src[i+8*len+7:i+8*len] ? src[i+7:i] : - src[i+8*len+7:i+8*len]) - ENDFOR - RETURN REDUCE_MAX(src[8*len-1:0], len) - } - tmp := a - FOR j := 0 to 31 - i := j*8 - IF k[j] - tmp[i+7:i] := a[i+7:i] - ELSE - tmp[i+7:i] := Int8(-0x80) - FI - ENDFOR - dst[7:0] := REDUCE_MAX(tmp, 32) - - AVX512BW - AVX512VL -
immintrin.h
- Special Math Functions + + + + Reduce the packed signed 8-bit integers in "a" by maximum using mask "k". Returns the maximum of all active elements in "a". + +DEFINE REDUCE_MAX(src, len) { + IF len == 2 + RETURN (src[7:0] > src[15:8] ? src[7:0] : src[15:8]) + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*8 + src[i+7:i] := (src[i+7:i] > src[i+8*len+7:i+8*len] ? src[i+7:i] : src[i+8*len+7:i+8*len]) + ENDFOR + RETURN REDUCE_MAX(src[8*len-1:0], len) +} +tmp := a +FOR j := 0 to 31 + i := j*8 + IF k[j] + tmp[i+7:i] := a[i+7:i] + ELSE + tmp[i+7:i] := Int8(-0x80) + FI +ENDFOR +dst[7:0] := REDUCE_MAX(tmp, 32) + + AVX512BW + AVX512VL +
immintrin.h
+ Special Math Functions
- - - Reduce the packed unsigned 16-bit integers in "a" by maximum. Returns the
- maximum of all elements in "a". - -
- DEFINE REDUCE_MAX(src, len) { - IF len == 2 - RETURN (src[15:0] > src[31:16] ? src[15:0] : src[31:16]) - FI
- len := len / 2 - FOR j:= 0 to (len-1) - i := j*16
- src[i+15:i] := (src[i+15:i] > src[i+16*len+15:i+16*len] ? src[i+15:i] : src[i+16*len+15:i+16*len]) - ENDFOR
- RETURN REDUCE_MAX(src[16*len-1:0], len) - } - dst[15:0] := REDUCE_MAX(a, 8) -
- - AVX512BW - AVX512VL -
immintrin.h
- Special Math Functions + + + Reduce the packed unsigned 16-bit integers in "a" by maximum. Returns the maximum of all elements in "a". +
+DEFINE REDUCE_MAX(src, len) {
+ IF len == 2
+ RETURN (src[15:0] > src[31:16] ? src[15:0] : src[31:16])
+ FI
+ len := len / 2
+ FOR j:= 0 to (len-1)
+ i := j*16
+ src[i+15:i] := (src[i+15:i] > src[i+16*len+15:i+16*len] ? src[i+15:i] : src[i+16*len+15:i+16*len])
+ ENDFOR
+ RETURN REDUCE_MAX(src[16*len-1:0], len)
+}
+dst[15:0] := REDUCE_MAX(a, 8)
+
+ AVX512BW
+ AVX512VL
+
immintrin.h
+ Special Math Functions
- - - - Reduce the packed unsigned 16-bit integers in "a" by maximum using mask "k". - Returns the maximum of all active elements in "a". - - DEFINE REDUCE_MAX(src, len) { - IF len == 2 - RETURN (src[15:0] > src[31:16] ? src[15:0] : src[31:16]) - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*16 - src[i+15:i] := (src[i+15:i] > src[i+16*len+15:i+16*len] ? src[i+15:i] : - src[i+16*len+15:i+16*len]) - ENDFOR - RETURN REDUCE_MAX(src[16*len-1:0], len) - } - tmp := a - FOR j := 0 to 7 - i := j*16 - IF k[j] - tmp[i+15:i] := a[i+15:i] - ELSE - tmp[i+15:i] := 0 - FI - ENDFOR - dst[15:0] := REDUCE_MAX(tmp, 8) - - AVX512BW - AVX512VL -
immintrin.h
- Special Math Functions + + + + Reduce the packed unsigned 16-bit integers in "a" by maximum using mask "k". Returns the maximum of all active elements in "a". + +DEFINE REDUCE_MAX(src, len) { + IF len == 2 + RETURN (src[15:0] > src[31:16] ? src[15:0] : src[31:16]) + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*16 + src[i+15:i] := (src[i+15:i] > src[i+16*len+15:i+16*len] ? src[i+15:i] : src[i+16*len+15:i+16*len]) + ENDFOR + RETURN REDUCE_MAX(src[16*len-1:0], len) +} +tmp := a +FOR j := 0 to 7 + i := j*16 + IF k[j] + tmp[i+15:i] := a[i+15:i] + ELSE + tmp[i+15:i] := 0 + FI +ENDFOR +dst[15:0] := REDUCE_MAX(tmp, 8) + + AVX512BW + AVX512VL +
immintrin.h
+ Special Math Functions
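
Note: for unsigned maximum the inactive-lane filler is 0, the smallest unsigned value. A scalar sketch (function name ours):

    // Masked unsigned max-reduction: inactive lanes contribute 0.
    fn mask_reduce_max_epu16_model(k: u8, a: [u16; 8]) -> u16 {
        let mut acc = 0u16; // neutral element for unsigned max
        for j in 0..8 {
            if (k >> j) & 1 != 0 {
                acc = acc.max(a[j]);
            }
        }
        acc
    }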
- - - Reduce the packed unsigned 16-bit integers in "a" by maximum. Returns the
- maximum of all elements in "a". - -
- DEFINE REDUCE_MAX(src, len) { - IF len == 2 - RETURN (src[15:0] > src[31:16] ? src[15:0] : src[31:16]) - FI
- len := len / 2 - FOR j:= 0 to (len-1) - i := j*16
- src[i+15:i] := (src[i+15:i] > src[i+16*len+15:i+16*len] ? src[i+15:i] : src[i+16*len+15:i+16*len]) - ENDFOR
- RETURN REDUCE_MAX(src[16*len-1:0], len) - } - dst[15:0] := REDUCE_MAX(a, 16) -
- - AVX512BW - AVX512VL -
immintrin.h
- Special Math Functions + + + Reduce the packed unsigned 16-bit integers in "a" by maximum. Returns the maximum of all elements in "a". +
+DEFINE REDUCE_MAX(src, len) {
+ IF len == 2
+ RETURN (src[15:0] > src[31:16] ? src[15:0] : src[31:16])
+ FI
+ len := len / 2
+ FOR j:= 0 to (len-1)
+ i := j*16
+ src[i+15:i] := (src[i+15:i] > src[i+16*len+15:i+16*len] ? src[i+15:i] : src[i+16*len+15:i+16*len])
+ ENDFOR
+ RETURN REDUCE_MAX(src[16*len-1:0], len)
+}
+dst[15:0] := REDUCE_MAX(a, 16)
+
+ AVX512BW
+ AVX512VL
+
immintrin.h
+ Special Math Functions
- - - - Reduce the packed unsigned 16-bit integers in "a" by maximum using mask "k". - Returns the maximum of all active elements in "a". - - DEFINE REDUCE_MAX(src, len) { - IF len == 2 - RETURN (src[15:0] > src[31:16] ? src[15:0] : src[31:16]) - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*16 - src[i+15:i] := (src[i+15:i] > src[i+16*len+15:i+16*len] ? src[i+15:i] : - src[i+16*len+15:i+16*len]) - ENDFOR - RETURN REDUCE_MAX(src[16*len-1:0], len) - } - tmp := a - FOR j := 0 to 15 - i := j*16 - IF k[j] - tmp[i+15:i] := a[i+15:i] - ELSE - tmp[i+15:i] := 0 - FI - ENDFOR - dst[15:0] := REDUCE_MAX(tmp, 16) - - AVX512BW - AVX512VL -
immintrin.h
- Special Math Functions + + + + Reduce the packed unsigned 16-bit integers in "a" by maximum using mask "k". Returns the maximum of all active elements in "a". + +DEFINE REDUCE_MAX(src, len) { + IF len == 2 + RETURN (src[15:0] > src[31:16] ? src[15:0] : src[31:16]) + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*16 + src[i+15:i] := (src[i+15:i] > src[i+16*len+15:i+16*len] ? src[i+15:i] : src[i+16*len+15:i+16*len]) + ENDFOR + RETURN REDUCE_MAX(src[16*len-1:0], len) +} +tmp := a +FOR j := 0 to 15 + i := j*16 + IF k[j] + tmp[i+15:i] := a[i+15:i] + ELSE + tmp[i+15:i] := 0 + FI +ENDFOR +dst[15:0] := REDUCE_MAX(tmp, 16) + + AVX512BW + AVX512VL +
immintrin.h
+ Special Math Functions
- - - Reduce the packed unsigned 8-bit integers in "a" by maximum. Returns the - maximum of all active elements in "a". - - DEFINE REDUCE_MAX(src, len) { - IF len == 2 - RETURN (src[7:0] > src[15:8] ? src[7:0] : src[15:8]) - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*8 - src[i+7:i] := (src[i+7:i] > src[i+8*len+7:i+8*len] ? src[i+7:i] : - src[i+8*len+7:i+8*len]) - ENDFOR - RETURN REDUCE_MAX(src[8*len-1:0], len) - } - dst[7:0] := REDUCE_MAX(a, 16) - - AVX512BW - AVX512VL -
immintrin.h
- Special Math Functions + + + Reduce the packed unsigned 8-bit integers in "a" by maximum. Returns the maximum of all active elements in "a". + +DEFINE REDUCE_MAX(src, len) { + IF len == 2 + RETURN (src[7:0] > src[15:8] ? src[7:0] : src[15:8]) + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*8 + src[i+7:i] := (src[i+7:i] > src[i+8*len+7:i+8*len] ? src[i+7:i] : src[i+8*len+7:i+8*len]) + ENDFOR + RETURN REDUCE_MAX(src[8*len-1:0], len) +} +dst[7:0] := REDUCE_MAX(a, 16) + + AVX512BW + AVX512VL +
immintrin.h
+ Special Math Functions
- - - - Reduce the packed unsigned 8-bit integers in "a" by maximum using mask "k". - Returns the maximum of all active elements in "a". - - DEFINE REDUCE_MAX(src, len) { - IF len == 2 - RETURN (src[7:0] > src[15:8] ? src[7:0] : src[15:8]) - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*8 - src[i+7:i] := (src[i+7:i] > src[i+8*len+7:i+8*len] ? src[i+7:i] : - src[i+8*len+7:i+8*len]) - ENDFOR - RETURN REDUCE_MAX(src[8*len-1:0], len) - } - tmp := a - FOR j := 0 to 15 - i := j*8 - IF k[j] - tmp[i+7:i] := a[i+7:i] - ELSE - tmp[i+7:i] := 0 - FI - ENDFOR - dst[7:0] := REDUCE_MAX(tmp, 16) - - AVX512BW - AVX512VL -
immintrin.h
- Special Math Functions + + + + Reduce the packed unsigned 8-bit integers in "a" by maximum using mask "k". Returns the maximum of all active elements in "a". + +DEFINE REDUCE_MAX(src, len) { + IF len == 2 + RETURN (src[7:0] > src[15:8] ? src[7:0] : src[15:8]) + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*8 + src[i+7:i] := (src[i+7:i] > src[i+8*len+7:i+8*len] ? src[i+7:i] : src[i+8*len+7:i+8*len]) + ENDFOR + RETURN REDUCE_MAX(src[8*len-1:0], len) +} +tmp := a +FOR j := 0 to 15 + i := j*8 + IF k[j] + tmp[i+7:i] := a[i+7:i] + ELSE + tmp[i+7:i] := 0 + FI +ENDFOR +dst[7:0] := REDUCE_MAX(tmp, 16) + + AVX512BW + AVX512VL +
immintrin.h
+ Special Math Functions
- - - Reduce the packed unsigned 8-bit integers in "a" by maximum. Returns the - maximum of all active elements in "a". - - DEFINE REDUCE_MAX(src, len) { - IF len == 2 - RETURN (src[7:0] > src[15:8] ? src[7:0] : src[15:8]) - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*8 - src[i+7:i] := (src[i+7:i] > src[i+8*len+7:i+8*len] ? src[i+7:i] : - src[i+8*len+7:i+8*len]) - ENDFOR - RETURN REDUCE_MAX(src[8*len-1:0], len) - } - dst[7:0] := REDUCE_MAX(a, 32) - - AVX512BW - AVX512VL -
immintrin.h
- Special Math Functions + + + Reduce the packed unsigned 8-bit integers in "a" by maximum. Returns the maximum of all active elements in "a". + +DEFINE REDUCE_MAX(src, len) { + IF len == 2 + RETURN (src[7:0] > src[15:8] ? src[7:0] : src[15:8]) + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*8 + src[i+7:i] := (src[i+7:i] > src[i+8*len+7:i+8*len] ? src[i+7:i] : src[i+8*len+7:i+8*len]) + ENDFOR + RETURN REDUCE_MAX(src[8*len-1:0], len) +} +dst[7:0] := REDUCE_MAX(a, 32) + + AVX512BW + AVX512VL +
immintrin.h
+ Special Math Functions
- - - - Reduce the packed unsigned 8-bit integers in "a" by maximum using mask "k". - Returns the maximum of all active elements in "a". - - DEFINE REDUCE_MAX(src, len) { - IF len == 2 - RETURN (src[7:0] > src[15:8] ? src[7:0] : src[15:8]) - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*8 - src[i+7:i] := (src[i+7:i] > src[i+8*len+7:i+8*len] ? src[i+7:i] : - src[i+8*len+7:i+8*len]) - ENDFOR - RETURN REDUCE_MAX(src[8*len-1:0], len) - } - tmp := a - FOR j := 0 to 31 - i := j*8 - IF k[j] - tmp[i+7:i] := a[i+7:i] - ELSE - tmp[i+7:i] := 0 - FI - ENDFOR - dst[7:0] := REDUCE_MAX(tmp, 32) - - AVX512BW - AVX512VL -
immintrin.h
- Special Math Functions + + + + Reduce the packed unsigned 8-bit integers in "a" by maximum using mask "k". Returns the maximum of all active elements in "a". + +DEFINE REDUCE_MAX(src, len) { + IF len == 2 + RETURN (src[7:0] > src[15:8] ? src[7:0] : src[15:8]) + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*8 + src[i+7:i] := (src[i+7:i] > src[i+8*len+7:i+8*len] ? src[i+7:i] : src[i+8*len+7:i+8*len]) + ENDFOR + RETURN REDUCE_MAX(src[8*len-1:0], len) +} +tmp := a +FOR j := 0 to 31 + i := j*8 + IF k[j] + tmp[i+7:i] := a[i+7:i] + ELSE + tmp[i+7:i] := 0 + FI +ENDFOR +dst[7:0] := REDUCE_MAX(tmp, 32) + + AVX512BW + AVX512VL +
immintrin.h
+ Special Math Functions
- - - Reduce the packed signed 16-bit integers in "a" by minimum. Returns the minimum - of all active elements in "a". - - DEFINE REDUCE_MIN(src, len) { - IF len == 2 - RETURN (src[15:0] < src[31:16] ? src[15:0] : src[31:16]) - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*16 - src[i+15:i] := (src[i+15:i] < src[i+16*len+15:i+16*len] ? src[i+15:i] : - src[i+16*len+15:i+16*len]) - ENDFOR - RETURN REDUCE_MIN(src[16*len-1:0], len) - } - dst[15:0] := REDUCE_MIN(a, 8) - - AVX512BW - AVX512VL -
immintrin.h
- Special Math Functions + + + Reduce the packed signed 16-bit integers in "a" by minimum. Returns the minimum of all active elements in "a". + +DEFINE REDUCE_MIN(src, len) { + IF len == 2 + RETURN (src[15:0] < src[31:16] ? src[15:0] : src[31:16]) + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*16 + src[i+15:i] := (src[i+15:i] < src[i+16*len+15:i+16*len] ? src[i+15:i] : src[i+16*len+15:i+16*len]) + ENDFOR + RETURN REDUCE_MIN(src[16*len-1:0], len) +} +dst[15:0] := REDUCE_MIN(a, 8) + + AVX512BW + AVX512VL +
immintrin.h
+ Special Math Functions
- - - - Reduce the packed signed 16-bit integers in "a" by minimum using mask "k". - Returns the minimum of all active elements in "a". - - DEFINE REDUCE_MIN(src, len) { - IF len == 2 - RETURN (src[15:0] < src[31:16] ? src[15:0] : src[31:16]) - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*16 - src[i+15:i] := (src[i+15:i] < src[i+16*len+15:i+16*len] ? src[i+15:i] : - src[i+16*len+15:i+16*len]) - ENDFOR - RETURN REDUCE_MIN(src[16*len-1:0], len) - } - tmp := a - FOR j := 0 to 7 - i := j*16 - IF k[j] - tmp[i+15:i] := a[i+15:i] - ELSE - tmp[i+15:i] := Int16(0x7FFF) - FI - ENDFOR - dst[15:0] := REDUCE_MIN(tmp, 8) - - AVX512BW - AVX512VL -
immintrin.h
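The minimum reductions mirror the maximum ones, with the identity flipped to the largest value of the element type (Int16(0x7FFF) here; 0xFFFF and 0xFF for the unsigned variants below). A scalar sketch under the same assumptions as the earlier one (illustrative only, name hypothetical):

    // Scalar model of the masked REDUCE_MIN pseudocode above. Inactive
    // lanes take i16::MAX (Int16(0x7FFF)), which never wins a minimum.
    fn mask_reduce_min_i16(k: u8, a: [i16; 8]) -> i16 {
        let mut tmp = [i16::MAX; 8];
        for j in 0..8 {
            if (k >> j) & 1 == 1 {
                tmp[j] = a[j];
            }
        }
        let mut len = 8;
        while len > 1 {
            len /= 2;
            for j in 0..len {
                tmp[j] = tmp[j].min(tmp[j + len]);
            }
        }
        tmp[0]
    }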
- Special Math Functions + + + + Reduce the packed signed 16-bit integers in "a" by minimum using mask "k". Returns the minimum of all active elements in "a". + +DEFINE REDUCE_MIN(src, len) { + IF len == 2 + RETURN (src[15:0] < src[31:16] ? src[15:0] : src[31:16]) + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*16 + src[i+15:i] := (src[i+15:i] < src[i+16*len+15:i+16*len] ? src[i+15:i] : src[i+16*len+15:i+16*len]) + ENDFOR + RETURN REDUCE_MIN(src[16*len-1:0], len) +} +tmp := a +FOR j := 0 to 7 + i := j*16 + IF k[j] + tmp[i+15:i] := a[i+15:i] + ELSE + tmp[i+15:i] := Int16(0x7FFF) + FI +ENDFOR +dst[15:0] := REDUCE_MIN(tmp, 8) + + AVX512BW + AVX512VL +
immintrin.h
+ Special Math Functions
- - - Reduce the packed signed 16-bit integers in "a" by minimum. Returns the minimum - of all active elements in "a". - - DEFINE REDUCE_MIN(src, len) { - IF len == 2 - RETURN (src[15:0] < src[31:16] ? src[15:0] : src[31:16]) - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*16 - src[i+15:i] := (src[i+15:i] < src[i+16*len+15:i+16*len] ? src[i+15:i] : - src[i+16*len+15:i+16*len]) - ENDFOR - RETURN REDUCE_MIN(src[16*len-1:0], len) - } - dst[15:0] := REDUCE_MIN(a, 16) - - AVX512BW - AVX512VL -
immintrin.h
- Special Math Functions + + + Reduce the packed signed 16-bit integers in "a" by minimum. Returns the minimum of all active elements in "a". + +DEFINE REDUCE_MIN(src, len) { + IF len == 2 + RETURN (src[15:0] < src[31:16] ? src[15:0] : src[31:16]) + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*16 + src[i+15:i] := (src[i+15:i] < src[i+16*len+15:i+16*len] ? src[i+15:i] : src[i+16*len+15:i+16*len]) + ENDFOR + RETURN REDUCE_MIN(src[16*len-1:0], len) +} +dst[15:0] := REDUCE_MIN(a, 16) + + AVX512BW + AVX512VL +
immintrin.h
+ Special Math Functions
- - - - Reduce the packed signed 16-bit integers in "a" by minimum using mask "k". - Returns the minimum of all active elements in "a". - - DEFINE REDUCE_MIN(src, len) { - IF len == 2 - RETURN (src[15:0] < src[31:16] ? src[15:0] : src[31:16]) - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*16 - src[i+15:i] := (src[i+15:i] < src[i+16*len+15:i+16*len] ? src[i+15:i] : - src[i+16*len+15:i+16*len]) - ENDFOR - RETURN REDUCE_MIN(src[16*len-1:0], len) - } - tmp := a - FOR j := 0 to 15 - i := j*16 - IF k[j] - tmp[i+15:i] := a[i+15:i] - ELSE - tmp[i+15:i] := Int16(0x7FFF) - FI - ENDFOR - dst[15:0] := REDUCE_MIN(tmp, 16) - - AVX512BW - AVX512VL -
immintrin.h
- Special Math Functions + + + + Reduce the packed signed 16-bit integers in "a" by minimum using mask "k". Returns the minimum of all active elements in "a". + +DEFINE REDUCE_MIN(src, len) { + IF len == 2 + RETURN (src[15:0] < src[31:16] ? src[15:0] : src[31:16]) + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*16 + src[i+15:i] := (src[i+15:i] < src[i+16*len+15:i+16*len] ? src[i+15:i] : src[i+16*len+15:i+16*len]) + ENDFOR + RETURN REDUCE_MIN(src[16*len-1:0], len) +} +tmp := a +FOR j := 0 to 15 + i := j*16 + IF k[j] + tmp[i+15:i] := a[i+15:i] + ELSE + tmp[i+15:i] := Int16(0x7FFF) + FI +ENDFOR +dst[15:0] := REDUCE_MIN(tmp, 16) + + AVX512BW + AVX512VL +
immintrin.h
+ Special Math Functions
- - - Reduce the packed signed 8-bit integers in "a" by minimum. Returns the minimum - of all active elements in "a". - - DEFINE REDUCE_MIN(src, len) { - IF len == 2 - RETURN (src[7:0] < src[15:8] ? src[7:0] : src[15:8]) - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*8 - src[i+7:i] := (src[i+7:i] < src[i+8*len+7:i+8*len] ? src[i+7:i] : - src[i+8*len+7:i+8*len]) - ENDFOR - RETURN REDUCE_MIN(src[8*len-1:0], len) - } - dst[7:0] := REDUCE_MIN(a, 16) - - AVX512BW - AVX512VL -
immintrin.h
- Special Math Functions + + + Reduce the packed signed 8-bit integers in "a" by minimum. Returns the minimum of all active elements in "a". + +DEFINE REDUCE_MIN(src, len) { + IF len == 2 + RETURN (src[7:0] < src[15:8] ? src[7:0] : src[15:8]) + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*8 + src[i+7:i] := (src[i+7:i] < src[i+8*len+7:i+8*len] ? src[i+7:i] : src[i+8*len+7:i+8*len]) + ENDFOR + RETURN REDUCE_MIN(src[8*len-1:0], len) +} +dst[7:0] := REDUCE_MIN(a, 16) + + AVX512BW + AVX512VL +
immintrin.h
+ Special Math Functions
- - - - Reduce the packed signed 8-bit integers in "a" by minimum using mask "k". - Returns the minimum of all active elements in "a". - - DEFINE REDUCE_MIN(src, len) { - IF len == 2 - RETURN (src[7:0] < src[15:8] ? src[7:0] : src[15:8]) - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*8 - src[i+7:i] := (src[i+7:i] < src[i+8*len+7:i+8*len] ? src[i+7:i] : - src[i+8*len+7:i+8*len]) - ENDFOR - RETURN REDUCE_MIN(src[8*len-1:0], len) - } - tmp := a - FOR j := 0 to 15 - i := j*8 - IF k[j] - tmp[i+7:i] := a[i+7:i] - ELSE - tmp[i+7:i] := Int8(0x7F) - FI - ENDFOR - dst[7:0] := REDUCE_MIN(tmp, 16) - - AVX512BW - AVX512VL -
immintrin.h
- Special Math Functions + + + + Reduce the packed signed 8-bit integers in "a" by minimum using mask "k". Returns the minimum of all active elements in "a". + +DEFINE REDUCE_MIN(src, len) { + IF len == 2 + RETURN (src[7:0] < src[15:8] ? src[7:0] : src[15:8]) + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*8 + src[i+7:i] := (src[i+7:i] < src[i+8*len+7:i+8*len] ? src[i+7:i] : src[i+8*len+7:i+8*len]) + ENDFOR + RETURN REDUCE_MIN(src[8*len-1:0], len) +} +tmp := a +FOR j := 0 to 15 + i := j*8 + IF k[j] + tmp[i+7:i] := a[i+7:i] + ELSE + tmp[i+7:i] := Int8(0x7F) + FI +ENDFOR +dst[7:0] := REDUCE_MIN(tmp, 16) + + AVX512BW + AVX512VL +
immintrin.h
+ Special Math Functions
- - - Reduce the packed signed 8-bit integers in "a" by minimum. Returns the minimum - of all active elements in "a". - - DEFINE REDUCE_MIN(src, len) { - IF len == 2 - RETURN (src[7:0] < src[15:8] ? src[7:0] : src[15:8]) - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*8 - src[i+7:i] := (src[i+7:i] < src[i+8*len+7:i+8*len] ? src[i+7:i] : - src[i+8*len+7:i+8*len]) - ENDFOR - RETURN REDUCE_MIN(src[8*len-1:0], len) - } - dst[7:0] := REDUCE_MIN(a, 32) - - AVX512BW - AVX512VL -
immintrin.h
- Special Math Functions + + + Reduce the packed signed 8-bit integers in "a" by minimum. Returns the minimum of all active elements in "a". + +DEFINE REDUCE_MIN(src, len) { + IF len == 2 + RETURN (src[7:0] < src[15:8] ? src[7:0] : src[15:8]) + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*8 + src[i+7:i] := (src[i+7:i] < src[i+8*len+7:i+8*len] ? src[i+7:i] : src[i+8*len+7:i+8*len]) + ENDFOR + RETURN REDUCE_MIN(src[8*len-1:0], len) +} +dst[7:0] := REDUCE_MIN(a, 32) + + AVX512BW + AVX512VL +
immintrin.h
+ Special Math Functions
- - - - Reduce the packed signed 8-bit integers in "a" by minimum using mask "k". - Returns the minimum of all active elements in "a". - - DEFINE REDUCE_MIN(src, len) { - IF len == 2 - RETURN (src[7:0] < src[15:8] ? src[7:0] : src[15:8]) - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*8 - src[i+7:i] := (src[i+7:i] < src[i+8*len+7:i+8*len] ? src[i+7:i] : - src[i+8*len+7:i+8*len]) - ENDFOR - RETURN REDUCE_MIN(src[8*len-1:0], len) - } - tmp := a - FOR j := 0 to 31 - i := j*8 - IF k[j] - tmp[i+7:i] := a[i+7:i] - ELSE - tmp[i+7:i] := Int8(0x7F) - FI - ENDFOR - dst[7:0] := REDUCE_MIN(tmp, 32) - - AVX512BW - AVX512VL -
immintrin.h
- Special Math Functions + + + + Reduce the packed signed 8-bit integers in "a" by minimum using mask "k". Returns the minimum of all active elements in "a". + +DEFINE REDUCE_MIN(src, len) { + IF len == 2 + RETURN (src[7:0] < src[15:8] ? src[7:0] : src[15:8]) + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*8 + src[i+7:i] := (src[i+7:i] < src[i+8*len+7:i+8*len] ? src[i+7:i] : src[i+8*len+7:i+8*len]) + ENDFOR + RETURN REDUCE_MIN(src[8*len-1:0], len) +} +tmp := a +FOR j := 0 to 31 + i := j*8 + IF k[j] + tmp[i+7:i] := a[i+7:i] + ELSE + tmp[i+7:i] := Int8(0x7F) + FI +ENDFOR +dst[7:0] := REDUCE_MIN(tmp, 32) + + AVX512BW + AVX512VL +
immintrin.h
+ Special Math Functions
- - - Reduce the packed unsigned 16-bit integers in "a" by minimum. Returns the - minimum of all active elements in "a". - - DEFINE REDUCE_MIN(src, len) { - IF len == 2 - RETURN (src[15:0] < src[31:16] ? src[15:0] : src[31:16]) - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*16 - src[i+15:i] := (src[i+15:i] < src[i+16*len+15:i+16*len] ? src[i+15:i] : - src[i+16*len+15:i+16*len]) - ENDFOR - RETURN REDUCE_MIN(src[16*len-1:0], len) - } - dst[15:0] := REDUCE_MIN(a, 8) - - AVX512BW - AVX512VL -
immintrin.h
- Special Math Functions + + + Reduce the packed unsigned 16-bit integers in "a" by minimum. Returns the minimum of all active elements in "a". + +DEFINE REDUCE_MIN(src, len) { + IF len == 2 + RETURN (src[15:0] < src[31:16] ? src[15:0] : src[31:16]) + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*16 + src[i+15:i] := (src[i+15:i] < src[i+16*len+15:i+16*len] ? src[i+15:i] : src[i+16*len+15:i+16*len]) + ENDFOR + RETURN REDUCE_MIN(src[16*len-1:0], len) +} +dst[15:0] := REDUCE_MIN(a, 8) + + AVX512BW + AVX512VL +
immintrin.h
+ Special Math Functions
- - - - Reduce the packed unsigned 16-bit integers in "a" by minimum using mask "k". - Returns the minimum of all active elements in "a". - - DEFINE REDUCE_MIN(src, len) { - IF len == 2 - RETURN (src[15:0] < src[31:16] ? src[15:0] : src[31:16]) - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*16 - src[i+15:i] := (src[i+15:i] < src[i+16*len+15:i+16*len] ? src[i+15:i] : - src[i+16*len+15:i+16*len]) - ENDFOR - RETURN REDUCE_MIN(src[16*len-1:0], len) - } - tmp := a - FOR j := 0 to 7 - i := j*16 - IF k[j] - tmp[i+15:i] := a[i+15:i] - ELSE - tmp[i+15:i] := 0xFFFF - FI - ENDFOR - dst[15:0] := REDUCE_MIN(tmp, 8) - - AVX512BW - AVX512VL -
immintrin.h
- Special Math Functions + + + + Reduce the packed unsigned 16-bit integers in "a" by minimum using mask "k". Returns the minimum of all active elements in "a". + +DEFINE REDUCE_MIN(src, len) { + IF len == 2 + RETURN (src[15:0] < src[31:16] ? src[15:0] : src[31:16]) + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*16 + src[i+15:i] := (src[i+15:i] < src[i+16*len+15:i+16*len] ? src[i+15:i] : src[i+16*len+15:i+16*len]) + ENDFOR + RETURN REDUCE_MIN(src[16*len-1:0], len) +} +tmp := a +FOR j := 0 to 7 + i := j*16 + IF k[j] + tmp[i+15:i] := a[i+15:i] + ELSE + tmp[i+15:i] := 0xFFFF + FI +ENDFOR +dst[15:0] := REDUCE_MIN(tmp, 8) + + AVX512BW + AVX512VL +
immintrin.h
+ Special Math Functions
- - - Reduce the packed unsigned 16-bit integers in "a" by minimum. Returns the - minimum of all active elements in "a". - - DEFINE REDUCE_MIN(src, len) { - IF len == 2 - RETURN (src[15:0] < src[31:16] ? src[15:0] : src[31:16]) - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*16 - src[i+15:i] := (src[i+15:i] < src[i+16*len+15:i+16*len] ? src[i+15:i] : - src[i+16*len+15:i+16*len]) - ENDFOR - RETURN REDUCE_MIN(src[16*len-1:0], len) - } - dst[15:0] := REDUCE_MIN(a, 16) - - AVX512BW - AVX512VL -
immintrin.h
- Special Math Functions + + + Reduce the packed unsigned 16-bit integers in "a" by minimum. Returns the minimum of all active elements in "a". + +DEFINE REDUCE_MIN(src, len) { + IF len == 2 + RETURN (src[15:0] < src[31:16] ? src[15:0] : src[31:16]) + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*16 + src[i+15:i] := (src[i+15:i] < src[i+16*len+15:i+16*len] ? src[i+15:i] : src[i+16*len+15:i+16*len]) + ENDFOR + RETURN REDUCE_MIN(src[16*len-1:0], len) +} +dst[15:0] := REDUCE_MIN(a, 16) + + AVX512BW + AVX512VL +
immintrin.h
+ Special Math Functions
- - - - Reduce the packed unsigned 16-bit integers in "a" by minimum using mask "k". - Returns the minimum of all active elements in "a". - - DEFINE REDUCE_MIN(src, len) { - IF len == 2 - RETURN (src[15:0] < src[31:16] ? src[15:0] : src[31:16]) - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*16 - src[i+15:i] := (src[i+15:i] < src[i+16*len+15:i+16*len] ? src[i+15:i] : - src[i+16*len+15:i+16*len]) - ENDFOR - RETURN REDUCE_MIN(src[16*len-1:0], len) - } - tmp := a - FOR j := 0 to 15 - i := j*16 - IF k[j] - tmp[i+15:i] := a[i+15:i] - ELSE - tmp[i+15:i] := 0xFFFF - FI - ENDFOR - dst[15:0] := REDUCE_MIN(tmp, 16) - - AVX512BW - AVX512VL -
immintrin.h
- Special Math Functions + + + + Reduce the packed unsigned 16-bit integers in "a" by minimum using mask "k". Returns the minimum of all active elements in "a". + +DEFINE REDUCE_MIN(src, len) { + IF len == 2 + RETURN (src[15:0] < src[31:16] ? src[15:0] : src[31:16]) + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*16 + src[i+15:i] := (src[i+15:i] < src[i+16*len+15:i+16*len] ? src[i+15:i] : src[i+16*len+15:i+16*len]) + ENDFOR + RETURN REDUCE_MIN(src[16*len-1:0], len) +} +tmp := a +FOR j := 0 to 15 + i := j*16 + IF k[j] + tmp[i+15:i] := a[i+15:i] + ELSE + tmp[i+15:i] := 0xFFFF + FI +ENDFOR +dst[15:0] := REDUCE_MIN(tmp, 16) + + AVX512BW + AVX512VL +
immintrin.h
+ Special Math Functions
- - - Reduce the packed unsigned 8-bit integers in "a" by minimum. Returns the - minimum of all active elements in "a". - - DEFINE REDUCE_MIN(src, len) { - IF len == 2 - RETURN (src[7:0] < src[15:8] ? src[7:0] : src[15:8]) - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*8 - src[i+7:i] := (src[i+7:i] < src[i+8*len+7:i+8*len] ? src[i+7:i] : - src[i+8*len+7:i+8*len]) - ENDFOR - RETURN REDUCE_MIN(src[8*len-1:0], len) - } - dst[7:0] := REDUCE_MIN(a, 16) - - AVX512BW - AVX512VL -
immintrin.h
- Special Math Functions + + + Reduce the packed unsigned 8-bit integers in "a" by minimum. Returns the minimum of all active elements in "a". + +DEFINE REDUCE_MIN(src, len) { + IF len == 2 + RETURN (src[7:0] < src[15:8] ? src[7:0] : src[15:8]) + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*8 + src[i+7:i] := (src[i+7:i] < src[i+8*len+7:i+8*len] ? src[i+7:i] : src[i+8*len+7:i+8*len]) + ENDFOR + RETURN REDUCE_MIN(src[8*len-1:0], len) +} +dst[7:0] := REDUCE_MIN(a, 16) + + AVX512BW + AVX512VL +
immintrin.h
+ Special Math Functions
- - - - Reduce the packed unsigned 8-bit integers in "a" by minimum using mask "k". - Returns the minimum of all active elements in "a". - - DEFINE REDUCE_MIN(src, len) { - IF len == 2 - RETURN (src[7:0] < src[15:8] ? src[7:0] : src[15:8]) - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*8 - src[i+7:i] := (src[i+7:i] < src[i+8*len+7:i+8*len] ? src[i+7:i] : - src[i+8*len+7:i+8*len]) - ENDFOR - RETURN REDUCE_MIN(src[8*len-1:0], len) - } - tmp := a - FOR j := 0 to 15 - i := j*8 - IF k[j] - tmp[i+7:i] := a[i+7:i] - ELSE - tmp[i+7:i] := 0xFF - FI - ENDFOR - dst[7:0] := REDUCE_MIN(tmp, 16) - - AVX512BW - AVX512VL -
immintrin.h
- Special Math Functions + + + + Reduce the packed unsigned 8-bit integers in "a" by minimum using mask "k". Returns the minimum of all active elements in "a". + +DEFINE REDUCE_MIN(src, len) { + IF len == 2 + RETURN (src[7:0] < src[15:8] ? src[7:0] : src[15:8]) + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*8 + src[i+7:i] := (src[i+7:i] < src[i+8*len+7:i+8*len] ? src[i+7:i] : src[i+8*len+7:i+8*len]) + ENDFOR + RETURN REDUCE_MIN(src[8*len-1:0], len) +} +tmp := a +FOR j := 0 to 15 + i := j*8 + IF k[j] + tmp[i+7:i] := a[i+7:i] + ELSE + tmp[i+7:i] := 0xFF + FI +ENDFOR +dst[7:0] := REDUCE_MIN(tmp, 16) + + AVX512BW + AVX512VL +
immintrin.h
+ Special Math Functions
- - - Reduce the packed unsigned 8-bit integers in "a" by minimum. Returns the - minimum of all active elements in "a". - - DEFINE REDUCE_MIN(src, len) { - IF len == 2 - RETURN (src[7:0] < src[15:8] ? src[7:0] : src[15:8]) - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*8 - src[i+7:i] := (src[i+7:i] < src[i+8*len+7:i+8*len] ? src[i+7:i] : - src[i+8*len+7:i+8*len]) - ENDFOR - RETURN REDUCE_MIN(src[8*len-1:0], len) - } - dst[7:0] := REDUCE_MIN(a, 32) - - AVX512BW - AVX512VL -
immintrin.h
- Special Math Functions + + + Reduce the packed unsigned 8-bit integers in "a" by minimum. Returns the minimum of all active elements in "a". + +DEFINE REDUCE_MIN(src, len) { + IF len == 2 + RETURN (src[7:0] < src[15:8] ? src[7:0] : src[15:8]) + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*8 + src[i+7:i] := (src[i+7:i] < src[i+8*len+7:i+8*len] ? src[i+7:i] : src[i+8*len+7:i+8*len]) + ENDFOR + RETURN REDUCE_MIN(src[8*len-1:0], len) +} +dst[7:0] := REDUCE_MIN(a, 32) + + AVX512BW + AVX512VL +
immintrin.h
+ Special Math Functions
- - - - Reduce the packed unsigned 8-bit integers in "a" by minimum using mask "k". - Returns the minimum of all active elements in "a". - - DEFINE REDUCE_MIN(src, len) { - IF len == 2 - RETURN (src[7:0] < src[15:8] ? src[7:0] : src[15:8]) - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*8 - src[i+7:i] := (src[i+7:i] < src[i+8*len+7:i+8*len] ? src[i+7:i] : - src[i+8*len+7:i+8*len]) - ENDFOR - RETURN REDUCE_MIN(src[8*len-1:0], len) - } - tmp := a - FOR j := 0 to 15 - i := j*8 - IF k[j] - tmp[i+7:i] := a[i+7:i] - ELSE - tmp[i+7:i] := 0xFF - FI - ENDFOR - dst[7:0] := REDUCE_MIN(tmp, 16) - - AVX512BW - AVX512VL -
immintrin.h
- Special Math Functions -
- - - - - - Unpack and interleave 32 bits from masks "a" and "b", and store the 64-bit - result in "dst". - - dst[31:0] := b[31:0] - dst[63:32] := a[31:0] - dst[MAX:64] := 0 - - - AVX512BW -
immintrin.h
- Miscellaneous + + + + Reduce the packed unsigned 8-bit integers in "a" by minimum using mask "k". Returns the minimum of all active elements in "a". +
+DEFINE REDUCE_MIN(src, len) {
+	IF len == 2
+		RETURN (src[7:0] < src[15:8] ? src[7:0] : src[15:8])
+	FI
+	len := len / 2
+	FOR j:= 0 to (len-1)
+		i := j*8
+		src[i+7:i] := (src[i+7:i] < src[i+8*len+7:i+8*len] ? src[i+7:i] : src[i+8*len+7:i+8*len])
+	ENDFOR
+	RETURN REDUCE_MIN(src[8*len-1:0], len)
+}
+tmp := a
+FOR j := 0 to 31
+	i := j*8
+	IF k[j]
+		tmp[i+7:i] := a[i+7:i]
+	ELSE
+		tmp[i+7:i] := 0xFF
+	FI
+ENDFOR
+dst[7:0] := REDUCE_MIN(tmp, 32)
+
+	AVX512BW
+	AVX512VL
+
immintrin.h
+ Special Math Functions +
+ + + + + + Unpack and interleave 32 bits from masks "a" and "b", and store the 64-bit result in "dst". + +dst[31:0] := b[31:0] +dst[63:32] := a[31:0] +dst[MAX:64] := 0 + + + AVX512BW +
immintrin.h
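Once the mask registers are modeled as plain integers, the mask unpack is just a shift-and-or; a one-line Rust sketch (mask registers modeled as u64, illustrative only):

    // Scalar model of the 32-bit mask unpack above: b supplies the low
    // 32 bits, a supplies the next 32, everything above is zeroed.
    fn kunpackd(a: u64, b: u64) -> u64 {
        ((a & 0xFFFF_FFFF) << 32) | (b & 0xFFFF_FFFF)
    }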
+ Miscellaneous
- - - - Unpack and interleave 16 bits from masks "a" and "b", and store the 32-bit - result in "dst". - - dst[15:0] := b[15:0] - dst[31:16] := a[15:0] - dst[MAX:32] := 0 - - - AVX512BW -
immintrin.h
- Miscellaneous + + + + Unpack and interleave 16 bits from masks "a" and "b", and store the 32-bit result in "dst". + +dst[15:0] := b[15:0] +dst[31:16] := a[15:0] +dst[MAX:32] := 0 + + + AVX512BW +
immintrin.h
+ Miscellaneous
- - - - - Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit - integers in "a" compared to those in "b", and store the 16-bit results in "dst". - Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two - SADs use the lower 8-bit quadruplet of the lane from "a", and the last two SADs use the - uppper 8-bit quadruplet of the lane from "a". Quadruplets from "b" are selected from - within 128-bit lanes according to the control in "imm8", and each SAD in each 64-bit - lane uses the selected quadruplet at 8-bit offsets. - - FOR i := 0 to 3 - tmp.m128[i].dword[0] := b.m128[i].dword[ imm8[1:0] ] - tmp.m128[i].dword[1] := b.m128[i].dword[ imm8[3:2] ] - tmp.m128[i].dword[2] := b.m128[i].dword[ imm8[5:4] ] - tmp.m128[i].dword[3] := b.m128[i].dword[ imm8[7:6] ] - ENDFOR - FOR j := 0 to 7 - i := j*64 - dst[i+15:i] := ABS(a[i+7:i] - tmp[i+7:i]) + ABS(a[i+15:i+8] - tmp[i+15:i+8]) +\ - ABS(a[i+23:i+16] - tmp[i+23:i+16]) + ABS(a[i+31:i+24] - tmp[i+31:i+24]) - - dst[i+31:i+16] := ABS(a[i+7:i] - tmp[i+15:i+8]) + ABS(a[i+15:i+8] - tmp[i+23:i+16]) +\ - ABS(a[i+23:i+16] - tmp[i+31:i+24]) + ABS(a[i+31:i+24] - tmp[i+39:i+32]) - - dst[i+47:i+32] := ABS(a[i+39:i+32] - tmp[i+23:i+16]) + ABS(a[i+47:i+40] - - tmp[i+31:i+24]) +\ - ABS(a[i+55:i+48] - tmp[i+39:i+32]) + ABS(a[i+63:i+56] - tmp[i+47:i+40]) - - dst[i+63:i+48] := ABS(a[i+39:i+32] - tmp[i+31:i+24]) + ABS(a[i+47:i+40] - - tmp[i+39:i+32]) +\ - ABS(a[i+55:i+48] - tmp[i+47:i+40]) + ABS(a[i+63:i+56] - tmp[i+55:i+48]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512BW - + + + + + Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in "a" compared to those in "b", and store the 16-bit results in "dst". + Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two SADs use the lower 8-bit quadruplet of the lane from "a", and the last two SADs use the upper 8-bit quadruplet of the lane from "a". Quadruplets from "b" are selected from within 128-bit lanes according to the control in "imm8", and each SAD in each 64-bit lane uses the selected quadruplet at 8-bit offsets. +
+FOR i := 0 to 3
+	tmp.m128[i].dword[0] := b.m128[i].dword[ imm8[1:0] ]
+	tmp.m128[i].dword[1] := b.m128[i].dword[ imm8[3:2] ]
+	tmp.m128[i].dword[2] := b.m128[i].dword[ imm8[5:4] ]
+	tmp.m128[i].dword[3] := b.m128[i].dword[ imm8[7:6] ]
+ENDFOR
+FOR j := 0 to 7
+	i := j*64
+	dst[i+15:i] := ABS(a[i+7:i] - tmp[i+7:i]) + ABS(a[i+15:i+8] - tmp[i+15:i+8]) +\
+	               ABS(a[i+23:i+16] - tmp[i+23:i+16]) + ABS(a[i+31:i+24] - tmp[i+31:i+24])
+
+	dst[i+31:i+16] := ABS(a[i+7:i] - tmp[i+15:i+8]) + ABS(a[i+15:i+8] - tmp[i+23:i+16]) +\
+	                  ABS(a[i+23:i+16] - tmp[i+31:i+24]) + ABS(a[i+31:i+24] - tmp[i+39:i+32])
+
+	dst[i+47:i+32] := ABS(a[i+39:i+32] - tmp[i+23:i+16]) + ABS(a[i+47:i+40] - tmp[i+31:i+24]) +\
+	                  ABS(a[i+55:i+48] - tmp[i+39:i+32]) + ABS(a[i+63:i+56] - tmp[i+47:i+40])
+
+	dst[i+63:i+48] := ABS(a[i+39:i+32] - tmp[i+31:i+24]) + ABS(a[i+47:i+40] - tmp[i+39:i+32]) +\
+	                  ABS(a[i+55:i+48] - tmp[i+47:i+40]) + ABS(a[i+63:i+56] - tmp[i+55:i+48])
+ENDFOR
+dst[MAX:512] := 0
+
+
+	AVX512BW
immintrin.h
- Miscellaneous + + + + + Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in "a" compared to those in "b", and store the 16-bit results in "dst". + Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two SADs use the lower 8-bit quadruplet of the lane from "a", and the last two SADs use the uppper 8-bit quadruplet of the lane from "a". Quadruplets from "b" are selected from within 128-bit lanes according to the control in "imm8", and each SAD in each 64-bit lane uses the selected quadruplet at 8-bit offsets. + +FOR i := 0 to 3 + tmp.m128[i].dword[0] := b.m128[i].dword[ imm8[1:0] ] + tmp.m128[i].dword[1] := b.m128[i].dword[ imm8[3:2] ] + tmp.m128[i].dword[2] := b.m128[i].dword[ imm8[5:4] ] + tmp.m128[i].dword[3] := b.m128[i].dword[ imm8[7:6] ] +ENDFOR +FOR j := 0 to 7 + i := j*64 + dst[i+15:i] := ABS(a[i+7:i] - tmp[i+7:i]) + ABS(a[i+15:i+8] - tmp[i+15:i+8]) +\ + ABS(a[i+23:i+16] - tmp[i+23:i+16]) + ABS(a[i+31:i+24] - tmp[i+31:i+24]) + + dst[i+31:i+16] := ABS(a[i+7:i] - tmp[i+15:i+8]) + ABS(a[i+15:i+8] - tmp[i+23:i+16]) +\ + ABS(a[i+23:i+16] - tmp[i+31:i+24]) + ABS(a[i+31:i+24] - tmp[i+39:i+32]) + + dst[i+47:i+32] := ABS(a[i+39:i+32] - tmp[i+23:i+16]) + ABS(a[i+47:i+40] - tmp[i+31:i+24]) +\ + ABS(a[i+55:i+48] - tmp[i+39:i+32]) + ABS(a[i+63:i+56] - tmp[i+47:i+40]) + + dst[i+63:i+48] := ABS(a[i+39:i+32] - tmp[i+31:i+24]) + ABS(a[i+47:i+40] - tmp[i+39:i+32]) +\ + ABS(a[i+55:i+48] - tmp[i+47:i+40]) + ABS(a[i+63:i+56] - tmp[i+55:i+48]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512BW +
immintrin.h
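The dbsad operation is easiest to follow with the vectors flattened to byte arrays: "imm8" first gathers dwords of "b" within each 128-bit lane, then four 4-byte SADs are formed per 64-bit chunk at staggered offsets. A scalar Rust sketch, illustrative only (name hypothetical):

    // Scalar model of the dbsad pseudocode above, with the 512-bit
    // vectors modeled as 64-byte arrays and the result as 32 words.
    fn dbsad_epu8(a: [u8; 64], b: [u8; 64], imm8: u8) -> [u16; 32] {
        // Select dwords of b within each 128-bit lane according to imm8.
        let mut tmp = [0u8; 64];
        for lane in 0..4 {
            for d in 0..4 {
                let sel = ((imm8 >> (2 * d)) & 3) as usize;
                let (to, from) = (lane * 16 + d * 4, lane * 16 + sel * 4);
                tmp[to..to + 4].copy_from_slice(&b[from..from + 4]);
            }
        }
        // Sum of absolute differences over one 4-byte quadruplet.
        let sad = |x: &[u8], y: &[u8]| -> u16 {
            (0..4).map(|t| (x[t] as i16 - y[t] as i16).unsigned_abs()).sum()
        };
        let mut dst = [0u16; 32];
        for j in 0..8 {
            let q = j * 8; // byte offset of this 64-bit chunk
            dst[j * 4] = sad(&a[q..q + 4], &tmp[q..q + 4]);
            dst[j * 4 + 1] = sad(&a[q..q + 4], &tmp[q + 1..q + 5]);
            dst[j * 4 + 2] = sad(&a[q + 4..q + 8], &tmp[q + 2..q + 6]);
            dst[j * 4 + 3] = sad(&a[q + 4..q + 8], &tmp[q + 3..q + 7]);
        }
        dst
    }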
+ Miscellaneous
- - - - - - - Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit - integers in "a" compared to those in "b", and store the 16-bit results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two - SADs use the lower 8-bit quadruplet of the lane from "a", and the last two SADs use the - uppper 8-bit quadruplet of the lane from "a". Quadruplets from "b" are selected from - within 128-bit lanes according to the control in "imm8", and each SAD in each 64-bit - lane uses the selected quadruplet at 8-bit offsets. - - FOR i := 0 to 3 - tmp.m128[i].dword[0] := b.m128[i].dword[ imm8[1:0] ] - tmp.m128[i].dword[1] := b.m128[i].dword[ imm8[3:2] ] - tmp.m128[i].dword[2] := b.m128[i].dword[ imm8[5:4] ] - tmp.m128[i].dword[3] := b.m128[i].dword[ imm8[7:6] ] - ENDFOR - FOR j := 0 to 7 - i := j*64 - tmp_dst[i+15:i] := ABS(a[i+7:i] - tmp[i+7:i]) + ABS(a[i+15:i+8] - tmp[i+15:i+8]) +\ - ABS(a[i+23:i+16] - tmp[i+23:i+16]) + ABS(a[i+31:i+24] - tmp[i+31:i+24]) - tmp_dst[i+31:i+16] := ABS(a[i+7:i] - tmp[i+15:i+8]) + ABS(a[i+15:i+8] - tmp[i+23:i+16]) - +\ - ABS(a[i+23:i+16] - tmp[i+31:i+24]) + ABS(a[i+31:i+24] - tmp[i+39:i+32]) - tmp_dst[i+47:i+32] := ABS(a[i+39:i+32] - tmp[i+23:i+16]) + ABS(a[i+47:i+40] - - tmp[i+31:i+24]) +\ - ABS(a[i+55:i+48] - tmp[i+39:i+32]) + ABS(a[i+63:i+56] - tmp[i+47:i+40]) - tmp_dst[i+63:i+48] := ABS(a[i+39:i+32] - tmp[i+31:i+24]) + ABS(a[i+47:i+40] - - tmp[i+39:i+32]) +\ - ABS(a[i+55:i+48] - tmp[i+47:i+40]) + ABS(a[i+63:i+56] - tmp[i+55:i+48]) - ENDFOR - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := tmp_dst[i+15:i] - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512BW - + + + + + + + Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in "a" compared to those in "b", and store the 16-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two SADs use the lower 8-bit quadruplet of the lane from "a", and the last two SADs use the upper 8-bit quadruplet of the lane from "a". Quadruplets from "b" are selected from within 128-bit lanes according to the control in "imm8", and each SAD in each 64-bit lane uses the selected quadruplet at 8-bit offsets. +
+FOR i := 0 to 3
+	tmp.m128[i].dword[0] := b.m128[i].dword[ imm8[1:0] ]
+	tmp.m128[i].dword[1] := b.m128[i].dword[ imm8[3:2] ]
+	tmp.m128[i].dword[2] := b.m128[i].dword[ imm8[5:4] ]
+	tmp.m128[i].dword[3] := b.m128[i].dword[ imm8[7:6] ]
+ENDFOR
+FOR j := 0 to 7
+	i := j*64
+	tmp_dst[i+15:i] := ABS(a[i+7:i] - tmp[i+7:i]) + ABS(a[i+15:i+8] - tmp[i+15:i+8]) +\
+	                   ABS(a[i+23:i+16] - tmp[i+23:i+16]) + ABS(a[i+31:i+24] - tmp[i+31:i+24])
+
+	tmp_dst[i+31:i+16] := ABS(a[i+7:i] - tmp[i+15:i+8]) + ABS(a[i+15:i+8] - tmp[i+23:i+16]) +\
+	                      ABS(a[i+23:i+16] - tmp[i+31:i+24]) + ABS(a[i+31:i+24] - tmp[i+39:i+32])
+
+	tmp_dst[i+47:i+32] := ABS(a[i+39:i+32] - tmp[i+23:i+16]) + ABS(a[i+47:i+40] - tmp[i+31:i+24]) +\
+	                      ABS(a[i+55:i+48] - tmp[i+39:i+32]) + ABS(a[i+63:i+56] - tmp[i+47:i+40])
+
+	tmp_dst[i+63:i+48] := ABS(a[i+39:i+32] - tmp[i+31:i+24]) + ABS(a[i+47:i+40] - tmp[i+39:i+32]) +\
+	                      ABS(a[i+55:i+48] - tmp[i+47:i+40]) + ABS(a[i+63:i+56] - tmp[i+55:i+48])
+ENDFOR
+FOR j := 0 to 31
+	i := j*16
+	IF k[j]
+		dst[i+15:i] := tmp_dst[i+15:i]
+	ELSE
+		dst[i+15:i] := src[i+15:i]
+	FI
+ENDFOR
+dst[MAX:512] := 0
+
+
+	AVX512BW
immintrin.h
- Miscellaneous + + + + + + + Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in "a" compared to those in "b", and store the 16-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two SADs use the lower 8-bit quadruplet of the lane from "a", and the last two SADs use the uppper 8-bit quadruplet of the lane from "a". Quadruplets from "b" are selected from within 128-bit lanes according to the control in "imm8", and each SAD in each 64-bit lane uses the selected quadruplet at 8-bit offsets. + +FOR i := 0 to 3 + tmp.m128[i].dword[0] := b.m128[i].dword[ imm8[1:0] ] + tmp.m128[i].dword[1] := b.m128[i].dword[ imm8[3:2] ] + tmp.m128[i].dword[2] := b.m128[i].dword[ imm8[5:4] ] + tmp.m128[i].dword[3] := b.m128[i].dword[ imm8[7:6] ] +ENDFOR +FOR j := 0 to 7 + i := j*64 + tmp_dst[i+15:i] := ABS(a[i+7:i] - tmp[i+7:i]) + ABS(a[i+15:i+8] - tmp[i+15:i+8]) +\ + ABS(a[i+23:i+16] - tmp[i+23:i+16]) + ABS(a[i+31:i+24] - tmp[i+31:i+24]) + + tmp_dst[i+31:i+16] := ABS(a[i+7:i] - tmp[i+15:i+8]) + ABS(a[i+15:i+8] - tmp[i+23:i+16]) +\ + ABS(a[i+23:i+16] - tmp[i+31:i+24]) + ABS(a[i+31:i+24] - tmp[i+39:i+32]) + + tmp_dst[i+47:i+32] := ABS(a[i+39:i+32] - tmp[i+23:i+16]) + ABS(a[i+47:i+40] - tmp[i+31:i+24]) +\ + ABS(a[i+55:i+48] - tmp[i+39:i+32]) + ABS(a[i+63:i+56] - tmp[i+47:i+40]) + + tmp_dst[i+63:i+48] := ABS(a[i+39:i+32] - tmp[i+31:i+24]) + ABS(a[i+47:i+40] - tmp[i+39:i+32]) +\ + ABS(a[i+55:i+48] - tmp[i+47:i+40]) + ABS(a[i+63:i+56] - tmp[i+55:i+48]) +ENDFOR +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512BW +
immintrin.h
+ Miscellaneous
- - - - - - Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit - integers in "a" compared to those in "b", and store the 16-bit results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two - SADs use the lower 8-bit quadruplet of the lane from "a", and the last two SADs use the - uppper 8-bit quadruplet of the lane from "a". Quadruplets from "b" are selected from - within 128-bit lanes according to the control in "imm8", and each SAD in each 64-bit - lane uses the selected quadruplet at 8-bit offsets. - - FOR i := 0 to 3 - tmp.m128[i].dword[0] := b.m128[i].dword[ imm8[1:0] ] - tmp.m128[i].dword[1] := b.m128[i].dword[ imm8[3:2] ] - tmp.m128[i].dword[2] := b.m128[i].dword[ imm8[5:4] ] - tmp.m128[i].dword[3] := b.m128[i].dword[ imm8[7:6] ] - ENDFOR - FOR j := 0 to 7 - i := j*64 - tmp_dst[i+15:i] := ABS(a[i+7:i] - tmp[i+7:i]) + ABS(a[i+15:i+8] - tmp[i+15:i+8]) +\ - ABS(a[i+23:i+16] - tmp[i+23:i+16]) + ABS(a[i+31:i+24] - tmp[i+31:i+24]) - tmp_dst[i+31:i+16] := ABS(a[i+7:i] - tmp[i+15:i+8]) + ABS(a[i+15:i+8] - tmp[i+23:i+16]) - +\ - ABS(a[i+23:i+16] - tmp[i+31:i+24]) + ABS(a[i+31:i+24] - tmp[i+39:i+32]) - tmp_dst[i+47:i+32] := ABS(a[i+39:i+32] - tmp[i+23:i+16]) + ABS(a[i+47:i+40] - - tmp[i+31:i+24]) +\ - ABS(a[i+55:i+48] - tmp[i+39:i+32]) + ABS(a[i+63:i+56] - tmp[i+47:i+40]) - tmp_dst[i+63:i+48] := ABS(a[i+39:i+32] - tmp[i+31:i+24]) + ABS(a[i+47:i+40] - - tmp[i+39:i+32]) +\ - ABS(a[i+55:i+48] - tmp[i+47:i+40]) + ABS(a[i+63:i+56] - tmp[i+55:i+48]) - ENDFOR - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := tmp_dst[i+15:i] - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512BW - + + + + + + Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in "a" compared to those in "b", and store the 16-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two SADs use the lower 8-bit quadruplet of the lane from "a", and the last two SADs use the upper 8-bit quadruplet of the lane from "a". Quadruplets from "b" are selected from within 128-bit lanes according to the control in "imm8", and each SAD in each 64-bit lane uses the selected quadruplet at 8-bit offsets. +
+FOR i := 0 to 3
+	tmp.m128[i].dword[0] := b.m128[i].dword[ imm8[1:0] ]
+	tmp.m128[i].dword[1] := b.m128[i].dword[ imm8[3:2] ]
+	tmp.m128[i].dword[2] := b.m128[i].dword[ imm8[5:4] ]
+	tmp.m128[i].dword[3] := b.m128[i].dword[ imm8[7:6] ]
+ENDFOR
+FOR j := 0 to 7
+	i := j*64
+	tmp_dst[i+15:i] := ABS(a[i+7:i] - tmp[i+7:i]) + ABS(a[i+15:i+8] - tmp[i+15:i+8]) +\
+	                   ABS(a[i+23:i+16] - tmp[i+23:i+16]) + ABS(a[i+31:i+24] - tmp[i+31:i+24])
+
+	tmp_dst[i+31:i+16] := ABS(a[i+7:i] - tmp[i+15:i+8]) + ABS(a[i+15:i+8] - tmp[i+23:i+16]) +\
+	                      ABS(a[i+23:i+16] - tmp[i+31:i+24]) + ABS(a[i+31:i+24] - tmp[i+39:i+32])
+
+	tmp_dst[i+47:i+32] := ABS(a[i+39:i+32] - tmp[i+23:i+16]) + ABS(a[i+47:i+40] - tmp[i+31:i+24]) +\
+	                      ABS(a[i+55:i+48] - tmp[i+39:i+32]) + ABS(a[i+63:i+56] - tmp[i+47:i+40])
+
+	tmp_dst[i+63:i+48] := ABS(a[i+39:i+32] - tmp[i+31:i+24]) + ABS(a[i+47:i+40] - tmp[i+39:i+32]) +\
+	                      ABS(a[i+55:i+48] - tmp[i+47:i+40]) + ABS(a[i+63:i+56] - tmp[i+55:i+48])
+ENDFOR
+FOR j := 0 to 31
+	i := j*16
+	IF k[j]
+		dst[i+15:i] := tmp_dst[i+15:i]
+	ELSE
+		dst[i+15:i] := 0
+	FI
+ENDFOR
+dst[MAX:512] := 0
+
+
+	AVX512BW
immintrin.h
- Miscellaneous + + + + + + Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in "a" compared to those in "b", and store the 16-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two SADs use the lower 8-bit quadruplet of the lane from "a", and the last two SADs use the uppper 8-bit quadruplet of the lane from "a". Quadruplets from "b" are selected from within 128-bit lanes according to the control in "imm8", and each SAD in each 64-bit lane uses the selected quadruplet at 8-bit offsets. + +FOR i := 0 to 3 + tmp.m128[i].dword[0] := b.m128[i].dword[ imm8[1:0] ] + tmp.m128[i].dword[1] := b.m128[i].dword[ imm8[3:2] ] + tmp.m128[i].dword[2] := b.m128[i].dword[ imm8[5:4] ] + tmp.m128[i].dword[3] := b.m128[i].dword[ imm8[7:6] ] +ENDFOR +FOR j := 0 to 7 + i := j*64 + tmp_dst[i+15:i] := ABS(a[i+7:i] - tmp[i+7:i]) + ABS(a[i+15:i+8] - tmp[i+15:i+8]) +\ + ABS(a[i+23:i+16] - tmp[i+23:i+16]) + ABS(a[i+31:i+24] - tmp[i+31:i+24]) + + tmp_dst[i+31:i+16] := ABS(a[i+7:i] - tmp[i+15:i+8]) + ABS(a[i+15:i+8] - tmp[i+23:i+16]) +\ + ABS(a[i+23:i+16] - tmp[i+31:i+24]) + ABS(a[i+31:i+24] - tmp[i+39:i+32]) + + tmp_dst[i+47:i+32] := ABS(a[i+39:i+32] - tmp[i+23:i+16]) + ABS(a[i+47:i+40] - tmp[i+31:i+24]) +\ + ABS(a[i+55:i+48] - tmp[i+39:i+32]) + ABS(a[i+63:i+56] - tmp[i+47:i+40]) + + tmp_dst[i+63:i+48] := ABS(a[i+39:i+32] - tmp[i+31:i+24]) + ABS(a[i+47:i+40] - tmp[i+39:i+32]) +\ + ABS(a[i+55:i+48] - tmp[i+47:i+40]) + ABS(a[i+63:i+56] - tmp[i+55:i+48]) +ENDFOR +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512BW +
immintrin.h
+ Miscellaneous
- - - - - Concatenate pairs of 16-byte blocks in "a" and "b" into a 32-byte temporary - result, shift the result right by "imm8" bytes, and store the low 16 bytes in "dst". - - FOR j := 0 to 3 - i := j*128 - tmp[255:0] := ((a[i+127:i] << 128)[255:0] OR b[i+127:i]) >> (imm8*8) - dst[i+127:i] := tmp[127:0] - ENDFOR - dst[MAX:512] := 0 - - - AVX512BW -
immintrin.h
- Miscellaneous + + + + + Concatenate pairs of 16-byte blocks in "a" and "b" into a 32-byte temporary result, shift the result right by "imm8" bytes, and store the low 16 bytes in "dst". + +FOR j := 0 to 3 + i := j*128 + tmp[255:0] := ((a[i+127:i] << 128)[255:0] OR b[i+127:i]) >> (imm8*8) + dst[i+127:i] := tmp[127:0] +ENDFOR +dst[MAX:512] := 0 + + + AVX512BW +
immintrin.h
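Flattened to bytes, the 512-bit alignr treats each 128-bit lane independently: the lane from "b" forms the low half of a 32-byte buffer, the lane from "a" the high half, and the result is that buffer shifted right by "imm8" bytes. A scalar sketch (illustrative only, name hypothetical):

    // Scalar model of the per-lane concatenate-and-shift above.
    fn alignr_epi8(a: [u8; 64], b: [u8; 64], imm8: usize) -> [u8; 64] {
        let mut dst = [0u8; 64];
        for lane in 0..4 {
            // 32-byte temporary: b_lane in the low half, a_lane in the high half.
            let mut tmp = [0u8; 32];
            tmp[..16].copy_from_slice(&b[lane * 16..lane * 16 + 16]);
            tmp[16..].copy_from_slice(&a[lane * 16..lane * 16 + 16]);
            // Byte shift right by imm8; bytes shifted in from above are zero.
            for t in 0..16 {
                dst[lane * 16 + t] = if t + imm8 < 32 { tmp[t + imm8] } else { 0 };
            }
        }
        dst
    }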
+ Miscellaneous
- - - - - - - Concatenate pairs of 16-byte blocks in "a" and "b" into a 32-byte temporary - result, shift the result right by "imm8" bytes, and store the low 16 bytes in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - FOR j := 0 to 3 - i := j*128 - tmp[255:0] := ((a[i+127:i] << 128)[255:0] OR b[i+127:i]) >> (imm8*8) - tmp_dst[i+127:i] := tmp[127:0] - ENDFOR - FOR j := 0 to 63 - i := j*8 - IF k[j] - dst[i+7:i] := tmp_dst[i+7:i] - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512BW -
immintrin.h
- Miscellaneous + + + + + + + Concatenate pairs of 16-byte blocks in "a" and "b" into a 32-byte temporary result, shift the result right by "imm8" bytes, and store the low 16 bytes in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*128 + tmp[255:0] := ((a[i+127:i] << 128)[255:0] OR b[i+127:i]) >> (imm8*8) + tmp_dst[i+127:i] := tmp[127:0] +ENDFOR +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512BW +
immintrin.h
+ Miscellaneous
- - - - - - Concatenate pairs of 16-byte blocks in "a" and "b" into a 32-byte temporary - result, shift the result right by "imm8" bytes, and store the low 16 bytes in "dst" - using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*128 - tmp[255:0] := ((a[i+127:i] << 128)[255:0] OR b[i+127:i]) >> (imm8*8) - tmp_dst[i+127:i] := tmp[127:0] - ENDFOR - FOR j := 0 to 63 - i := j*8 - IF k[j] - dst[i+7:i] := tmp_dst[i+7:i] - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512BW -
immintrin.h
- Miscellaneous + + + + + + Concatenate pairs of 16-byte blocks in "a" and "b" into a 32-byte temporary result, shift the result right by "imm8" bytes, and store the low 16 bytes in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*128 + tmp[255:0] := ((a[i+127:i] << 128)[255:0] OR b[i+127:i]) >> (imm8*8) + tmp_dst[i+127:i] := tmp[127:0] +ENDFOR +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := tmp_dst[i+7:i] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512BW +
immintrin.h
+ Miscellaneous
- - - - - Blend packed 8-bit integers from "a" and "b" using control mask "k", and store - the results in "dst". - - FOR j := 0 to 63 - i := j*8 - IF k[j] - dst[i+7:i] := b[i+7:i] - ELSE - dst[i+7:i] := a[i+7:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512BW -
immintrin.h
- Miscellaneous + + + + + Blend packed 8-bit integers from "a" and "b" using control mask "k", and store the results in "dst". + +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := b[i+7:i] + ELSE + dst[i+7:i] := a[i+7:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512BW +
immintrin.h
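The blend is a pure per-element select on the mask bits; a scalar sketch (mask modeled as u64, illustrative only):

    // Scalar model of the byte blend above: bit j of the mask selects
    // between a (0) and b (1) for element j.
    fn mask_blend_epi8(k: u64, a: [i8; 64], b: [i8; 64]) -> [i8; 64] {
        let mut dst = [0i8; 64];
        for j in 0..64 {
            dst[j] = if (k >> j) & 1 == 1 { b[j] } else { a[j] };
        }
        dst
    }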
+ Miscellaneous
- - - - - Blend packed 16-bit integers from "a" and "b" using control mask "k", and store - the results in "dst". - - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := b[i+15:i] - ELSE - dst[i+15:i] := a[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512BW -
immintrin.h
- Miscellaneous + + + + + Blend packed 16-bit integers from "a" and "b" using control mask "k", and store the results in "dst". + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := b[i+15:i] + ELSE + dst[i+15:i] := a[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512BW +
immintrin.h
+ Miscellaneous
- - - Broadcast the low packed 8-bit integer from "a" to all elements of "dst". - - FOR j := 0 to 63 - i := j*8 - dst[i+7:i] := a[7:0] - ENDFOR - dst[MAX:512] := 0 - - - AVX512BW -
immintrin.h
- Miscellaneous + + + Broadcast the low packed 8-bit integer from "a" to all elements of "dst". + +FOR j := 0 to 63 + i := j*8 + dst[i+7:i] := a[7:0] +ENDFOR +dst[MAX:512] := 0 + + + AVX512BW +
immintrin.h
+ Miscellaneous
- - - - - Broadcast the low packed 8-bit integer from "a" to all elements of "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - FOR j := 0 to 63 - i := j*8 - IF k[j] - dst[i+7:i] := a[7:0] - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512BW -
immintrin.h
- Miscellaneous + + + + + Broadcast the low packed 8-bit integer from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := a[7:0] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512BW +
immintrin.h
+ Miscellaneous
- - - - Broadcast the low packed 8-bit integer from "a" to all elements of "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 63 - i := j*8 - IF k[j] - dst[i+7:i] := a[7:0] - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512BW -
immintrin.h
- Miscellaneous + + + + Broadcast the low packed 8-bit integer from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := a[7:0] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512BW +
immintrin.h
+ Miscellaneous
- - - Broadcast the low packed 16-bit integer from "a" to all elements of "dst". - - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := a[15:0] - ENDFOR - dst[MAX:512] := 0 - - - AVX512BW -
immintrin.h
- Miscellaneous + + + Broadcast the low packed 16-bit integer from "a" to all elements of "dst". + +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := a[15:0] +ENDFOR +dst[MAX:512] := 0 + + + AVX512BW +
immintrin.h
+ Miscellaneous
- - - - - Broadcast the low packed 16-bit integer from "a" to all elements of "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := a[15:0] - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512BW -
immintrin.h
- Miscellaneous + + + + + Broadcast the low packed 16-bit integer from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := a[15:0] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512BW +
immintrin.h
+ Miscellaneous
- - - - Broadcast the low packed 16-bit integer from "a" to all elements of "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := a[15:0] - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512BW -
immintrin.h
- Miscellaneous + + + + Broadcast the low packed 16-bit integer from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := a[15:0] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512BW +
immintrin.h
+ Miscellaneous
- - - - - - Shuffle 16-bit integers in "a" and "b" across lanes using the corresponding - selector and index in "idx", and store the results in "dst" using writemask "k" - (elements are copied from "idx" when the corresponding mask bit is not set). - - FOR j := 0 to 31 - i := j*16 - IF k[j] - off := 16*idx[i+4:i] - dst[i+15:i] := idx[i+5] ? b[off+15:off] : a[off+15:off] - ELSE - dst[i+15:i] := idx[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512BW -
immintrin.h
- Miscellaneous + + + + + + Shuffle 16-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + off := 16*idx[i+4:i] + dst[i+15:i] := idx[i+5] ? b[off+15:off] : a[off+15:off] + ELSE + dst[i+15:i] := idx[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512BW +
immintrin.h
+ Miscellaneous
- - - - - - Shuffle 16-bit integers in "a" and "b" across lanes using the corresponding - selector and index in "idx", and store the results in "dst" using writemask "k" - (elements are copied from "a" when the corresponding mask bit is not set). - - FOR j := 0 to 31 - i := j*16 - IF k[j] - off := 16*idx[i+4:i] - dst[i+15:i] := idx[i+5] ? b[off+15:off] : a[off+15:off] - ELSE - dst[i+15:i] := a[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512BW -
immintrin.h
- Miscellaneous + + + + + + Shuffle 16-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + off := 16*idx[i+4:i] + dst[i+15:i] := idx[i+5] ? b[off+15:off] : a[off+15:off] + ELSE + dst[i+15:i] := a[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512BW +
immintrin.h
+ Miscellaneous
- - - - - - Shuffle 16-bit integers in "a" and "b" across lanes using the corresponding - selector and index in "idx", and store the results in "dst" using zeromask "k" (elements - are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 31 - i := j*16 - IF k[j] - off := 16*idx[i+4:i] - dst[i+15:i] := idx[i+5] ? b[off+15:off] : a[off+15:off] - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - - AVX512BW -
immintrin.h
- Miscellaneous + + + + + + Shuffle 16-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + off := 16*idx[i+4:i] + dst[i+15:i] := idx[i+5] ? b[off+15:off] : a[off+15:off] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + + AVX512BW +
immintrin.h
+ Miscellaneous
- - - - - Shuffle 16-bit integers in "a" and "b" across lanes using the corresponding - selector and index in "idx", and store the results in "dst". - - FOR j := 0 to 31 - i := j*16 - off := 16*idx[i+4:i] - dst[i+15:i] := idx[i+5] ? b[off+15:off] : a[off+15:off] - ENDFOR - dst[MAX:512] := 0 - - - - AVX512BW -
immintrin.h
- Miscellaneous + + + + + Shuffle 16-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst". + +FOR j := 0 to 31 + i := j*16 + off := 16*idx[i+4:i] + dst[i+15:i] := idx[i+5] ? b[off+15:off] : a[off+15:off] +ENDFOR +dst[MAX:512] := 0 + + + + AVX512BW +
immintrin.h
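For the two-source 16-bit permute, each index uses six bits: the low five select one of 32 elements, and bit 5 selects the source vector ("a" or "b"). A scalar sketch (illustrative only, name hypothetical):

    // Scalar model of the two-source permute above.
    fn permutex2var_epi16(a: [u16; 32], idx: [u16; 32], b: [u16; 32]) -> [u16; 32] {
        let mut dst = [0u16; 32];
        for j in 0..32 {
            let off = (idx[j] & 0x1F) as usize; // element index within the source
            dst[j] = if idx[j] & 0x20 != 0 { b[off] } else { a[off] };
        }
        dst
    }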
+ Miscellaneous
- - - - - - Shuffle 16-bit integers in "a" across lanes using the corresponding index in - "idx", and store the results in "dst" using writemask "k" (elements are copied from - "src" when the corresponding mask bit is not set). - - FOR j := 0 to 31 - i := j*16 - id := idx[i+4:i]*16 - IF k[j] - dst[i+15:i] := a[id+15:id] - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512BW -
immintrin.h
- Miscellaneous + + + + + + Shuffle 16-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + id := idx[i+4:i]*16 + IF k[j] + dst[i+15:i] := a[id+15:id] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512BW +
immintrin.h
+ Miscellaneous
- - - - - Shuffle 16-bit integers in "a" across lanes using the corresponding index in - "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when - the corresponding mask bit is not set). - - FOR j := 0 to 31 - i := j*16 - id := idx[i+4:i]*16 - IF k[j] - dst[i+15:i] := a[id+15:id] - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512BW -
immintrin.h
- Miscellaneous + + + + + Shuffle 16-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + id := idx[i+4:i]*16 + IF k[j] + dst[i+15:i] := a[id+15:id] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512BW +
immintrin.h
+ Miscellaneous
- - - - Shuffle 16-bit integers in "a" across lanes using the corresponding index in - "idx", and store the results in "dst". - - FOR j := 0 to 31 - i := j*16 - id := idx[i+4:i]*16 - dst[i+15:i] := a[id+15:id] - ENDFOR - dst[MAX:512] := 0 - - - AVX512BW -
immintrin.h
- Miscellaneous + + + + Shuffle 16-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst". + +FOR j := 0 to 31 + i := j*16 + id := idx[i+4:i]*16 + dst[i+15:i] := a[id+15:id] +ENDFOR +dst[MAX:512] := 0 + + + AVX512BW +
immintrin.h
+ Miscellaneous
- - - Set each bit of mask register "k" based on the most significant bit of the - corresponding packed 8-bit integer in "a". - - FOR j := 0 to 63 - i := j*8 - IF a[i+7] - k[j] := 1 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:64] := 0 - - - AVX512BW -
immintrin.h
- Miscellaneous + + + Set each bit of mask register "k" based on the most significant bit of the corresponding packed 8-bit integer in "a". + +FOR j := 0 to 63 + i := j*8 + IF a[i+7] + k[j] := 1 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:64] := 0 + + + AVX512BW +
immintrin.h
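The vector-to-mask direction simply gathers sign bits into a mask; a scalar sketch (mask modeled as u64, illustrative only):

    // Scalar model of the MSB gather above: bit j of the result is the
    // sign bit of byte j.
    fn movepi8_mask(a: [i8; 64]) -> u64 {
        let mut k = 0u64;
        for j in 0..64 {
            if a[j] < 0 {
                k |= 1u64 << j;
            }
        }
        k
    }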
+ Miscellaneous
- - - Set each packed 8-bit integer in "dst" to all ones or all zeros based on the - value of the corresponding bit in "k". - - FOR j := 0 to 63 - i := j*8 - IF k[j] - dst[i+7:i] := 0xFF - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512BW -
immintrin.h
- Miscellaneous + + + Set each packed 8-bit integer in "dst" to all ones or all zeros based on the value of the corresponding bit in "k". + +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := 0xFF + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512BW +
immintrin.h
+ Miscellaneous
- - - Set each packed 16-bit integer in "dst" to all ones or all zeros based on the - value of the corresponding bit in "k". - - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := 0xFFFF - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512BW -
immintrin.h
- Miscellaneous + + + Set each packed 16-bit integer in "dst" to all ones or all zeros based on the value of the corresponding bit in "k". + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := 0xFFFF + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512BW +
immintrin.h
+ Miscellaneous
- - - Set each bit of mask register "k" based on the most significant bit of the - corresponding packed 16-bit integer in "a". - - FOR j := 0 to 31 - i := j*16 - IF a[i+15] - k[j] := 1 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:32] := 0 - - - AVX512BW -
immintrin.h
- Miscellaneous + + + Set each bit of mask register "k" based on the most significant bit of the corresponding packed 16-bit integer in "a". + +FOR j := 0 to 31 + i := j*16 + IF a[i+15] + k[j] := 1 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + + AVX512BW +
immintrin.h
+ Miscellaneous
- - - - Compute the absolute differences of packed unsigned 8-bit integers in "a" and - "b", then horizontally sum each consecutive 8 differences to produce eight unsigned - 16-bit integers, and pack these unsigned 16-bit integers in the low 16 bits of 64-bit - elements in "dst". - - FOR j := 0 to 63 - i := j*8 - tmp[i+7:i] := ABS(a[i+7:i] - b[i+7:i]) - ENDFOR - FOR j := 0 to 7 - i := j*64 - dst[i+15:i] := tmp[i+7:i] + tmp[i+15:i+8] + tmp[i+23:i+16] + tmp[i+31:i+24] + \ - tmp[i+39:i+32] + tmp[i+47:i+40] + tmp[i+55:i+48] + tmp[i+63:i+56] - dst[i+63:i+16] := 0 - ENDFOR - dst[MAX:512] := 0 - - - AVX512BW -
immintrin.h
- Miscellaneous + + + + Compute the absolute differences of packed unsigned 8-bit integers in "a" and "b", then horizontally sum each consecutive 8 differences to produce eight unsigned 16-bit integers, and pack these unsigned 16-bit integers in the low 16 bits of 64-bit elements in "dst". + +FOR j := 0 to 63 + i := j*8 + tmp[i+7:i] := ABS(a[i+7:i] - b[i+7:i]) +ENDFOR +FOR j := 0 to 7 + i := j*64 + dst[i+15:i] := tmp[i+7:i] + tmp[i+15:i+8] + tmp[i+23:i+16] + tmp[i+31:i+24] + \ + tmp[i+39:i+32] + tmp[i+47:i+40] + tmp[i+55:i+48] + tmp[i+63:i+56] + dst[i+63:i+16] := 0 +ENDFOR +dst[MAX:512] := 0 + + + AVX512BW +
immintrin.h
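The psadbw-style operation sums eight absolute byte differences into the low 16 bits of each 64-bit chunk, zeroing the rest of the chunk; a scalar sketch (illustrative only):

    // Scalar model of the SAD pseudocode above, result as eight 64-bit
    // elements whose upper 48 bits stay zero.
    fn sad_epu8(a: [u8; 64], b: [u8; 64]) -> [u64; 8] {
        let mut dst = [0u64; 8];
        for j in 0..8 {
            let mut sum = 0u64;
            for t in 0..8 {
                let i = j * 8 + t;
                sum += (a[i] as i16 - b[i] as i16).unsigned_abs() as u64;
            }
            dst[j] = sum; // fits easily in the chunk's low 16 bits
        }
        dst
    }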
+ Miscellaneous
- - - - - - Shuffle 8-bit integers in "a" within 128-bit lanes using the control in the - corresponding 8-bit element of "b", and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 63 - i := j*8 - IF k[j] - IF b[i+7] == 1 + + + + + + Shuffle 8-bit integers in "a" within 128-bit lanes using the control in the corresponding 8-bit element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k[j] + IF b[i+7] == 1 dst[i+7:i] := 0 - ELSE + ELSE index[5:0] := b[i+3:i] + (j & 0x30) dst[i+7:i] := a[index*8+7:index*8] - FI - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512BW -
immintrin.h
- Swizzle + FI + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512BW +
immintrin.h
+ Swizzle
- - - - - Shuffle packed 8-bit integers in "a" according to shuffle control mask in the - corresponding 8-bit element of "b", and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 63 - i := j*8 - IF k[j] - IF b[i+7] == 1 + + + + + Shuffle packed 8-bit integers in "a" according to shuffle control mask in the corresponding 8-bit element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k[j] + IF b[i+7] == 1 dst[i+7:i] := 0 - ELSE + ELSE index[5:0] := b[i+3:i] + (j & 0x30) dst[i+7:i] := a[index*8+7:index*8] - FI - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512BW -
immintrin.h
- Swizzle + FI + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512BW +
immintrin.h
+ Swizzle
- - - - Shuffle packed 8-bit integers in "a" according to shuffle control mask in the - corresponding 8-bit element of "b", and store the results in "dst". - - FOR j := 0 to 63 - i := j*8 - IF b[i+7] == 1 - dst[i+7:i] := 0 - ELSE - index[5:0] := b[i+3:i] + (j & 0x30) - dst[i+7:i] := a[index*8+7:index*8] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512BW -
immintrin.h
- Swizzle + + + + Shuffle packed 8-bit integers in "a" according to shuffle control mask in the corresponding 8-bit element of "b", and store the results in "dst". + +FOR j := 0 to 63 + i := j*8 + IF b[i+7] == 1 + dst[i+7:i] := 0 + ELSE + index[5:0] := b[i+3:i] + (j & 0x30) + dst[i+7:i] := a[index*8+7:index*8] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512BW +
immintrin.h
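As in SSSE3's pshufb, the shuffle is lane-local: control bit 7 zeroes the byte, otherwise the low four control bits index within the byte's own 16-byte lane. A scalar sketch (illustrative only):

    // Scalar model of the 512-bit byte shuffle above; (j & 0x30) is the
    // base of the 16-byte lane that byte j lives in.
    fn shuffle_epi8(a: [u8; 64], b: [u8; 64]) -> [u8; 64] {
        let mut dst = [0u8; 64];
        for j in 0..64 {
            if b[j] & 0x80 != 0 {
                dst[j] = 0;
            } else {
                let index = (b[j] & 0x0F) as usize + (j & 0x30);
                dst[j] = a[index];
            }
        }
        dst
    }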
+ Swizzle
- - - - - - Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of "a" using the - control in "imm8". Store the results in the high 64 bits of 128-bit lanes of "dst", with - the low 64 bits of 128-bit lanes being copied from from "a" to "dst", using writemask - "k" (elements are copied from "src" when the corresponding mask bit is not set). - - tmp_dst[63:0] := a[63:0] - tmp_dst[79:64] := (a >> (imm8[1:0] * 16))[79:64] - tmp_dst[95:80] := (a >> (imm8[3:2] * 16))[79:64] - tmp_dst[111:96] := (a >> (imm8[5:4] * 16))[79:64] - tmp_dst[127:112] := (a >> (imm8[7:6] * 16))[79:64] - tmp_dst[191:128] := a[191:128] - tmp_dst[207:192] := (a >> (imm8[1:0] * 16))[207:192] - tmp_dst[223:208] := (a >> (imm8[3:2] * 16))[207:192] - tmp_dst[239:224] := (a >> (imm8[5:4] * 16))[207:192] - tmp_dst[255:240] := (a >> (imm8[7:6] * 16))[207:192] - tmp_dst[319:256] := a[319:256] - tmp_dst[335:320] := (a >> (imm8[1:0] * 16))[335:320] - tmp_dst[351:336] := (a >> (imm8[3:2] * 16))[335:320] - tmp_dst[367:352] := (a >> (imm8[5:4] * 16))[335:320] - tmp_dst[383:368] := (a >> (imm8[7:6] * 16))[335:320] - tmp_dst[447:384] := a[447:384] - tmp_dst[463:448] := (a >> (imm8[1:0] * 16))[463:448] - tmp_dst[479:464] := (a >> (imm8[3:2] * 16))[463:448] - tmp_dst[495:480] := (a >> (imm8[5:4] * 16))[463:448] - tmp_dst[511:496] := (a >> (imm8[7:6] * 16))[463:448] - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := tmp_dst[i+15:i] - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512BW - + + + + + + Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of "a" using the control in "imm8". Store the results in the high 64 bits of 128-bit lanes of "dst", with the low 64 bits of 128-bit lanes being copied from "a" to "dst", using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). +
+tmp_dst[63:0] := a[63:0]
+tmp_dst[79:64] := (a >> (imm8[1:0] * 16))[79:64]
+tmp_dst[95:80] := (a >> (imm8[3:2] * 16))[79:64]
+tmp_dst[111:96] := (a >> (imm8[5:4] * 16))[79:64]
+tmp_dst[127:112] := (a >> (imm8[7:6] * 16))[79:64]
+tmp_dst[191:128] := a[191:128]
+tmp_dst[207:192] := (a >> (imm8[1:0] * 16))[207:192]
+tmp_dst[223:208] := (a >> (imm8[3:2] * 16))[207:192]
+tmp_dst[239:224] := (a >> (imm8[5:4] * 16))[207:192]
+tmp_dst[255:240] := (a >> (imm8[7:6] * 16))[207:192]
+tmp_dst[319:256] := a[319:256]
+tmp_dst[335:320] := (a >> (imm8[1:0] * 16))[335:320]
+tmp_dst[351:336] := (a >> (imm8[3:2] * 16))[335:320]
+tmp_dst[367:352] := (a >> (imm8[5:4] * 16))[335:320]
+tmp_dst[383:368] := (a >> (imm8[7:6] * 16))[335:320]
+tmp_dst[447:384] := a[447:384]
+tmp_dst[463:448] := (a >> (imm8[1:0] * 16))[463:448]
+tmp_dst[479:464] := (a >> (imm8[3:2] * 16))[463:448]
+tmp_dst[495:480] := (a >> (imm8[5:4] * 16))[463:448]
+tmp_dst[511:496] := (a >> (imm8[7:6] * 16))[463:448]
+FOR j := 0 to 31
+	i := j*16
+	IF k[j]
+		dst[i+15:i] := tmp_dst[i+15:i]
+	ELSE
+		dst[i+15:i] := src[i+15:i]
+	FI
+ENDFOR
+dst[MAX:512] := 0
+
+
+	AVX512BW
immintrin.h
- Miscellaneous + + + + + + Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of "a" using the control in "imm8". Store the results in the high 64 bits of 128-bit lanes of "dst", with the low 64 bits of 128-bit lanes being copied from from "a" to "dst", using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp_dst[63:0] := a[63:0] +tmp_dst[79:64] := (a >> (imm8[1:0] * 16))[79:64] +tmp_dst[95:80] := (a >> (imm8[3:2] * 16))[79:64] +tmp_dst[111:96] := (a >> (imm8[5:4] * 16))[79:64] +tmp_dst[127:112] := (a >> (imm8[7:6] * 16))[79:64] +tmp_dst[191:128] := a[191:128] +tmp_dst[207:192] := (a >> (imm8[1:0] * 16))[207:192] +tmp_dst[223:208] := (a >> (imm8[3:2] * 16))[207:192] +tmp_dst[239:224] := (a >> (imm8[5:4] * 16))[207:192] +tmp_dst[255:240] := (a >> (imm8[7:6] * 16))[207:192] +tmp_dst[319:256] := a[319:256] +tmp_dst[335:320] := (a >> (imm8[1:0] * 16))[335:320] +tmp_dst[351:336] := (a >> (imm8[3:2] * 16))[335:320] +tmp_dst[367:352] := (a >> (imm8[5:4] * 16))[335:320] +tmp_dst[383:368] := (a >> (imm8[7:6] * 16))[335:320] +tmp_dst[447:384] := a[447:384] +tmp_dst[463:448] := (a >> (imm8[1:0] * 16))[463:448] +tmp_dst[479:464] := (a >> (imm8[3:2] * 16))[463:448] +tmp_dst[495:480] := (a >> (imm8[5:4] * 16))[463:448] +tmp_dst[511:496] := (a >> (imm8[7:6] * 16))[463:448] +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512BW +
immintrin.h
+ Miscellaneous
- - - - - Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of "a" using the - control in "imm8". Store the results in the high 64 bits of 128-bit lanes of "dst", with - the low 64 bits of 128-bit lanes being copied from from "a" to "dst", using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - tmp_dst[63:0] := a[63:0] - tmp_dst[79:64] := (a >> (imm8[1:0] * 16))[79:64] - tmp_dst[95:80] := (a >> (imm8[3:2] * 16))[79:64] - tmp_dst[111:96] := (a >> (imm8[5:4] * 16))[79:64] - tmp_dst[127:112] := (a >> (imm8[7:6] * 16))[79:64] - tmp_dst[191:128] := a[191:128] - tmp_dst[207:192] := (a >> (imm8[1:0] * 16))[207:192] - tmp_dst[223:208] := (a >> (imm8[3:2] * 16))[207:192] - tmp_dst[239:224] := (a >> (imm8[5:4] * 16))[207:192] - tmp_dst[255:240] := (a >> (imm8[7:6] * 16))[207:192] - tmp_dst[319:256] := a[319:256] - tmp_dst[335:320] := (a >> (imm8[1:0] * 16))[335:320] - tmp_dst[351:336] := (a >> (imm8[3:2] * 16))[335:320] - tmp_dst[367:352] := (a >> (imm8[5:4] * 16))[335:320] - tmp_dst[383:368] := (a >> (imm8[7:6] * 16))[335:320] - tmp_dst[447:384] := a[447:384] - tmp_dst[463:448] := (a >> (imm8[1:0] * 16))[463:448] - tmp_dst[479:464] := (a >> (imm8[3:2] * 16))[463:448] - tmp_dst[495:480] := (a >> (imm8[5:4] * 16))[463:448] - tmp_dst[511:496] := (a >> (imm8[7:6] * 16))[463:448] - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := tmp_dst[i+15:i] - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512BW -
immintrin.h
- Miscellaneous + + + + + Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of "a" using the control in "imm8". Store the results in the high 64 bits of 128-bit lanes of "dst", with the low 64 bits of 128-bit lanes being copied from from "a" to "dst", using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp_dst[63:0] := a[63:0] +tmp_dst[79:64] := (a >> (imm8[1:0] * 16))[79:64] +tmp_dst[95:80] := (a >> (imm8[3:2] * 16))[79:64] +tmp_dst[111:96] := (a >> (imm8[5:4] * 16))[79:64] +tmp_dst[127:112] := (a >> (imm8[7:6] * 16))[79:64] +tmp_dst[191:128] := a[191:128] +tmp_dst[207:192] := (a >> (imm8[1:0] * 16))[207:192] +tmp_dst[223:208] := (a >> (imm8[3:2] * 16))[207:192] +tmp_dst[239:224] := (a >> (imm8[5:4] * 16))[207:192] +tmp_dst[255:240] := (a >> (imm8[7:6] * 16))[207:192] +tmp_dst[319:256] := a[319:256] +tmp_dst[335:320] := (a >> (imm8[1:0] * 16))[335:320] +tmp_dst[351:336] := (a >> (imm8[3:2] * 16))[335:320] +tmp_dst[367:352] := (a >> (imm8[5:4] * 16))[335:320] +tmp_dst[383:368] := (a >> (imm8[7:6] * 16))[335:320] +tmp_dst[447:384] := a[447:384] +tmp_dst[463:448] := (a >> (imm8[1:0] * 16))[463:448] +tmp_dst[479:464] := (a >> (imm8[3:2] * 16))[463:448] +tmp_dst[495:480] := (a >> (imm8[5:4] * 16))[463:448] +tmp_dst[511:496] := (a >> (imm8[7:6] * 16))[463:448] +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := tmp_dst[i+15:i] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512BW +
immintrin.h
+ Miscellaneous
- - - - Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of "a" using the - control in "imm8". Store the results in the high 64 bits of 128-bit lanes of "dst", with - the low 64 bits of 128-bit lanes being copied from from "a" to "dst". - - dst[63:0] := a[63:0] - dst[79:64] := (a >> (imm8[1:0] * 16))[79:64] - dst[95:80] := (a >> (imm8[3:2] * 16))[79:64] - dst[111:96] := (a >> (imm8[5:4] * 16))[79:64] - dst[127:112] := (a >> (imm8[7:6] * 16))[79:64] - dst[191:128] := a[191:128] - dst[207:192] := (a >> (imm8[1:0] * 16))[207:192] - dst[223:208] := (a >> (imm8[3:2] * 16))[207:192] - dst[239:224] := (a >> (imm8[5:4] * 16))[207:192] - dst[255:240] := (a >> (imm8[7:6] * 16))[207:192] - dst[319:256] := a[319:256] - dst[335:320] := (a >> (imm8[1:0] * 16))[335:320] - dst[351:336] := (a >> (imm8[3:2] * 16))[335:320] - dst[367:352] := (a >> (imm8[5:4] * 16))[335:320] - dst[383:368] := (a >> (imm8[7:6] * 16))[335:320] - dst[447:384] := a[447:384] - dst[463:448] := (a >> (imm8[1:0] * 16))[463:448] - dst[479:464] := (a >> (imm8[3:2] * 16))[463:448] - dst[495:480] := (a >> (imm8[5:4] * 16))[463:448] - dst[511:496] := (a >> (imm8[7:6] * 16))[463:448] - dst[MAX:512] := 0 - - - AVX512BW -
immintrin.h
- Miscellaneous + + + + Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of "a" using the control in "imm8". Store the results in the high 64 bits of 128-bit lanes of "dst", with the low 64 bits of 128-bit lanes being copied from from "a" to "dst". + +dst[63:0] := a[63:0] +dst[79:64] := (a >> (imm8[1:0] * 16))[79:64] +dst[95:80] := (a >> (imm8[3:2] * 16))[79:64] +dst[111:96] := (a >> (imm8[5:4] * 16))[79:64] +dst[127:112] := (a >> (imm8[7:6] * 16))[79:64] +dst[191:128] := a[191:128] +dst[207:192] := (a >> (imm8[1:0] * 16))[207:192] +dst[223:208] := (a >> (imm8[3:2] * 16))[207:192] +dst[239:224] := (a >> (imm8[5:4] * 16))[207:192] +dst[255:240] := (a >> (imm8[7:6] * 16))[207:192] +dst[319:256] := a[319:256] +dst[335:320] := (a >> (imm8[1:0] * 16))[335:320] +dst[351:336] := (a >> (imm8[3:2] * 16))[335:320] +dst[367:352] := (a >> (imm8[5:4] * 16))[335:320] +dst[383:368] := (a >> (imm8[7:6] * 16))[335:320] +dst[447:384] := a[447:384] +dst[463:448] := (a >> (imm8[1:0] * 16))[463:448] +dst[479:464] := (a >> (imm8[3:2] * 16))[463:448] +dst[495:480] := (a >> (imm8[5:4] * 16))[463:448] +dst[511:496] := (a >> (imm8[7:6] * 16))[463:448] +dst[MAX:512] := 0 + + + AVX512BW +
immintrin.h
+ Miscellaneous
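A per-lane sketch of the high-word shuffle, viewing the register as 32 u16 elements (8 per 128-bit lane); the shifts in the pseudocode reduce to selecting one of the four high words in each lane. Names are illustrative, not the stdarch implementation:

fn shufflehi_epi16_model(a: [u16; 32], imm8: u8) -> [u16; 32] {
    let mut dst = a; // the low 4 words of every lane pass through unchanged
    for lane in 0..4 {
        let base = lane * 8;
        for p in 0..4 {
            let sel = ((imm8 >> (2 * p)) & 0b11) as usize;
            dst[base + 4 + p] = a[base + 4 + sel]; // pick among the 4 high words
        }
    }
    dst // the masked forms then select between this and src/zero per element
}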
Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of "a" using the control in "imm8". Store the results in the low 64 bits of 128-bit lanes of "dst", with the high 64 bits of 128-bit lanes being copied from "a" to "dst", using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

tmp_dst[15:0] := (a >> (imm8[1:0] * 16))[15:0]
tmp_dst[31:16] := (a >> (imm8[3:2] * 16))[15:0]
tmp_dst[47:32] := (a >> (imm8[5:4] * 16))[15:0]
tmp_dst[63:48] := (a >> (imm8[7:6] * 16))[15:0]
tmp_dst[127:64] := a[127:64]
tmp_dst[143:128] := (a >> (imm8[1:0] * 16))[143:128]
tmp_dst[159:144] := (a >> (imm8[3:2] * 16))[143:128]
tmp_dst[175:160] := (a >> (imm8[5:4] * 16))[143:128]
tmp_dst[191:176] := (a >> (imm8[7:6] * 16))[143:128]
tmp_dst[255:192] := a[255:192]
tmp_dst[271:256] := (a >> (imm8[1:0] * 16))[271:256]
tmp_dst[287:272] := (a >> (imm8[3:2] * 16))[271:256]
tmp_dst[303:288] := (a >> (imm8[5:4] * 16))[271:256]
tmp_dst[319:304] := (a >> (imm8[7:6] * 16))[271:256]
tmp_dst[383:320] := a[383:320]
tmp_dst[399:384] := (a >> (imm8[1:0] * 16))[399:384]
tmp_dst[415:400] := (a >> (imm8[3:2] * 16))[399:384]
tmp_dst[431:416] := (a >> (imm8[5:4] * 16))[399:384]
tmp_dst[447:432] := (a >> (imm8[7:6] * 16))[399:384]
tmp_dst[511:448] := a[511:448]
FOR j := 0 to 31
    i := j*16
    IF k[j]
        dst[i+15:i] := tmp_dst[i+15:i]
    ELSE
        dst[i+15:i] := src[i+15:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Miscellaneous

Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of "a" using the control in "imm8". Store the results in the low 64 bits of 128-bit lanes of "dst", with the high 64 bits of 128-bit lanes being copied from "a" to "dst", using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

tmp_dst[15:0] := (a >> (imm8[1:0] * 16))[15:0]
tmp_dst[31:16] := (a >> (imm8[3:2] * 16))[15:0]
tmp_dst[47:32] := (a >> (imm8[5:4] * 16))[15:0]
tmp_dst[63:48] := (a >> (imm8[7:6] * 16))[15:0]
tmp_dst[127:64] := a[127:64]
tmp_dst[143:128] := (a >> (imm8[1:0] * 16))[143:128]
tmp_dst[159:144] := (a >> (imm8[3:2] * 16))[143:128]
tmp_dst[175:160] := (a >> (imm8[5:4] * 16))[143:128]
tmp_dst[191:176] := (a >> (imm8[7:6] * 16))[143:128]
tmp_dst[255:192] := a[255:192]
tmp_dst[271:256] := (a >> (imm8[1:0] * 16))[271:256]
tmp_dst[287:272] := (a >> (imm8[3:2] * 16))[271:256]
tmp_dst[303:288] := (a >> (imm8[5:4] * 16))[271:256]
tmp_dst[319:304] := (a >> (imm8[7:6] * 16))[271:256]
tmp_dst[383:320] := a[383:320]
tmp_dst[399:384] := (a >> (imm8[1:0] * 16))[399:384]
tmp_dst[415:400] := (a >> (imm8[3:2] * 16))[399:384]
tmp_dst[431:416] := (a >> (imm8[5:4] * 16))[399:384]
tmp_dst[447:432] := (a >> (imm8[7:6] * 16))[399:384]
tmp_dst[511:448] := a[511:448]
FOR j := 0 to 31
    i := j*16
    IF k[j]
        dst[i+15:i] := tmp_dst[i+15:i]
    ELSE
        dst[i+15:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Miscellaneous

Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of "a" using the control in "imm8". Store the results in the low 64 bits of 128-bit lanes of "dst", with the high 64 bits of 128-bit lanes being copied from "a" to "dst".

dst[15:0] := (a >> (imm8[1:0] * 16))[15:0]
dst[31:16] := (a >> (imm8[3:2] * 16))[15:0]
dst[47:32] := (a >> (imm8[5:4] * 16))[15:0]
dst[63:48] := (a >> (imm8[7:6] * 16))[15:0]
dst[127:64] := a[127:64]
dst[143:128] := (a >> (imm8[1:0] * 16))[143:128]
dst[159:144] := (a >> (imm8[3:2] * 16))[143:128]
dst[175:160] := (a >> (imm8[5:4] * 16))[143:128]
dst[191:176] := (a >> (imm8[7:6] * 16))[143:128]
dst[255:192] := a[255:192]
dst[271:256] := (a >> (imm8[1:0] * 16))[271:256]
dst[287:272] := (a >> (imm8[3:2] * 16))[271:256]
dst[303:288] := (a >> (imm8[5:4] * 16))[271:256]
dst[319:304] := (a >> (imm8[7:6] * 16))[271:256]
dst[383:320] := a[383:320]
dst[399:384] := (a >> (imm8[1:0] * 16))[399:384]
dst[415:400] := (a >> (imm8[3:2] * 16))[399:384]
dst[431:416] := (a >> (imm8[5:4] * 16))[399:384]
dst[447:432] := (a >> (imm8[7:6] * 16))[399:384]
dst[511:448] := a[511:448]
dst[MAX:512] := 0

AVX512BW
immintrin.h
Miscellaneous
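The low-word shuffle mirrors the previous sketch on the other half of each lane; together the two can permute all eight words of a lane. For instance, with imm8 = 0b00_01_10_11 each shuffle reverses its four words. An illustrative model:

fn shufflelo_epi16_model(a: [u16; 32], imm8: u8) -> [u16; 32] {
    let mut dst = a; // the high 4 words of every lane pass through unchanged
    for lane in 0..4 {
        let base = lane * 8;
        for p in 0..4 {
            let sel = ((imm8 >> (2 * p)) & 0b11) as usize;
            dst[base + p] = a[base + sel]; // pick among the 4 low words
        }
    }
    dst
}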
Unpack and interleave 8-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

DEFINE INTERLEAVE_HIGH_BYTES(src1[127:0], src2[127:0]) {
    dst[7:0] := src1[71:64]
    dst[15:8] := src2[71:64]
    dst[23:16] := src1[79:72]
    dst[31:24] := src2[79:72]
    dst[39:32] := src1[87:80]
    dst[47:40] := src2[87:80]
    dst[55:48] := src1[95:88]
    dst[63:56] := src2[95:88]
    dst[71:64] := src1[103:96]
    dst[79:72] := src2[103:96]
    dst[87:80] := src1[111:104]
    dst[95:88] := src2[111:104]
    dst[103:96] := src1[119:112]
    dst[111:104] := src2[119:112]
    dst[119:112] := src1[127:120]
    dst[127:120] := src2[127:120]
    RETURN dst[127:0]
}
tmp_dst[127:0] := INTERLEAVE_HIGH_BYTES(a[127:0], b[127:0])
tmp_dst[255:128] := INTERLEAVE_HIGH_BYTES(a[255:128], b[255:128])
tmp_dst[383:256] := INTERLEAVE_HIGH_BYTES(a[383:256], b[383:256])
tmp_dst[511:384] := INTERLEAVE_HIGH_BYTES(a[511:384], b[511:384])
FOR j := 0 to 63
    i := j*8
    IF k[j]
        dst[i+7:i] := tmp_dst[i+7:i]
    ELSE
        dst[i+7:i] := src[i+7:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Miscellaneous

Unpack and interleave 8-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

DEFINE INTERLEAVE_HIGH_BYTES(src1[127:0], src2[127:0]) {
    dst[7:0] := src1[71:64]
    dst[15:8] := src2[71:64]
    dst[23:16] := src1[79:72]
    dst[31:24] := src2[79:72]
    dst[39:32] := src1[87:80]
    dst[47:40] := src2[87:80]
    dst[55:48] := src1[95:88]
    dst[63:56] := src2[95:88]
    dst[71:64] := src1[103:96]
    dst[79:72] := src2[103:96]
    dst[87:80] := src1[111:104]
    dst[95:88] := src2[111:104]
    dst[103:96] := src1[119:112]
    dst[111:104] := src2[119:112]
    dst[119:112] := src1[127:120]
    dst[127:120] := src2[127:120]
    RETURN dst[127:0]
}
tmp_dst[127:0] := INTERLEAVE_HIGH_BYTES(a[127:0], b[127:0])
tmp_dst[255:128] := INTERLEAVE_HIGH_BYTES(a[255:128], b[255:128])
tmp_dst[383:256] := INTERLEAVE_HIGH_BYTES(a[383:256], b[383:256])
tmp_dst[511:384] := INTERLEAVE_HIGH_BYTES(a[511:384], b[511:384])
FOR j := 0 to 63
    i := j*8
    IF k[j]
        dst[i+7:i] := tmp_dst[i+7:i]
    ELSE
        dst[i+7:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Miscellaneous

Unpack and interleave 8-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst".

DEFINE INTERLEAVE_HIGH_BYTES(src1[127:0], src2[127:0]) {
    dst[7:0] := src1[71:64]
    dst[15:8] := src2[71:64]
    dst[23:16] := src1[79:72]
    dst[31:24] := src2[79:72]
    dst[39:32] := src1[87:80]
    dst[47:40] := src2[87:80]
    dst[55:48] := src1[95:88]
    dst[63:56] := src2[95:88]
    dst[71:64] := src1[103:96]
    dst[79:72] := src2[103:96]
    dst[87:80] := src1[111:104]
    dst[95:88] := src2[111:104]
    dst[103:96] := src1[119:112]
    dst[111:104] := src2[119:112]
    dst[119:112] := src1[127:120]
    dst[127:120] := src2[127:120]
    RETURN dst[127:0]
}
dst[127:0] := INTERLEAVE_HIGH_BYTES(a[127:0], b[127:0])
dst[255:128] := INTERLEAVE_HIGH_BYTES(a[255:128], b[255:128])
dst[383:256] := INTERLEAVE_HIGH_BYTES(a[383:256], b[383:256])
dst[511:384] := INTERLEAVE_HIGH_BYTES(a[511:384], b[511:384])
dst[MAX:512] := 0

AVX512BW
immintrin.h
Miscellaneous
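Every unpack entry in this group, including the 16-bit and low-half variants that follow, is the same lane-wise interleave with a different element size and half selection. One generic sketch covers them all (names and the slice representation are assumptions for illustration):

fn unpack_lanes_model<T: Copy, const HI: bool>(a: &[T], b: &[T]) -> Vec<T> {
    let per_lane = 16 / std::mem::size_of::<T>(); // elements per 128-bit lane
    let mut dst = Vec::with_capacity(a.len());
    for (la, lb) in a.chunks(per_lane).zip(b.chunks(per_lane)) {
        let off = if HI { per_lane / 2 } else { 0 }; // high or low half of the lane
        for e in 0..per_lane / 2 {
            dst.push(la[off + e]); // one element from "a" ...
            dst.push(lb[off + e]); // ... then its counterpart from "b"
        }
    }
    dst // e.g. unpack_lanes_model::<u8, true>(&a, &b) models INTERLEAVE_HIGH_BYTES
}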
Unpack and interleave 16-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

DEFINE INTERLEAVE_HIGH_WORDS(src1[127:0], src2[127:0]) {
    dst[15:0] := src1[79:64]
    dst[31:16] := src2[79:64]
    dst[47:32] := src1[95:80]
    dst[63:48] := src2[95:80]
    dst[79:64] := src1[111:96]
    dst[95:80] := src2[111:96]
    dst[111:96] := src1[127:112]
    dst[127:112] := src2[127:112]
    RETURN dst[127:0]
}
tmp_dst[127:0] := INTERLEAVE_HIGH_WORDS(a[127:0], b[127:0])
tmp_dst[255:128] := INTERLEAVE_HIGH_WORDS(a[255:128], b[255:128])
tmp_dst[383:256] := INTERLEAVE_HIGH_WORDS(a[383:256], b[383:256])
tmp_dst[511:384] := INTERLEAVE_HIGH_WORDS(a[511:384], b[511:384])
FOR j := 0 to 31
    i := j*16
    IF k[j]
        dst[i+15:i] := tmp_dst[i+15:i]
    ELSE
        dst[i+15:i] := src[i+15:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Miscellaneous

Unpack and interleave 16-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

DEFINE INTERLEAVE_HIGH_WORDS(src1[127:0], src2[127:0]) {
    dst[15:0] := src1[79:64]
    dst[31:16] := src2[79:64]
    dst[47:32] := src1[95:80]
    dst[63:48] := src2[95:80]
    dst[79:64] := src1[111:96]
    dst[95:80] := src2[111:96]
    dst[111:96] := src1[127:112]
    dst[127:112] := src2[127:112]
    RETURN dst[127:0]
}
tmp_dst[127:0] := INTERLEAVE_HIGH_WORDS(a[127:0], b[127:0])
tmp_dst[255:128] := INTERLEAVE_HIGH_WORDS(a[255:128], b[255:128])
tmp_dst[383:256] := INTERLEAVE_HIGH_WORDS(a[383:256], b[383:256])
tmp_dst[511:384] := INTERLEAVE_HIGH_WORDS(a[511:384], b[511:384])
FOR j := 0 to 31
    i := j*16
    IF k[j]
        dst[i+15:i] := tmp_dst[i+15:i]
    ELSE
        dst[i+15:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Miscellaneous

Unpack and interleave 16-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst".

DEFINE INTERLEAVE_HIGH_WORDS(src1[127:0], src2[127:0]) {
    dst[15:0] := src1[79:64]
    dst[31:16] := src2[79:64]
    dst[47:32] := src1[95:80]
    dst[63:48] := src2[95:80]
    dst[79:64] := src1[111:96]
    dst[95:80] := src2[111:96]
    dst[111:96] := src1[127:112]
    dst[127:112] := src2[127:112]
    RETURN dst[127:0]
}
dst[127:0] := INTERLEAVE_HIGH_WORDS(a[127:0], b[127:0])
dst[255:128] := INTERLEAVE_HIGH_WORDS(a[255:128], b[255:128])
dst[383:256] := INTERLEAVE_HIGH_WORDS(a[383:256], b[383:256])
dst[511:384] := INTERLEAVE_HIGH_WORDS(a[511:384], b[511:384])
dst[MAX:512] := 0

AVX512BW
immintrin.h
Miscellaneous
Unpack and interleave 8-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

DEFINE INTERLEAVE_BYTES(src1[127:0], src2[127:0]) {
    dst[7:0] := src1[7:0]
    dst[15:8] := src2[7:0]
    dst[23:16] := src1[15:8]
    dst[31:24] := src2[15:8]
    dst[39:32] := src1[23:16]
    dst[47:40] := src2[23:16]
    dst[55:48] := src1[31:24]
    dst[63:56] := src2[31:24]
    dst[71:64] := src1[39:32]
    dst[79:72] := src2[39:32]
    dst[87:80] := src1[47:40]
    dst[95:88] := src2[47:40]
    dst[103:96] := src1[55:48]
    dst[111:104] := src2[55:48]
    dst[119:112] := src1[63:56]
    dst[127:120] := src2[63:56]
    RETURN dst[127:0]
}
tmp_dst[127:0] := INTERLEAVE_BYTES(a[127:0], b[127:0])
tmp_dst[255:128] := INTERLEAVE_BYTES(a[255:128], b[255:128])
tmp_dst[383:256] := INTERLEAVE_BYTES(a[383:256], b[383:256])
tmp_dst[511:384] := INTERLEAVE_BYTES(a[511:384], b[511:384])
FOR j := 0 to 63
    i := j*8
    IF k[j]
        dst[i+7:i] := tmp_dst[i+7:i]
    ELSE
        dst[i+7:i] := src[i+7:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Miscellaneous

Unpack and interleave 8-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

DEFINE INTERLEAVE_BYTES(src1[127:0], src2[127:0]) {
    dst[7:0] := src1[7:0]
    dst[15:8] := src2[7:0]
    dst[23:16] := src1[15:8]
    dst[31:24] := src2[15:8]
    dst[39:32] := src1[23:16]
    dst[47:40] := src2[23:16]
    dst[55:48] := src1[31:24]
    dst[63:56] := src2[31:24]
    dst[71:64] := src1[39:32]
    dst[79:72] := src2[39:32]
    dst[87:80] := src1[47:40]
    dst[95:88] := src2[47:40]
    dst[103:96] := src1[55:48]
    dst[111:104] := src2[55:48]
    dst[119:112] := src1[63:56]
    dst[127:120] := src2[63:56]
    RETURN dst[127:0]
}
tmp_dst[127:0] := INTERLEAVE_BYTES(a[127:0], b[127:0])
tmp_dst[255:128] := INTERLEAVE_BYTES(a[255:128], b[255:128])
tmp_dst[383:256] := INTERLEAVE_BYTES(a[383:256], b[383:256])
tmp_dst[511:384] := INTERLEAVE_BYTES(a[511:384], b[511:384])
FOR j := 0 to 63
    i := j*8
    IF k[j]
        dst[i+7:i] := tmp_dst[i+7:i]
    ELSE
        dst[i+7:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Miscellaneous

Unpack and interleave 8-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst".

DEFINE INTERLEAVE_BYTES(src1[127:0], src2[127:0]) {
    dst[7:0] := src1[7:0]
    dst[15:8] := src2[7:0]
    dst[23:16] := src1[15:8]
    dst[31:24] := src2[15:8]
    dst[39:32] := src1[23:16]
    dst[47:40] := src2[23:16]
    dst[55:48] := src1[31:24]
    dst[63:56] := src2[31:24]
    dst[71:64] := src1[39:32]
    dst[79:72] := src2[39:32]
    dst[87:80] := src1[47:40]
    dst[95:88] := src2[47:40]
    dst[103:96] := src1[55:48]
    dst[111:104] := src2[55:48]
    dst[119:112] := src1[63:56]
    dst[127:120] := src2[63:56]
    RETURN dst[127:0]
}
dst[127:0] := INTERLEAVE_BYTES(a[127:0], b[127:0])
dst[255:128] := INTERLEAVE_BYTES(a[255:128], b[255:128])
dst[383:256] := INTERLEAVE_BYTES(a[383:256], b[383:256])
dst[511:384] := INTERLEAVE_BYTES(a[511:384], b[511:384])
dst[MAX:512] := 0

AVX512BW
immintrin.h
Miscellaneous
Unpack and interleave 16-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

DEFINE INTERLEAVE_WORDS(src1[127:0], src2[127:0]) {
    dst[15:0] := src1[15:0]
    dst[31:16] := src2[15:0]
    dst[47:32] := src1[31:16]
    dst[63:48] := src2[31:16]
    dst[79:64] := src1[47:32]
    dst[95:80] := src2[47:32]
    dst[111:96] := src1[63:48]
    dst[127:112] := src2[63:48]
    RETURN dst[127:0]
}
tmp_dst[127:0] := INTERLEAVE_WORDS(a[127:0], b[127:0])
tmp_dst[255:128] := INTERLEAVE_WORDS(a[255:128], b[255:128])
tmp_dst[383:256] := INTERLEAVE_WORDS(a[383:256], b[383:256])
tmp_dst[511:384] := INTERLEAVE_WORDS(a[511:384], b[511:384])
FOR j := 0 to 31
    i := j*16
    IF k[j]
        dst[i+15:i] := tmp_dst[i+15:i]
    ELSE
        dst[i+15:i] := src[i+15:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Miscellaneous

Unpack and interleave 16-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

DEFINE INTERLEAVE_WORDS(src1[127:0], src2[127:0]) {
    dst[15:0] := src1[15:0]
    dst[31:16] := src2[15:0]
    dst[47:32] := src1[31:16]
    dst[63:48] := src2[31:16]
    dst[79:64] := src1[47:32]
    dst[95:80] := src2[47:32]
    dst[111:96] := src1[63:48]
    dst[127:112] := src2[63:48]
    RETURN dst[127:0]
}
tmp_dst[127:0] := INTERLEAVE_WORDS(a[127:0], b[127:0])
tmp_dst[255:128] := INTERLEAVE_WORDS(a[255:128], b[255:128])
tmp_dst[383:256] := INTERLEAVE_WORDS(a[383:256], b[383:256])
tmp_dst[511:384] := INTERLEAVE_WORDS(a[511:384], b[511:384])
FOR j := 0 to 31
    i := j*16
    IF k[j]
        dst[i+15:i] := tmp_dst[i+15:i]
    ELSE
        dst[i+15:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Miscellaneous

Unpack and interleave 16-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst".

DEFINE INTERLEAVE_WORDS(src1[127:0], src2[127:0]) {
    dst[15:0] := src1[15:0]
    dst[31:16] := src2[15:0]
    dst[47:32] := src1[31:16]
    dst[63:48] := src2[31:16]
    dst[79:64] := src1[47:32]
    dst[95:80] := src2[47:32]
    dst[111:96] := src1[63:48]
    dst[127:112] := src2[63:48]
    RETURN dst[127:0]
}
dst[127:0] := INTERLEAVE_WORDS(a[127:0], b[127:0])
dst[255:128] := INTERLEAVE_WORDS(a[255:128], b[255:128])
dst[383:256] := INTERLEAVE_WORDS(a[383:256], b[383:256])
dst[511:384] := INTERLEAVE_WORDS(a[511:384], b[511:384])
dst[MAX:512] := 0

AVX512BW
immintrin.h
Miscellaneous
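Every masked variant in this group ends with the same final step: a per-element select merging tmp_dst with "src" (writemask) or zero (zeromask). A sketch of that step for 32 x 16-bit elements, with illustrative names:

fn apply_writemask_epi16(tmp_dst: [u16; 32], src: [u16; 32], k: u32) -> [u16; 32] {
    let mut dst = [0u16; 32];
    for j in 0..32 {
        // bit j of the mask chooses the computed value or the fallback
        dst[j] = if (k >> j) & 1 == 1 { tmp_dst[j] } else { src[j] };
    }
    dst // pass src = [0; 32] to model the zeromask behaviour
}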
Load packed 16-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "mem_addr" does not need to be aligned on any particular boundary.

FOR j := 0 to 31
    i := j*16
    IF k[j]
        dst[i+15:i] := MEM[mem_addr+i+15:mem_addr+i]
    ELSE
        dst[i+15:i] := src[i+15:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Load

Load packed 16-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "mem_addr" does not need to be aligned on any particular boundary.

FOR j := 0 to 31
    i := j*16
    IF k[j]
        dst[i+15:i] := MEM[mem_addr+i+15:mem_addr+i]
    ELSE
        dst[i+15:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Load

Load packed 8-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "mem_addr" does not need to be aligned on any particular boundary.

FOR j := 0 to 63
    i := j*8
    IF k[j]
        dst[i+7:i] := MEM[mem_addr+i+7:mem_addr+i]
    ELSE
        dst[i+7:i] := src[i+7:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Load

Load packed 8-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "mem_addr" does not need to be aligned on any particular boundary.

FOR j := 0 to 63
    i := j*8
    IF k[j]
        dst[i+7:i] := MEM[mem_addr+i+7:mem_addr+i]
    ELSE
        dst[i+7:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Load
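A sketch of the masked-load semantics, with a fixed-size slice standing in for "mem_addr" (an illustrative model, not the intrinsic itself). Note that on real hardware masked-off elements are not read at all, which suppresses faults for those addresses; this scalar model cannot show that part:

fn mask_loadu_epi16_model(src: [i16; 32], k: u32, mem: &[i16; 32]) -> [i16; 32] {
    let mut dst = [0i16; 32];
    for j in 0..32 {
        // hardware only touches memory for lanes whose mask bit is set
        dst[j] = if (k >> j) & 1 == 1 { mem[j] } else { src[j] };
    }
    dst
}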
Load 512-bits (composed of 32 packed 16-bit integers) from memory into "dst". "mem_addr" does not need to be aligned on any particular boundary.

dst[511:0] := MEM[mem_addr+511:mem_addr]
dst[MAX:512] := 0

AVX512BW
immintrin.h
Load

Load 512-bits (composed of 64 packed 8-bit integers) from memory into "dst". "mem_addr" does not need to be aligned on any particular boundary.

dst[511:0] := MEM[mem_addr+511:mem_addr]
dst[MAX:512] := 0

AVX512BW
immintrin.h
Load

Load 32-bit mask from memory into "k".

k[31:0] := MEM[mem_addr+31:mem_addr]

AVX512BW
immintrin.h
Load

Load 64-bit mask from memory into "k".

k[63:0] := MEM[mem_addr+63:mem_addr]

AVX512BW
immintrin.h
Load
Move packed 16-bit integers from "a" into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 31
    i := j*16
    IF k[j]
        dst[i+15:i] := a[i+15:i]
    ELSE
        dst[i+15:i] := src[i+15:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Move

Move packed 16-bit integers from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 31
    i := j*16
    IF k[j]
        dst[i+15:i] := a[i+15:i]
    ELSE
        dst[i+15:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Move

Move packed 8-bit integers from "a" into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 63
    i := j*8
    IF k[j]
        dst[i+7:i] := a[i+7:i]
    ELSE
        dst[i+7:i] := src[i+7:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Move

Move packed 8-bit integers from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 63
    i := j*8
    IF k[j]
        dst[i+7:i] := a[i+7:i]
    ELSE
        dst[i+7:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Move
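The mask_mov/maskz_mov forms perform no computation at all; they are a pure per-element blend between two registers under a mask. A one-line sketch for the 8-bit case (illustrative name and representation):

fn mask_mov_epi8_model(src: [u8; 64], k: u64, a: [u8; 64]) -> [u8; 64] {
    // for each element: take a[j] when bit j of k is set, else keep src[j]
    core::array::from_fn(|j| if (k >> j) & 1 == 1 { a[j] } else { src[j] })
}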
Store packed 16-bit integers from "a" into memory using writemask "k". "mem_addr" does not need to be aligned on any particular boundary.

FOR j := 0 to 31
    i := j*16
    IF k[j]
        MEM[mem_addr+i+15:mem_addr+i] := a[i+15:i]
    FI
ENDFOR

AVX512BW
immintrin.h
Store

Store packed 8-bit integers from "a" into memory using writemask "k". "mem_addr" does not need to be aligned on any particular boundary.

FOR j := 0 to 63
    i := j*8
    IF k[j]
        MEM[mem_addr+i+7:mem_addr+i] := a[i+7:i]
    FI
ENDFOR

AVX512BW
immintrin.h
Store
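The store-side counterpart of the masked load: only elements whose mask bit is set are written, and the remaining destination bytes keep their old values. A sketch for the 8-bit form (assumed names; a mutable array models the memory operand):

fn mask_storeu_epi8_model(mem: &mut [u8; 64], k: u64, a: [u8; 64]) {
    for j in 0..64 {
        if (k >> j) & 1 == 1 {
            mem[j] = a[j]; // unselected bytes of `mem` are left untouched
        }
    }
}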
Store 512-bits (composed of 32 packed 16-bit integers) from "a" into memory. "mem_addr" does not need to be aligned on any particular boundary.

MEM[mem_addr+511:mem_addr] := a[511:0]

AVX512BW
immintrin.h
Store

Store 512-bits (composed of 64 packed 8-bit integers) from "a" into memory. "mem_addr" does not need to be aligned on any particular boundary.

MEM[mem_addr+511:mem_addr] := a[511:0]

AVX512BW
immintrin.h
Store

Store 32-bit mask from "a" into memory.

MEM[mem_addr+31:mem_addr] := a[31:0]

AVX512BW
immintrin.h
Store

Store 64-bit mask from "a" into memory.

MEM[mem_addr+63:mem_addr] := a[63:0]

AVX512BW
immintrin.h
Store
Compute the absolute value of packed signed 8-bit integers in "a", and store the unsigned results in "dst".

FOR j := 0 to 63
    i := j*8
    dst[i+7:i] := ABS(a[i+7:i])
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Arithmetic

Compute the absolute value of packed signed 8-bit integers in "a", and store the unsigned results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 63
    i := j*8
    IF k[j]
        dst[i+7:i] := ABS(a[i+7:i])
    ELSE
        dst[i+7:i] := src[i+7:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Arithmetic

Compute the absolute value of packed signed 8-bit integers in "a", and store the unsigned results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 63
    i := j*8
    IF k[j]
        dst[i+7:i] := ABS(a[i+7:i])
    ELSE
        dst[i+7:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Arithmetic
Compute the absolute value of packed signed 16-bit integers in "a", and store the unsigned results in "dst".

FOR j := 0 to 31
    i := j*16
    dst[i+15:i] := ABS(a[i+15:i])
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Arithmetic

Compute the absolute value of packed signed 16-bit integers in "a", and store the unsigned results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 31
    i := j*16
    IF k[j]
        dst[i+15:i] := ABS(a[i+15:i])
    ELSE
        dst[i+15:i] := src[i+15:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Arithmetic

Compute the absolute value of packed signed 16-bit integers in "a", and store the unsigned results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 31
    i := j*16
    IF k[j]
        dst[i+15:i] := ABS(a[i+15:i])
    ELSE
        dst[i+15:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Arithmetic
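Note the descriptions say *unsigned* results: ABS(-128) is 128, which does not fit in an i8 but does in a u8. Rust's unsigned_abs models this exactly (illustrative sketch, not the stdarch implementation):

fn abs_epi8_model(a: [i8; 64]) -> [u8; 64] {
    // unsigned_abs never overflows: i8::MIN maps to 128u8
    core::array::from_fn(|j| a[j].unsigned_abs())
}
// abs_epi8_model([i8::MIN; 64])[0] == 128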
Add packed 8-bit integers in "a" and "b", and store the results in "dst".

FOR j := 0 to 63
    i := j*8
    dst[i+7:i] := a[i+7:i] + b[i+7:i]
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Arithmetic

Add packed 8-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 63
    i := j*8
    IF k[j]
        dst[i+7:i] := a[i+7:i] + b[i+7:i]
    ELSE
        dst[i+7:i] := src[i+7:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Arithmetic

Add packed 8-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 63
    i := j*8
    IF k[j]
        dst[i+7:i] := a[i+7:i] + b[i+7:i]
    ELSE
        dst[i+7:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Arithmetic
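The plain packed add is modular (wrapping) arithmetic in each element: overflow silently wraps, unlike the saturating forms below. A minimal sketch:

fn add_epi8_model(a: [u8; 64], b: [u8; 64]) -> [u8; 64] {
    // each 8-bit lane adds independently, modulo 256
    core::array::from_fn(|j| a[j].wrapping_add(b[j]))
}
// add_epi8_model([0xff; 64], [1; 64])[0] == 0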
+    Add packed signed 8-bit integers in "a" and "b" using saturation, and store the results in "dst".
+FOR j := 0 to 63
+    i := j*8
+    dst[i+7:i] := Saturate8( a[i+7:i] + b[i+7:i] )
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Add packed signed 8-bit integers in "a" and "b" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+FOR j := 0 to 63
+    i := j*8
+    IF k[j]
+        dst[i+7:i] := Saturate8( a[i+7:i] + b[i+7:i] )
+    ELSE
+        dst[i+7:i] := src[i+7:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Add packed signed 8-bit integers in "a" and "b" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 to 63
+    i := j*8
+    IF k[j]
+        dst[i+7:i] := Saturate8( a[i+7:i] + b[i+7:i] )
+    ELSE
+        dst[i+7:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic
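A minimal scalar Rust model of Saturate8 in the pseudocode above (illustrative only): the sum is clamped to the i8 range instead of wrapping.

fn adds_epi8_lane(a: i8, b: i8) -> i8 {
    a.saturating_add(b)
}

fn main() {
    assert_eq!(adds_epi8_lane(100, 100), i8::MAX);   // 200 clamps to 127
    assert_eq!(adds_epi8_lane(-100, -100), i8::MIN); // -200 clamps to -128
}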
+    Add packed signed 16-bit integers in "a" and "b" using saturation, and store the results in "dst".
+FOR j := 0 to 31
+    i := j*16
+    dst[i+15:i] := Saturate16( a[i+15:i] + b[i+15:i] )
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Add packed signed 16-bit integers in "a" and "b" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+FOR j := 0 to 31
+    i := j*16
+    IF k[j]
+        dst[i+15:i] := Saturate16( a[i+15:i] + b[i+15:i] )
+    ELSE
+        dst[i+15:i] := src[i+15:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Add packed signed 16-bit integers in "a" and "b" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 to 31
+    i := j*16
+    IF k[j]
+        dst[i+15:i] := Saturate16( a[i+15:i] + b[i+15:i] )
+    ELSE
+        dst[i+15:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic
+    Add packed unsigned 8-bit integers in "a" and "b" using saturation, and store the results in "dst".
+FOR j := 0 to 63
+    i := j*8
+    dst[i+7:i] := SaturateU8( a[i+7:i] + b[i+7:i] )
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Add packed unsigned 8-bit integers in "a" and "b" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+FOR j := 0 to 63
+    i := j*8
+    IF k[j]
+        dst[i+7:i] := SaturateU8( a[i+7:i] + b[i+7:i] )
+    ELSE
+        dst[i+7:i] := src[i+7:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Add packed unsigned 8-bit integers in "a" and "b" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 to 63
+    i := j*8
+    IF k[j]
+        dst[i+7:i] := SaturateU8( a[i+7:i] + b[i+7:i] )
+    ELSE
+        dst[i+7:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic
+    Add packed unsigned 16-bit integers in "a" and "b" using saturation, and store the results in "dst".
+FOR j := 0 to 31
+    i := j*16
+    dst[i+15:i] := SaturateU16( a[i+15:i] + b[i+15:i] )
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Add packed unsigned 16-bit integers in "a" and "b" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+FOR j := 0 to 31
+    i := j*16
+    IF k[j]
+        dst[i+15:i] := SaturateU16( a[i+15:i] + b[i+15:i] )
+    ELSE
+        dst[i+15:i] := src[i+15:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Add packed unsigned 16-bit integers in "a" and "b" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 to 31
+    i := j*16
+    IF k[j]
+        dst[i+15:i] := SaturateU16( a[i+15:i] + b[i+15:i] )
+    ELSE
+        dst[i+15:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic
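A minimal scalar Rust model of SaturateU8/SaturateU16 in the unsigned variants above (illustrative only): unsigned sums clamp at the type maximum instead of wrapping.

fn adds_epu8_lane(a: u8, b: u8) -> u8 { a.saturating_add(b) }
fn adds_epu16_lane(a: u16, b: u16) -> u16 { a.saturating_add(b) }

fn main() {
    assert_eq!(adds_epu8_lane(200, 100), u8::MAX);
    assert_eq!(adds_epu16_lane(60_000, 10_000), u16::MAX);
}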
+    Add packed 16-bit integers in "a" and "b", and store the results in "dst".
+FOR j := 0 to 31
+    i := j*16
+    dst[i+15:i] := a[i+15:i] + b[i+15:i]
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Add packed 16-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+FOR j := 0 to 31
+    i := j*16
+    IF k[j]
+        dst[i+15:i] := a[i+15:i] + b[i+15:i]
+    ELSE
+        dst[i+15:i] := src[i+15:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Add packed 16-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 to 31
+    i := j*16
+    IF k[j]
+        dst[i+15:i] := a[i+15:i] + b[i+15:i]
+    ELSE
+        dst[i+15:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic
+    Average packed unsigned 8-bit integers in "a" and "b", and store the results in "dst".
+FOR j := 0 to 63
+    i := j*8
+    dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) >> 1
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Average packed unsigned 8-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+FOR j := 0 to 63
+    i := j*8
+    IF k[j]
+        dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) >> 1
+    ELSE
+        dst[i+7:i] := src[i+7:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Average packed unsigned 8-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 to 63
+    i := j*8
+    IF k[j]
+        dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) >> 1
+    ELSE
+        dst[i+7:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic
+    Average packed unsigned 16-bit integers in "a" and "b", and store the results in "dst".
+FOR j := 0 to 31
+    i := j*16
+    dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) >> 1
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Average packed unsigned 16-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+FOR j := 0 to 31
+    i := j*16
+    IF k[j]
+        dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) >> 1
+    ELSE
+        dst[i+15:i] := src[i+15:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Average packed unsigned 16-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 to 31
+    i := j*16
+    IF k[j]
+        dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) >> 1
+    ELSE
+        dst[i+15:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic
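A minimal scalar Rust model of the rounding average above (illustrative only): the "+ 1" makes the halving round up, and the arithmetic is done in a wider type so the sum cannot overflow.

fn avg_epu8_lane(a: u8, b: u8) -> u8 {
    ((a as u16 + b as u16 + 1) >> 1) as u8
}

fn main() {
    assert_eq!(avg_epu8_lane(1, 2), 2);       // 1.5 rounds up to 2
    assert_eq!(avg_epu8_lane(255, 255), 255); // no overflow in u16
}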
+    Vertically multiply each unsigned 8-bit integer from "a" with the corresponding signed 8-bit integer from "b", producing intermediate signed 16-bit integers. Horizontally add adjacent pairs of intermediate signed 16-bit integers, and pack the saturated results in "dst".
+FOR j := 0 to 31
+    i := j*16
+    dst[i+15:i] := Saturate16( a[i+15:i+8]*b[i+15:i+8] + a[i+7:i]*b[i+7:i] )
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Multiply packed unsigned 8-bit integers in "a" by packed signed 8-bit integers in "b", producing intermediate signed 16-bit integers. Horizontally add adjacent pairs of intermediate signed 16-bit integers, and pack the saturated results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+FOR j := 0 to 31
+    i := j*16
+    IF k[j]
+        dst[i+15:i] := Saturate16( a[i+15:i+8]*b[i+15:i+8] + a[i+7:i]*b[i+7:i] )
+    ELSE
+        dst[i+15:i] := src[i+15:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Multiply packed unsigned 8-bit integers in "a" by packed signed 8-bit integers in "b", producing intermediate signed 16-bit integers. Horizontally add adjacent pairs of intermediate signed 16-bit integers, and pack the saturated results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 to 31
+    i := j*16
+    IF k[j]
+        dst[i+15:i] := Saturate16( a[i+15:i+8]*b[i+15:i+8] + a[i+7:i]*b[i+7:i] )
+    ELSE
+        dst[i+15:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic
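A minimal scalar Rust model of one 16-bit output lane of the unsigned-by-signed multiply-add above (illustrative only): the high and low bytes of a 16-bit lane of "a" (unsigned) are multiplied by the matching bytes of "b" (signed), and the sum of the two products is saturated to i16.

fn maddubs_lane(a_hi: u8, a_lo: u8, b_hi: i8, b_lo: i8) -> i16 {
    let sum = a_hi as i32 * b_hi as i32 + a_lo as i32 * b_lo as i32;
    sum.clamp(i16::MIN as i32, i16::MAX as i32) as i16
}

fn main() {
    // 255*127 + 255*127 = 64770 saturates to 32767
    assert_eq!(maddubs_lane(255, 255, 127, 127), i16::MAX);
}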
+    Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, and pack the results in "dst".
+FOR j := 0 to 15
+    i := j*32
+    dst[i+31:i] := SignExtend32(a[i+31:i+16]*b[i+31:i+16]) + SignExtend32(a[i+15:i]*b[i+15:i])
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, and pack the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+FOR j := 0 to 15
+    i := j*32
+    IF k[j]
+        dst[i+31:i] := SignExtend32(a[i+31:i+16]*b[i+31:i+16]) + SignExtend32(a[i+15:i]*b[i+15:i])
+    ELSE
+        dst[i+31:i] := src[i+31:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, and pack the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 to 15
+    i := j*32
+    IF k[j]
+        dst[i+31:i] := SignExtend32(a[i+31:i+16]*b[i+31:i+16]) + SignExtend32(a[i+15:i]*b[i+15:i])
+    ELSE
+        dst[i+31:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic
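A minimal scalar Rust model of one 32-bit output lane of the pseudocode above (illustrative only). The products of two adjacent signed 16-bit lanes are sign-extended to 32 bits and added; the only sum that can exceed the i32 range is (-32768)*(-32768) in both pairs, which the instruction wraps, so wrapping_add models that corner case.

fn madd_lane(a_hi: i16, a_lo: i16, b_hi: i16, b_lo: i16) -> i32 {
    (a_hi as i32 * b_hi as i32).wrapping_add(a_lo as i32 * b_lo as i32)
}

fn main() {
    assert_eq!(madd_lane(2, 3, 10, 100), 320); // 2*10 + 3*100
}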
+    Compare packed signed 8-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+FOR j := 0 to 63
+    i := j*8
+    IF k[j]
+        dst[i+7:i] := MAX(a[i+7:i], b[i+7:i])
+    ELSE
+        dst[i+7:i] := src[i+7:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Compare packed signed 8-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 to 63
+    i := j*8
+    IF k[j]
+        dst[i+7:i] := MAX(a[i+7:i], b[i+7:i])
+    ELSE
+        dst[i+7:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Compare packed signed 8-bit integers in "a" and "b", and store packed maximum values in "dst".
+FOR j := 0 to 63
+    i := j*8
+    dst[i+7:i] := MAX(a[i+7:i], b[i+7:i])
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic
+    Compare packed signed 16-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+FOR j := 0 to 31
+    i := j*16
+    IF k[j]
+        dst[i+15:i] := MAX(a[i+15:i], b[i+15:i])
+    ELSE
+        dst[i+15:i] := src[i+15:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Compare packed signed 16-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 to 31
+    i := j*16
+    IF k[j]
+        dst[i+15:i] := MAX(a[i+15:i], b[i+15:i])
+    ELSE
+        dst[i+15:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Compare packed signed 16-bit integers in "a" and "b", and store packed maximum values in "dst".
+FOR j := 0 to 31
+    i := j*16
+    dst[i+15:i] := MAX(a[i+15:i], b[i+15:i])
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic
+    Compare packed unsigned 8-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+FOR j := 0 to 63
+    i := j*8
+    IF k[j]
+        dst[i+7:i] := MAX(a[i+7:i], b[i+7:i])
+    ELSE
+        dst[i+7:i] := src[i+7:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Compare packed unsigned 8-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 to 63
+    i := j*8
+    IF k[j]
+        dst[i+7:i] := MAX(a[i+7:i], b[i+7:i])
+    ELSE
+        dst[i+7:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Compare packed unsigned 8-bit integers in "a" and "b", and store packed maximum values in "dst".
+FOR j := 0 to 63
+    i := j*8
+    dst[i+7:i] := MAX(a[i+7:i], b[i+7:i])
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic
+    Compare packed unsigned 16-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+FOR j := 0 to 31
+    i := j*16
+    IF k[j]
+        dst[i+15:i] := MAX(a[i+15:i], b[i+15:i])
+    ELSE
+        dst[i+15:i] := src[i+15:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Compare packed unsigned 16-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 to 31
+    i := j*16
+    IF k[j]
+        dst[i+15:i] := MAX(a[i+15:i], b[i+15:i])
+    ELSE
+        dst[i+15:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Compare packed unsigned 16-bit integers in "a" and "b", and store packed maximum values in "dst".
+FOR j := 0 to 31
+    i := j*16
+    dst[i+15:i] := MAX(a[i+15:i], b[i+15:i])
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic
+    Compare packed signed 8-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+FOR j := 0 to 63
+    i := j*8
+    IF k[j]
+        dst[i+7:i] := MIN(a[i+7:i], b[i+7:i])
+    ELSE
+        dst[i+7:i] := src[i+7:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Compare packed signed 8-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 to 63
+    i := j*8
+    IF k[j]
+        dst[i+7:i] := MIN(a[i+7:i], b[i+7:i])
+    ELSE
+        dst[i+7:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Compare packed signed 8-bit integers in "a" and "b", and store packed minimum values in "dst".
+FOR j := 0 to 63
+    i := j*8
+    dst[i+7:i] := MIN(a[i+7:i], b[i+7:i])
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic
+    Compare packed signed 16-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+FOR j := 0 to 31
+    i := j*16
+    IF k[j]
+        dst[i+15:i] := MIN(a[i+15:i], b[i+15:i])
+    ELSE
+        dst[i+15:i] := src[i+15:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Compare packed signed 16-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 to 31
+    i := j*16
+    IF k[j]
+        dst[i+15:i] := MIN(a[i+15:i], b[i+15:i])
+    ELSE
+        dst[i+15:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Compare packed signed 16-bit integers in "a" and "b", and store packed minimum values in "dst".
+FOR j := 0 to 31
+    i := j*16
+    dst[i+15:i] := MIN(a[i+15:i], b[i+15:i])
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic
+    Compare packed unsigned 8-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+FOR j := 0 to 63
+    i := j*8
+    IF k[j]
+        dst[i+7:i] := MIN(a[i+7:i], b[i+7:i])
+    ELSE
+        dst[i+7:i] := src[i+7:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Compare packed unsigned 8-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 to 63
+    i := j*8
+    IF k[j]
+        dst[i+7:i] := MIN(a[i+7:i], b[i+7:i])
+    ELSE
+        dst[i+7:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Compare packed unsigned 8-bit integers in "a" and "b", and store packed minimum values in "dst".
+FOR j := 0 to 63
+    i := j*8
+    dst[i+7:i] := MIN(a[i+7:i], b[i+7:i])
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic
+    Compare packed unsigned 16-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+FOR j := 0 to 31
+    i := j*16
+    IF k[j]
+        dst[i+15:i] := MIN(a[i+15:i], b[i+15:i])
+    ELSE
+        dst[i+15:i] := src[i+15:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Compare packed unsigned 16-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 to 31
+    i := j*16
+    IF k[j]
+        dst[i+15:i] := MIN(a[i+15:i], b[i+15:i])
+    ELSE
+        dst[i+15:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Compare packed unsigned 16-bit integers in "a" and "b", and store packed minimum values in "dst".
+FOR j := 0 to 31
+    i := j*16
+    dst[i+15:i] := MIN(a[i+15:i], b[i+15:i])
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic
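A minimal scalar Rust model of the masked MIN/MAX lanes above (illustrative only): an ordinary comparison feeds the same writemask selection used by the other masked intrinsics in this section.

fn mask_max_epi8_lane(src: i8, k: u64, j: u32, a: i8, b: i8) -> i8 {
    if (k >> j) & 1 == 1 { a.max(b) } else { src }
}

fn main() {
    assert_eq!(mask_max_epi8_lane(9, 0b1, 0, -3, 5), 5); // max taken
    assert_eq!(mask_max_epi8_lane(9, 0b0, 0, -3, 5), 9); // copied from src
}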
+    Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits [16:1] to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+FOR j := 0 to 31
+    i := j*16
+    IF k[j]
+        tmp[31:0] := ((SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])) >> 14) + 1
+        dst[i+15:i] := tmp[16:1]
+    ELSE
+        dst[i+15:i] := src[i+15:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits [16:1] to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 to 31
+    i := j*16
+    IF k[j]
+        tmp[31:0] := ((SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])) >> 14) + 1
+        dst[i+15:i] := tmp[16:1]
+    ELSE
+        dst[i+15:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits [16:1] to "dst".
+FOR j := 0 to 31
+    i := j*16
+    tmp[31:0] := ((SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])) >> 14) + 1
+    dst[i+15:i] := tmp[16:1]
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic
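A minimal scalar Rust model of the mulhrs lane above (illustrative only): a Q15 fixed-point multiply with rounding. Shifting the 32-bit product right by 14, adding 1, and keeping bits [16:1] is the same as rounding the product to 15 fractional bits.

fn mulhrs_lane(a: i16, b: i16) -> i16 {
    let tmp = ((a as i32 * b as i32) >> 14) + 1;
    (tmp >> 1) as i16 // keep bits [16:1]
}

fn main() {
    // 0.5 * 0.5 in Q15: 16384 * 16384 -> 8192, i.e. 0.25
    assert_eq!(mulhrs_lane(16384, 16384), 8192);
}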
+    Multiply the packed unsigned 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+FOR j := 0 to 31
+    i := j*16
+    IF k[j]
+        tmp[31:0] := a[i+15:i] * b[i+15:i]
+        dst[i+15:i] := tmp[31:16]
+    ELSE
+        dst[i+15:i] := src[i+15:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Multiply the packed unsigned 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 to 31
+    i := j*16
+    IF k[j]
+        tmp[31:0] := a[i+15:i] * b[i+15:i]
+        dst[i+15:i] := tmp[31:16]
+    ELSE
+        dst[i+15:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Multiply the packed unsigned 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst".
+FOR j := 0 to 31
+    i := j*16
+    tmp[31:0] := a[i+15:i] * b[i+15:i]
+    dst[i+15:i] := tmp[31:16]
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic
+    Multiply the packed signed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+FOR j := 0 to 31
+    i := j*16
+    IF k[j]
+        tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])
+        dst[i+15:i] := tmp[31:16]
+    ELSE
+        dst[i+15:i] := src[i+15:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Multiply the packed signed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 to 31
+    i := j*16
+    IF k[j]
+        tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])
+        dst[i+15:i] := tmp[31:16]
+    ELSE
+        dst[i+15:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Multiply the packed signed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst".
+FOR j := 0 to 31
+    i := j*16
+    tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])
+    dst[i+15:i] := tmp[31:16]
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic
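A minimal scalar Rust model of the high-half multiplies above (illustrative only): the 16x16 product is formed in 32 bits, zero-extended for the unsigned form and sign-extended for the signed form, and the upper 16 bits are kept.

fn mulhi_epu16_lane(a: u16, b: u16) -> u16 {
    ((a as u32 * b as u32) >> 16) as u16
}
fn mulhi_epi16_lane(a: i16, b: i16) -> i16 {
    ((a as i32 * b as i32) >> 16) as i16 // arithmetic shift keeps the sign
}

fn main() {
    assert_eq!(mulhi_epu16_lane(0x8000, 2), 1);   // 0x10000 >> 16
    assert_eq!(mulhi_epi16_lane(i16::MIN, 2), -1); // -65536 >> 16
}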
+    Multiply the packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+FOR j := 0 to 31
+    i := j*16
+    IF k[j]
+        tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])
+        dst[i+15:i] := tmp[15:0]
+    ELSE
+        dst[i+15:i] := src[i+15:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Multiply the packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 to 31
+    i := j*16
+    IF k[j]
+        tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])
+        dst[i+15:i] := tmp[15:0]
+    ELSE
+        dst[i+15:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Multiply the packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in "dst".
+FOR j := 0 to 31
+    i := j*16
+    tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])
+    dst[i+15:i] := tmp[15:0]
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic
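A minimal scalar Rust model of the low-half multiply above (illustrative only): keeping tmp[15:0] is exactly wrapping 16-bit multiplication, and the low half is the same whether the inputs are treated as signed or unsigned.

fn mullo_epi16_lane(a: i16, b: i16) -> i16 {
    a.wrapping_mul(b)
}

fn main() {
    assert_eq!(mullo_epi16_lane(300, 300), 24464); // 90000 mod 65536
}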
+    Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+FOR j := 0 to 63
+    i := j*8
+    IF k[j]
+        dst[i+7:i] := a[i+7:i] - b[i+7:i]
+    ELSE
+        dst[i+7:i] := src[i+7:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 to 63
+    i := j*8
+    IF k[j]
+        dst[i+7:i] := a[i+7:i] - b[i+7:i]
+    ELSE
+        dst[i+7:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a", and store the results in "dst".
+FOR j := 0 to 63
+    i := j*8
+    dst[i+7:i] := a[i+7:i] - b[i+7:i]
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic
+    Subtract packed signed 8-bit integers in "b" from packed 8-bit integers in "a" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+FOR j := 0 to 63
+    i := j*8
+    IF k[j]
+        dst[i+7:i] := Saturate8(a[i+7:i] - b[i+7:i])
+    ELSE
+        dst[i+7:i] := src[i+7:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Subtract packed signed 8-bit integers in "b" from packed 8-bit integers in "a" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 to 63
+    i := j*8
+    IF k[j]
+        dst[i+7:i] := Saturate8(a[i+7:i] - b[i+7:i])
+    ELSE
+        dst[i+7:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Subtract packed signed 8-bit integers in "b" from packed 8-bit integers in "a" using saturation, and store the results in "dst".
+FOR j := 0 to 63
+    i := j*8
+    dst[i+7:i] := Saturate8(a[i+7:i] - b[i+7:i])
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic
+    Subtract packed signed 16-bit integers in "b" from packed 16-bit integers in "a" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+FOR j := 0 to 31
+    i := j*16
+    IF k[j]
+        dst[i+15:i] := Saturate16(a[i+15:i] - b[i+15:i])
+    ELSE
+        dst[i+15:i] := src[i+15:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Subtract packed signed 16-bit integers in "b" from packed 16-bit integers in "a" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 to 31
+    i := j*16
+    IF k[j]
+        dst[i+15:i] := Saturate16(a[i+15:i] - b[i+15:i])
+    ELSE
+        dst[i+15:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Subtract packed signed 16-bit integers in "b" from packed 16-bit integers in "a" using saturation, and store the results in "dst".
+FOR j := 0 to 31
+    i := j*16
+    dst[i+15:i] := Saturate16(a[i+15:i] - b[i+15:i])
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic
+    Subtract packed unsigned 8-bit integers in "b" from packed unsigned 8-bit integers in "a" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+FOR j := 0 to 63
+    i := j*8
+    IF k[j]
+        dst[i+7:i] := SaturateU8(a[i+7:i] - b[i+7:i])
+    ELSE
+        dst[i+7:i] := src[i+7:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Subtract packed unsigned 8-bit integers in "b" from packed unsigned 8-bit integers in "a" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 to 63
+    i := j*8
+    IF k[j]
+        dst[i+7:i] := SaturateU8(a[i+7:i] - b[i+7:i])
+    ELSE
+        dst[i+7:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic

+    Subtract packed unsigned 8-bit integers in "b" from packed unsigned 8-bit integers in "a" using saturation, and store the results in "dst".
+FOR j := 0 to 63
+    i := j*8
+    dst[i+7:i] := SaturateU8(a[i+7:i] - b[i+7:i])
+ENDFOR
+dst[MAX:512] := 0
+    AVX512BW
     immintrin.h
+    Arithmetic
- - - - - - Subtract packed unsigned 16-bit integers in "b" from packed unsigned 16-bit - integers in "a" using saturation, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := SaturateU16(a[i+15:i] - b[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512BW -
immintrin.h
- Arithmetic + + + + + + Subtract packed unsigned 16-bit integers in "b" from packed unsigned 16-bit integers in "a" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := SaturateU16(a[i+15:i] - b[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512BW +
immintrin.h
+ Arithmetic
- - - - - Subtract packed unsigned 16-bit integers in "b" from packed unsigned 16-bit - integers in "a" using saturation, and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := SaturateU16(a[i+15:i] - b[i+15:i]) - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512BW -
immintrin.h
- Arithmetic + + + + + Subtract packed unsigned 16-bit integers in "b" from packed unsigned 16-bit integers in "a" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := SaturateU16(a[i+15:i] - b[i+15:i]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512BW +
immintrin.h
+ Arithmetic
- - - - Subtract packed unsigned 16-bit integers in "b" from packed unsigned 16-bit - integers in "a" using saturation, and store the results in "dst". - - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := SaturateU16(a[i+15:i] - b[i+15:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512BW -
immintrin.h
- Arithmetic + + + + Subtract packed unsigned 16-bit integers in "b" from packed unsigned 16-bit integers in "a" using saturation, and store the results in "dst". + +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := SaturateU16(a[i+15:i] - b[i+15:i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512BW +
immintrin.h
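Unsigned saturating subtraction can only clamp at the low end: whenever "b" exceeds "a", the lane floors at zero rather than wrapping around to a large value. A two-line illustration of the SaturateU16 behaviour used above:

fn main() {
    assert_eq!(10u16.saturating_sub(20), 0); // would wrap to 65526 without saturation
    assert_eq!(0xFFFFu16.saturating_sub(1), 0xFFFE); // in-range results are unchanged
}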
Arithmetic

Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 31
	i := j*16
	IF k[j]
		dst[i+15:i] := a[i+15:i] - b[i+15:i]
	ELSE
		dst[i+15:i] := src[i+15:i]
	FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h

Arithmetic

Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 31
	i := j*16
	IF k[j]
		dst[i+15:i] := a[i+15:i] - b[i+15:i]
	ELSE
		dst[i+15:i] := 0
	FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h

Arithmetic

Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a", and store the results in "dst".

FOR j := 0 to 31
	i := j*16
	dst[i+15:i] := a[i+15:i] - b[i+15:i]
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
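Unlike the saturating entries earlier in this file, these plain subtract entries wrap modulo 2^16. The difference in one comparison, as plain Rust:

fn main() {
    let (a, b) = (i16::MIN, 1i16);
    assert_eq!(a.wrapping_sub(b), i16::MAX); // plain subtract semantics: wraps around
    assert_eq!(a.saturating_sub(b), i16::MIN); // saturating subtract semantics: clamps
}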
Miscellaneous

Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit integers using signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

tmp_dst[15:0] := Saturate16(a[31:0])
tmp_dst[31:16] := Saturate16(a[63:32])
tmp_dst[47:32] := Saturate16(a[95:64])
tmp_dst[63:48] := Saturate16(a[127:96])
tmp_dst[79:64] := Saturate16(b[31:0])
tmp_dst[95:80] := Saturate16(b[63:32])
tmp_dst[111:96] := Saturate16(b[95:64])
tmp_dst[127:112] := Saturate16(b[127:96])
tmp_dst[143:128] := Saturate16(a[159:128])
tmp_dst[159:144] := Saturate16(a[191:160])
tmp_dst[175:160] := Saturate16(a[223:192])
tmp_dst[191:176] := Saturate16(a[255:224])
tmp_dst[207:192] := Saturate16(b[159:128])
tmp_dst[223:208] := Saturate16(b[191:160])
tmp_dst[239:224] := Saturate16(b[223:192])
tmp_dst[255:240] := Saturate16(b[255:224])
tmp_dst[271:256] := Saturate16(a[287:256])
tmp_dst[287:272] := Saturate16(a[319:288])
tmp_dst[303:288] := Saturate16(a[351:320])
tmp_dst[319:304] := Saturate16(a[383:352])
tmp_dst[335:320] := Saturate16(b[287:256])
tmp_dst[351:336] := Saturate16(b[319:288])
tmp_dst[367:352] := Saturate16(b[351:320])
tmp_dst[383:368] := Saturate16(b[383:352])
tmp_dst[399:384] := Saturate16(a[415:384])
tmp_dst[415:400] := Saturate16(a[447:416])
tmp_dst[431:416] := Saturate16(a[479:448])
tmp_dst[447:432] := Saturate16(a[511:480])
tmp_dst[463:448] := Saturate16(b[415:384])
tmp_dst[479:464] := Saturate16(b[447:416])
tmp_dst[495:480] := Saturate16(b[479:448])
tmp_dst[511:496] := Saturate16(b[511:480])
FOR j := 0 to 31
	i := j*16
	IF k[j]
		dst[i+15:i] := tmp_dst[i+15:i]
	ELSE
		dst[i+15:i] := src[i+15:i]
	FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h

Miscellaneous

Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit integers using signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

tmp_dst[15:0] := Saturate16(a[31:0])
tmp_dst[31:16] := Saturate16(a[63:32])
tmp_dst[47:32] := Saturate16(a[95:64])
tmp_dst[63:48] := Saturate16(a[127:96])
tmp_dst[79:64] := Saturate16(b[31:0])
tmp_dst[95:80] := Saturate16(b[63:32])
tmp_dst[111:96] := Saturate16(b[95:64])
tmp_dst[127:112] := Saturate16(b[127:96])
tmp_dst[143:128] := Saturate16(a[159:128])
tmp_dst[159:144] := Saturate16(a[191:160])
tmp_dst[175:160] := Saturate16(a[223:192])
tmp_dst[191:176] := Saturate16(a[255:224])
tmp_dst[207:192] := Saturate16(b[159:128])
tmp_dst[223:208] := Saturate16(b[191:160])
tmp_dst[239:224] := Saturate16(b[223:192])
tmp_dst[255:240] := Saturate16(b[255:224])
tmp_dst[271:256] := Saturate16(a[287:256])
tmp_dst[287:272] := Saturate16(a[319:288])
tmp_dst[303:288] := Saturate16(a[351:320])
tmp_dst[319:304] := Saturate16(a[383:352])
tmp_dst[335:320] := Saturate16(b[287:256])
tmp_dst[351:336] := Saturate16(b[319:288])
tmp_dst[367:352] := Saturate16(b[351:320])
tmp_dst[383:368] := Saturate16(b[383:352])
tmp_dst[399:384] := Saturate16(a[415:384])
tmp_dst[415:400] := Saturate16(a[447:416])
tmp_dst[431:416] := Saturate16(a[479:448])
tmp_dst[447:432] := Saturate16(a[511:480])
tmp_dst[463:448] := Saturate16(b[415:384])
tmp_dst[479:464] := Saturate16(b[447:416])
tmp_dst[495:480] := Saturate16(b[479:448])
tmp_dst[511:496] := Saturate16(b[511:480])
FOR j := 0 to 31
	i := j*16
	IF k[j]
		dst[i+15:i] := tmp_dst[i+15:i]
	ELSE
		dst[i+15:i] := 0
	FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h

Miscellaneous

Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit integers using signed saturation, and store the results in "dst".

dst[15:0] := Saturate16(a[31:0])
dst[31:16] := Saturate16(a[63:32])
dst[47:32] := Saturate16(a[95:64])
dst[63:48] := Saturate16(a[127:96])
dst[79:64] := Saturate16(b[31:0])
dst[95:80] := Saturate16(b[63:32])
dst[111:96] := Saturate16(b[95:64])
dst[127:112] := Saturate16(b[127:96])
dst[143:128] := Saturate16(a[159:128])
dst[159:144] := Saturate16(a[191:160])
dst[175:160] := Saturate16(a[223:192])
dst[191:176] := Saturate16(a[255:224])
dst[207:192] := Saturate16(b[159:128])
dst[223:208] := Saturate16(b[191:160])
dst[239:224] := Saturate16(b[223:192])
dst[255:240] := Saturate16(b[255:224])
dst[271:256] := Saturate16(a[287:256])
dst[287:272] := Saturate16(a[319:288])
dst[303:288] := Saturate16(a[351:320])
dst[319:304] := Saturate16(a[383:352])
dst[335:320] := Saturate16(b[287:256])
dst[351:336] := Saturate16(b[319:288])
dst[367:352] := Saturate16(b[351:320])
dst[383:368] := Saturate16(b[383:352])
dst[399:384] := Saturate16(a[415:384])
dst[415:400] := Saturate16(a[447:416])
dst[431:416] := Saturate16(a[479:448])
dst[447:432] := Saturate16(a[511:480])
dst[463:448] := Saturate16(b[415:384])
dst[479:464] := Saturate16(b[447:416])
dst[495:480] := Saturate16(b[479:448])
dst[511:496] := Saturate16(b[511:480])
dst[MAX:512] := 0

AVX512BW
immintrin.h
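The long tmp_dst tables above encode one pattern: the packed 16-bit results are not all of "a" followed by all of "b"; each 128-bit lane of the result takes four saturated elements from "a" and then four from "b", drawn from the same 128-bit lane of the inputs. A scalar sketch of that ordering plus the Saturate16 clamp (indices only; names illustrative, not the intrinsic):

fn packs_epi32_model(a: [i32; 16], b: [i32; 16]) -> [i16; 32] {
    let sat16 = |x: i32| x.clamp(i16::MIN as i32, i16::MAX as i32) as i16;
    let mut dst = [0i16; 32];
    for lane in 0..4 {
        // each 128-bit output lane: 4 saturated values from a, then 4 from b
        for j in 0..4 {
            dst[lane * 8 + j] = sat16(a[lane * 4 + j]);
            dst[lane * 8 + 4 + j] = sat16(b[lane * 4 + j]);
        }
    }
    dst
}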
Miscellaneous

Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

tmp_dst[7:0] := Saturate8(a[15:0])
tmp_dst[15:8] := Saturate8(a[31:16])
tmp_dst[23:16] := Saturate8(a[47:32])
tmp_dst[31:24] := Saturate8(a[63:48])
tmp_dst[39:32] := Saturate8(a[79:64])
tmp_dst[47:40] := Saturate8(a[95:80])
tmp_dst[55:48] := Saturate8(a[111:96])
tmp_dst[63:56] := Saturate8(a[127:112])
tmp_dst[71:64] := Saturate8(b[15:0])
tmp_dst[79:72] := Saturate8(b[31:16])
tmp_dst[87:80] := Saturate8(b[47:32])
tmp_dst[95:88] := Saturate8(b[63:48])
tmp_dst[103:96] := Saturate8(b[79:64])
tmp_dst[111:104] := Saturate8(b[95:80])
tmp_dst[119:112] := Saturate8(b[111:96])
tmp_dst[127:120] := Saturate8(b[127:112])
tmp_dst[135:128] := Saturate8(a[143:128])
tmp_dst[143:136] := Saturate8(a[159:144])
tmp_dst[151:144] := Saturate8(a[175:160])
tmp_dst[159:152] := Saturate8(a[191:176])
tmp_dst[167:160] := Saturate8(a[207:192])
tmp_dst[175:168] := Saturate8(a[223:208])
tmp_dst[183:176] := Saturate8(a[239:224])
tmp_dst[191:184] := Saturate8(a[255:240])
tmp_dst[199:192] := Saturate8(b[143:128])
tmp_dst[207:200] := Saturate8(b[159:144])
tmp_dst[215:208] := Saturate8(b[175:160])
tmp_dst[223:216] := Saturate8(b[191:176])
tmp_dst[231:224] := Saturate8(b[207:192])
tmp_dst[239:232] := Saturate8(b[223:208])
tmp_dst[247:240] := Saturate8(b[239:224])
tmp_dst[255:248] := Saturate8(b[255:240])
tmp_dst[263:256] := Saturate8(a[271:256])
tmp_dst[271:264] := Saturate8(a[287:272])
tmp_dst[279:272] := Saturate8(a[303:288])
tmp_dst[287:280] := Saturate8(a[319:304])
tmp_dst[295:288] := Saturate8(a[335:320])
tmp_dst[303:296] := Saturate8(a[351:336])
tmp_dst[311:304] := Saturate8(a[367:352])
tmp_dst[319:312] := Saturate8(a[383:368])
tmp_dst[327:320] := Saturate8(b[271:256])
tmp_dst[335:328] := Saturate8(b[287:272])
tmp_dst[343:336] := Saturate8(b[303:288])
tmp_dst[351:344] := Saturate8(b[319:304])
tmp_dst[359:352] := Saturate8(b[335:320])
tmp_dst[367:360] := Saturate8(b[351:336])
tmp_dst[375:368] := Saturate8(b[367:352])
tmp_dst[383:376] := Saturate8(b[383:368])
tmp_dst[391:384] := Saturate8(a[399:384])
tmp_dst[399:392] := Saturate8(a[415:400])
tmp_dst[407:400] := Saturate8(a[431:416])
tmp_dst[415:408] := Saturate8(a[447:432])
tmp_dst[423:416] := Saturate8(a[463:448])
tmp_dst[431:424] := Saturate8(a[479:464])
tmp_dst[439:432] := Saturate8(a[495:480])
tmp_dst[447:440] := Saturate8(a[511:496])
tmp_dst[455:448] := Saturate8(b[399:384])
tmp_dst[463:456] := Saturate8(b[415:400])
tmp_dst[471:464] := Saturate8(b[431:416])
tmp_dst[479:472] := Saturate8(b[447:432])
tmp_dst[487:480] := Saturate8(b[463:448])
tmp_dst[495:488] := Saturate8(b[479:464])
tmp_dst[503:496] := Saturate8(b[495:480])
tmp_dst[511:504] := Saturate8(b[511:496])
FOR j := 0 to 63
	i := j*8
	IF k[j]
		dst[i+7:i] := tmp_dst[i+7:i]
	ELSE
		dst[i+7:i] := src[i+7:i]
	FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h

Miscellaneous

Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

tmp_dst[7:0] := Saturate8(a[15:0])
tmp_dst[15:8] := Saturate8(a[31:16])
tmp_dst[23:16] := Saturate8(a[47:32])
tmp_dst[31:24] := Saturate8(a[63:48])
tmp_dst[39:32] := Saturate8(a[79:64])
tmp_dst[47:40] := Saturate8(a[95:80])
tmp_dst[55:48] := Saturate8(a[111:96])
tmp_dst[63:56] := Saturate8(a[127:112])
tmp_dst[71:64] := Saturate8(b[15:0])
tmp_dst[79:72] := Saturate8(b[31:16])
tmp_dst[87:80] := Saturate8(b[47:32])
tmp_dst[95:88] := Saturate8(b[63:48])
tmp_dst[103:96] := Saturate8(b[79:64])
tmp_dst[111:104] := Saturate8(b[95:80])
tmp_dst[119:112] := Saturate8(b[111:96])
tmp_dst[127:120] := Saturate8(b[127:112])
tmp_dst[135:128] := Saturate8(a[143:128])
tmp_dst[143:136] := Saturate8(a[159:144])
tmp_dst[151:144] := Saturate8(a[175:160])
tmp_dst[159:152] := Saturate8(a[191:176])
tmp_dst[167:160] := Saturate8(a[207:192])
tmp_dst[175:168] := Saturate8(a[223:208])
tmp_dst[183:176] := Saturate8(a[239:224])
tmp_dst[191:184] := Saturate8(a[255:240])
tmp_dst[199:192] := Saturate8(b[143:128])
tmp_dst[207:200] := Saturate8(b[159:144])
tmp_dst[215:208] := Saturate8(b[175:160])
tmp_dst[223:216] := Saturate8(b[191:176])
tmp_dst[231:224] := Saturate8(b[207:192])
tmp_dst[239:232] := Saturate8(b[223:208])
tmp_dst[247:240] := Saturate8(b[239:224])
tmp_dst[255:248] := Saturate8(b[255:240])
tmp_dst[263:256] := Saturate8(a[271:256])
tmp_dst[271:264] := Saturate8(a[287:272])
tmp_dst[279:272] := Saturate8(a[303:288])
tmp_dst[287:280] := Saturate8(a[319:304])
tmp_dst[295:288] := Saturate8(a[335:320])
tmp_dst[303:296] := Saturate8(a[351:336])
tmp_dst[311:304] := Saturate8(a[367:352])
tmp_dst[319:312] := Saturate8(a[383:368])
tmp_dst[327:320] := Saturate8(b[271:256])
tmp_dst[335:328] := Saturate8(b[287:272])
tmp_dst[343:336] := Saturate8(b[303:288])
tmp_dst[351:344] := Saturate8(b[319:304])
tmp_dst[359:352] := Saturate8(b[335:320])
tmp_dst[367:360] := Saturate8(b[351:336])
tmp_dst[375:368] := Saturate8(b[367:352])
tmp_dst[383:376] := Saturate8(b[383:368])
tmp_dst[391:384] := Saturate8(a[399:384])
tmp_dst[399:392] := Saturate8(a[415:400])
tmp_dst[407:400] := Saturate8(a[431:416])
tmp_dst[415:408] := Saturate8(a[447:432])
tmp_dst[423:416] := Saturate8(a[463:448])
tmp_dst[431:424] := Saturate8(a[479:464])
tmp_dst[439:432] := Saturate8(a[495:480])
tmp_dst[447:440] := Saturate8(a[511:496])
tmp_dst[455:448] := Saturate8(b[399:384])
tmp_dst[463:456] := Saturate8(b[415:400])
tmp_dst[471:464] := Saturate8(b[431:416])
tmp_dst[479:472] := Saturate8(b[447:432])
tmp_dst[487:480] := Saturate8(b[463:448])
tmp_dst[495:488] := Saturate8(b[479:464])
tmp_dst[503:496] := Saturate8(b[495:480])
tmp_dst[511:504] := Saturate8(b[511:496])
FOR j := 0 to 63
	i := j*8
	IF k[j]
		dst[i+7:i] := tmp_dst[i+7:i]
	ELSE
		dst[i+7:i] := 0
	FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h

Miscellaneous

Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using signed saturation, and store the results in "dst".

dst[7:0] := Saturate8(a[15:0])
dst[15:8] := Saturate8(a[31:16])
dst[23:16] := Saturate8(a[47:32])
dst[31:24] := Saturate8(a[63:48])
dst[39:32] := Saturate8(a[79:64])
dst[47:40] := Saturate8(a[95:80])
dst[55:48] := Saturate8(a[111:96])
dst[63:56] := Saturate8(a[127:112])
dst[71:64] := Saturate8(b[15:0])
dst[79:72] := Saturate8(b[31:16])
dst[87:80] := Saturate8(b[47:32])
dst[95:88] := Saturate8(b[63:48])
dst[103:96] := Saturate8(b[79:64])
dst[111:104] := Saturate8(b[95:80])
dst[119:112] := Saturate8(b[111:96])
dst[127:120] := Saturate8(b[127:112])
dst[135:128] := Saturate8(a[143:128])
dst[143:136] := Saturate8(a[159:144])
dst[151:144] := Saturate8(a[175:160])
dst[159:152] := Saturate8(a[191:176])
dst[167:160] := Saturate8(a[207:192])
dst[175:168] := Saturate8(a[223:208])
dst[183:176] := Saturate8(a[239:224])
dst[191:184] := Saturate8(a[255:240])
dst[199:192] := Saturate8(b[143:128])
dst[207:200] := Saturate8(b[159:144])
dst[215:208] := Saturate8(b[175:160])
dst[223:216] := Saturate8(b[191:176])
dst[231:224] := Saturate8(b[207:192])
dst[239:232] := Saturate8(b[223:208])
dst[247:240] := Saturate8(b[239:224])
dst[255:248] := Saturate8(b[255:240])
dst[263:256] := Saturate8(a[271:256])
dst[271:264] := Saturate8(a[287:272])
dst[279:272] := Saturate8(a[303:288])
dst[287:280] := Saturate8(a[319:304])
dst[295:288] := Saturate8(a[335:320])
dst[303:296] := Saturate8(a[351:336])
dst[311:304] := Saturate8(a[367:352])
dst[319:312] := Saturate8(a[383:368])
dst[327:320] := Saturate8(b[271:256])
dst[335:328] := Saturate8(b[287:272])
dst[343:336] := Saturate8(b[303:288])
dst[351:344] := Saturate8(b[319:304])
dst[359:352] := Saturate8(b[335:320])
dst[367:360] := Saturate8(b[351:336])
dst[375:368] := Saturate8(b[367:352])
dst[383:376] := Saturate8(b[383:368])
dst[391:384] := Saturate8(a[399:384])
dst[399:392] := Saturate8(a[415:400])
dst[407:400] := Saturate8(a[431:416])
dst[415:408] := Saturate8(a[447:432])
dst[423:416] := Saturate8(a[463:448])
dst[431:424] := Saturate8(a[479:464])
dst[439:432] := Saturate8(a[495:480])
dst[447:440] := Saturate8(a[511:496])
dst[455:448] := Saturate8(b[399:384])
dst[463:456] := Saturate8(b[415:400])
dst[471:464] := Saturate8(b[431:416])
dst[479:472] := Saturate8(b[447:432])
dst[487:480] := Saturate8(b[463:448])
dst[495:488] := Saturate8(b[479:464])
dst[503:496] := Saturate8(b[495:480])
dst[511:504] := Saturate8(b[511:496])
dst[MAX:512] := 0

AVX512BW
immintrin.h
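The same per-128-bit-lane rule applies when packing 16-bit to 8-bit: now eight elements from "a" and then eight from "b" per lane, with Saturate8 clamping into the i8 range. A scalar sketch under the same assumptions as the earlier model:

fn packs_epi16_model(a: [i16; 32], b: [i16; 32]) -> [i8; 64] {
    let sat8 = |x: i16| x.clamp(i8::MIN as i16, i8::MAX as i16) as i8;
    let mut dst = [0i8; 64];
    for lane in 0..4 {
        // each 128-bit output lane: 8 saturated values from a, then 8 from b
        for j in 0..8 {
            dst[lane * 16 + j] = sat8(a[lane * 8 + j]);
            dst[lane * 16 + 8 + j] = sat8(b[lane * 8 + j]);
        }
    }
    dst
}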
Miscellaneous

Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit integers using unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

tmp_dst[15:0] := SaturateU16(a[31:0])
tmp_dst[31:16] := SaturateU16(a[63:32])
tmp_dst[47:32] := SaturateU16(a[95:64])
tmp_dst[63:48] := SaturateU16(a[127:96])
tmp_dst[79:64] := SaturateU16(b[31:0])
tmp_dst[95:80] := SaturateU16(b[63:32])
tmp_dst[111:96] := SaturateU16(b[95:64])
tmp_dst[127:112] := SaturateU16(b[127:96])
tmp_dst[143:128] := SaturateU16(a[159:128])
tmp_dst[159:144] := SaturateU16(a[191:160])
tmp_dst[175:160] := SaturateU16(a[223:192])
tmp_dst[191:176] := SaturateU16(a[255:224])
tmp_dst[207:192] := SaturateU16(b[159:128])
tmp_dst[223:208] := SaturateU16(b[191:160])
tmp_dst[239:224] := SaturateU16(b[223:192])
tmp_dst[255:240] := SaturateU16(b[255:224])
tmp_dst[271:256] := SaturateU16(a[287:256])
tmp_dst[287:272] := SaturateU16(a[319:288])
tmp_dst[303:288] := SaturateU16(a[351:320])
tmp_dst[319:304] := SaturateU16(a[383:352])
tmp_dst[335:320] := SaturateU16(b[287:256])
tmp_dst[351:336] := SaturateU16(b[319:288])
tmp_dst[367:352] := SaturateU16(b[351:320])
tmp_dst[383:368] := SaturateU16(b[383:352])
tmp_dst[399:384] := SaturateU16(a[415:384])
tmp_dst[415:400] := SaturateU16(a[447:416])
tmp_dst[431:416] := SaturateU16(a[479:448])
tmp_dst[447:432] := SaturateU16(a[511:480])
tmp_dst[463:448] := SaturateU16(b[415:384])
tmp_dst[479:464] := SaturateU16(b[447:416])
tmp_dst[495:480] := SaturateU16(b[479:448])
tmp_dst[511:496] := SaturateU16(b[511:480])
FOR j := 0 to 31
	i := j*16
	IF k[j]
		dst[i+15:i] := tmp_dst[i+15:i]
	ELSE
		dst[i+15:i] := src[i+15:i]
	FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h

Miscellaneous

Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit integers using unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

tmp_dst[15:0] := SaturateU16(a[31:0])
tmp_dst[31:16] := SaturateU16(a[63:32])
tmp_dst[47:32] := SaturateU16(a[95:64])
tmp_dst[63:48] := SaturateU16(a[127:96])
tmp_dst[79:64] := SaturateU16(b[31:0])
tmp_dst[95:80] := SaturateU16(b[63:32])
tmp_dst[111:96] := SaturateU16(b[95:64])
tmp_dst[127:112] := SaturateU16(b[127:96])
tmp_dst[143:128] := SaturateU16(a[159:128])
tmp_dst[159:144] := SaturateU16(a[191:160])
tmp_dst[175:160] := SaturateU16(a[223:192])
tmp_dst[191:176] := SaturateU16(a[255:224])
tmp_dst[207:192] := SaturateU16(b[159:128])
tmp_dst[223:208] := SaturateU16(b[191:160])
tmp_dst[239:224] := SaturateU16(b[223:192])
tmp_dst[255:240] := SaturateU16(b[255:224])
tmp_dst[271:256] := SaturateU16(a[287:256])
tmp_dst[287:272] := SaturateU16(a[319:288])
tmp_dst[303:288] := SaturateU16(a[351:320])
tmp_dst[319:304] := SaturateU16(a[383:352])
tmp_dst[335:320] := SaturateU16(b[287:256])
tmp_dst[351:336] := SaturateU16(b[319:288])
tmp_dst[367:352] := SaturateU16(b[351:320])
tmp_dst[383:368] := SaturateU16(b[383:352])
tmp_dst[399:384] := SaturateU16(a[415:384])
tmp_dst[415:400] := SaturateU16(a[447:416])
tmp_dst[431:416] := SaturateU16(a[479:448])
tmp_dst[447:432] := SaturateU16(a[511:480])
tmp_dst[463:448] := SaturateU16(b[415:384])
tmp_dst[479:464] := SaturateU16(b[447:416])
tmp_dst[495:480] := SaturateU16(b[479:448])
tmp_dst[511:496] := SaturateU16(b[511:480])
FOR j := 0 to 31
	i := j*16
	IF k[j]
		dst[i+15:i] := tmp_dst[i+15:i]
	ELSE
		dst[i+15:i] := 0
	FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h

Miscellaneous

Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit integers using unsigned saturation, and store the results in "dst".

dst[15:0] := SaturateU16(a[31:0])
dst[31:16] := SaturateU16(a[63:32])
dst[47:32] := SaturateU16(a[95:64])
dst[63:48] := SaturateU16(a[127:96])
dst[79:64] := SaturateU16(b[31:0])
dst[95:80] := SaturateU16(b[63:32])
dst[111:96] := SaturateU16(b[95:64])
dst[127:112] := SaturateU16(b[127:96])
dst[143:128] := SaturateU16(a[159:128])
dst[159:144] := SaturateU16(a[191:160])
dst[175:160] := SaturateU16(a[223:192])
dst[191:176] := SaturateU16(a[255:224])
dst[207:192] := SaturateU16(b[159:128])
dst[223:208] := SaturateU16(b[191:160])
dst[239:224] := SaturateU16(b[223:192])
dst[255:240] := SaturateU16(b[255:224])
dst[271:256] := SaturateU16(a[287:256])
dst[287:272] := SaturateU16(a[319:288])
dst[303:288] := SaturateU16(a[351:320])
dst[319:304] := SaturateU16(a[383:352])
dst[335:320] := SaturateU16(b[287:256])
dst[351:336] := SaturateU16(b[319:288])
dst[367:352] := SaturateU16(b[351:320])
dst[383:368] := SaturateU16(b[383:352])
dst[399:384] := SaturateU16(a[415:384])
dst[415:400] := SaturateU16(a[447:416])
dst[431:416] := SaturateU16(a[479:448])
dst[447:432] := SaturateU16(a[511:480])
dst[463:448] := SaturateU16(b[415:384])
dst[479:464] := SaturateU16(b[447:416])
dst[495:480] := SaturateU16(b[479:448])
dst[511:496] := SaturateU16(b[511:480])
dst[MAX:512] := 0

AVX512BW
immintrin.h
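These unsigned-saturation packs take signed inputs, so negative values clamp to 0 and values above 65535 clamp to 65535; this is a conversion, not a reinterpretation of the bits. The SaturateU16 clamp in isolation, as plain Rust:

fn saturate_u16(x: i32) -> u16 {
    x.clamp(0, u16::MAX as i32) as u16
}

fn main() {
    assert_eq!(saturate_u16(-5), 0); // negative input clamps to 0, not 0xFFFB
    assert_eq!(saturate_u16(70_000), 65_535);
    assert_eq!(saturate_u16(1_234), 1_234);
}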
Miscellaneous

Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

tmp_dst[7:0] := SaturateU8(a[15:0])
tmp_dst[15:8] := SaturateU8(a[31:16])
tmp_dst[23:16] := SaturateU8(a[47:32])
tmp_dst[31:24] := SaturateU8(a[63:48])
tmp_dst[39:32] := SaturateU8(a[79:64])
tmp_dst[47:40] := SaturateU8(a[95:80])
tmp_dst[55:48] := SaturateU8(a[111:96])
tmp_dst[63:56] := SaturateU8(a[127:112])
tmp_dst[71:64] := SaturateU8(b[15:0])
tmp_dst[79:72] := SaturateU8(b[31:16])
tmp_dst[87:80] := SaturateU8(b[47:32])
tmp_dst[95:88] := SaturateU8(b[63:48])
tmp_dst[103:96] := SaturateU8(b[79:64])
tmp_dst[111:104] := SaturateU8(b[95:80])
tmp_dst[119:112] := SaturateU8(b[111:96])
tmp_dst[127:120] := SaturateU8(b[127:112])
tmp_dst[135:128] := SaturateU8(a[143:128])
tmp_dst[143:136] := SaturateU8(a[159:144])
tmp_dst[151:144] := SaturateU8(a[175:160])
tmp_dst[159:152] := SaturateU8(a[191:176])
tmp_dst[167:160] := SaturateU8(a[207:192])
tmp_dst[175:168] := SaturateU8(a[223:208])
tmp_dst[183:176] := SaturateU8(a[239:224])
tmp_dst[191:184] := SaturateU8(a[255:240])
tmp_dst[199:192] := SaturateU8(b[143:128])
tmp_dst[207:200] := SaturateU8(b[159:144])
tmp_dst[215:208] := SaturateU8(b[175:160])
tmp_dst[223:216] := SaturateU8(b[191:176])
tmp_dst[231:224] := SaturateU8(b[207:192])
tmp_dst[239:232] := SaturateU8(b[223:208])
tmp_dst[247:240] := SaturateU8(b[239:224])
tmp_dst[255:248] := SaturateU8(b[255:240])
tmp_dst[263:256] := SaturateU8(a[271:256])
tmp_dst[271:264] := SaturateU8(a[287:272])
tmp_dst[279:272] := SaturateU8(a[303:288])
tmp_dst[287:280] := SaturateU8(a[319:304])
tmp_dst[295:288] := SaturateU8(a[335:320])
tmp_dst[303:296] := SaturateU8(a[351:336])
tmp_dst[311:304] := SaturateU8(a[367:352])
tmp_dst[319:312] := SaturateU8(a[383:368])
tmp_dst[327:320] := SaturateU8(b[271:256])
tmp_dst[335:328] := SaturateU8(b[287:272])
tmp_dst[343:336] := SaturateU8(b[303:288])
tmp_dst[351:344] := SaturateU8(b[319:304])
tmp_dst[359:352] := SaturateU8(b[335:320])
tmp_dst[367:360] := SaturateU8(b[351:336])
tmp_dst[375:368] := SaturateU8(b[367:352])
tmp_dst[383:376] := SaturateU8(b[383:368])
tmp_dst[391:384] := SaturateU8(a[399:384])
tmp_dst[399:392] := SaturateU8(a[415:400])
tmp_dst[407:400] := SaturateU8(a[431:416])
tmp_dst[415:408] := SaturateU8(a[447:432])
tmp_dst[423:416] := SaturateU8(a[463:448])
tmp_dst[431:424] := SaturateU8(a[479:464])
tmp_dst[439:432] := SaturateU8(a[495:480])
tmp_dst[447:440] := SaturateU8(a[511:496])
tmp_dst[455:448] := SaturateU8(b[399:384])
tmp_dst[463:456] := SaturateU8(b[415:400])
tmp_dst[471:464] := SaturateU8(b[431:416])
tmp_dst[479:472] := SaturateU8(b[447:432])
tmp_dst[487:480] := SaturateU8(b[463:448])
tmp_dst[495:488] := SaturateU8(b[479:464])
tmp_dst[503:496] := SaturateU8(b[495:480])
tmp_dst[511:504] := SaturateU8(b[511:496])
FOR j := 0 to 63
	i := j*8
	IF k[j]
		dst[i+7:i] := tmp_dst[i+7:i]
	ELSE
		dst[i+7:i] := src[i+7:i]
	FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h

Miscellaneous

Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

tmp_dst[7:0] := SaturateU8(a[15:0])
tmp_dst[15:8] := SaturateU8(a[31:16])
tmp_dst[23:16] := SaturateU8(a[47:32])
tmp_dst[31:24] := SaturateU8(a[63:48])
tmp_dst[39:32] := SaturateU8(a[79:64])
tmp_dst[47:40] := SaturateU8(a[95:80])
tmp_dst[55:48] := SaturateU8(a[111:96])
tmp_dst[63:56] := SaturateU8(a[127:112])
tmp_dst[71:64] := SaturateU8(b[15:0])
tmp_dst[79:72] := SaturateU8(b[31:16])
tmp_dst[87:80] := SaturateU8(b[47:32])
tmp_dst[95:88] := SaturateU8(b[63:48])
tmp_dst[103:96] := SaturateU8(b[79:64])
tmp_dst[111:104] := SaturateU8(b[95:80])
tmp_dst[119:112] := SaturateU8(b[111:96])
tmp_dst[127:120] := SaturateU8(b[127:112])
tmp_dst[135:128] := SaturateU8(a[143:128])
tmp_dst[143:136] := SaturateU8(a[159:144])
tmp_dst[151:144] := SaturateU8(a[175:160])
tmp_dst[159:152] := SaturateU8(a[191:176])
tmp_dst[167:160] := SaturateU8(a[207:192])
tmp_dst[175:168] := SaturateU8(a[223:208])
tmp_dst[183:176] := SaturateU8(a[239:224])
tmp_dst[191:184] := SaturateU8(a[255:240])
tmp_dst[199:192] := SaturateU8(b[143:128])
tmp_dst[207:200] := SaturateU8(b[159:144])
tmp_dst[215:208] := SaturateU8(b[175:160])
tmp_dst[223:216] := SaturateU8(b[191:176])
tmp_dst[231:224] := SaturateU8(b[207:192])
tmp_dst[239:232] := SaturateU8(b[223:208])
tmp_dst[247:240] := SaturateU8(b[239:224])
tmp_dst[255:248] := SaturateU8(b[255:240])
tmp_dst[263:256] := SaturateU8(a[271:256])
tmp_dst[271:264] := SaturateU8(a[287:272])
tmp_dst[279:272] := SaturateU8(a[303:288])
tmp_dst[287:280] := SaturateU8(a[319:304])
tmp_dst[295:288] := SaturateU8(a[335:320])
tmp_dst[303:296] := SaturateU8(a[351:336])
tmp_dst[311:304] := SaturateU8(a[367:352])
tmp_dst[319:312] := SaturateU8(a[383:368])
tmp_dst[327:320] := SaturateU8(b[271:256])
tmp_dst[335:328] := SaturateU8(b[287:272])
tmp_dst[343:336] := SaturateU8(b[303:288])
tmp_dst[351:344] := SaturateU8(b[319:304])
tmp_dst[359:352] := SaturateU8(b[335:320])
tmp_dst[367:360] := SaturateU8(b[351:336])
tmp_dst[375:368] := SaturateU8(b[367:352])
tmp_dst[383:376] := SaturateU8(b[383:368])
tmp_dst[391:384] := SaturateU8(a[399:384])
tmp_dst[399:392] := SaturateU8(a[415:400])
tmp_dst[407:400] := SaturateU8(a[431:416])
tmp_dst[415:408] := SaturateU8(a[447:432])
tmp_dst[423:416] := SaturateU8(a[463:448])
tmp_dst[431:424] := SaturateU8(a[479:464])
tmp_dst[439:432] := SaturateU8(a[495:480])
tmp_dst[447:440] := SaturateU8(a[511:496])
tmp_dst[455:448] := SaturateU8(b[399:384])
tmp_dst[463:456] := SaturateU8(b[415:400])
tmp_dst[471:464] := SaturateU8(b[431:416])
tmp_dst[479:472] := SaturateU8(b[447:432])
tmp_dst[487:480] := SaturateU8(b[463:448])
tmp_dst[495:488] := SaturateU8(b[479:464])
tmp_dst[503:496] := SaturateU8(b[495:480])
tmp_dst[511:504] := SaturateU8(b[511:496])
FOR j := 0 to 63
	i := j*8
	IF k[j]
		dst[i+7:i] := tmp_dst[i+7:i]
	ELSE
		dst[i+7:i] := 0
	FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h

Miscellaneous

Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using unsigned saturation, and store the results in "dst".

dst[7:0] := SaturateU8(a[15:0])
dst[15:8] := SaturateU8(a[31:16])
dst[23:16] := SaturateU8(a[47:32])
dst[31:24] := SaturateU8(a[63:48])
dst[39:32] := SaturateU8(a[79:64])
dst[47:40] := SaturateU8(a[95:80])
dst[55:48] := SaturateU8(a[111:96])
dst[63:56] := SaturateU8(a[127:112])
dst[71:64] := SaturateU8(b[15:0])
dst[79:72] := SaturateU8(b[31:16])
dst[87:80] := SaturateU8(b[47:32])
dst[95:88] := SaturateU8(b[63:48])
dst[103:96] := SaturateU8(b[79:64])
dst[111:104] := SaturateU8(b[95:80])
dst[119:112] := SaturateU8(b[111:96])
dst[127:120] := SaturateU8(b[127:112])
dst[135:128] := SaturateU8(a[143:128])
dst[143:136] := SaturateU8(a[159:144])
dst[151:144] := SaturateU8(a[175:160])
dst[159:152] := SaturateU8(a[191:176])
dst[167:160] := SaturateU8(a[207:192])
dst[175:168] := SaturateU8(a[223:208])
dst[183:176] := SaturateU8(a[239:224])
dst[191:184] := SaturateU8(a[255:240])
dst[199:192] := SaturateU8(b[143:128])
dst[207:200] := SaturateU8(b[159:144])
dst[215:208] := SaturateU8(b[175:160])
dst[223:216] := SaturateU8(b[191:176])
dst[231:224] := SaturateU8(b[207:192])
dst[239:232] := SaturateU8(b[223:208])
dst[247:240] := SaturateU8(b[239:224])
dst[255:248] := SaturateU8(b[255:240])
dst[263:256] := SaturateU8(a[271:256])
dst[271:264] := SaturateU8(a[287:272])
dst[279:272] := SaturateU8(a[303:288])
dst[287:280] := SaturateU8(a[319:304])
dst[295:288] := SaturateU8(a[335:320])
dst[303:296] := SaturateU8(a[351:336])
dst[311:304] := SaturateU8(a[367:352])
dst[319:312] := SaturateU8(a[383:368])
dst[327:320] := SaturateU8(b[271:256])
dst[335:328] := SaturateU8(b[287:272])
dst[343:336] := SaturateU8(b[303:288])
dst[351:344] := SaturateU8(b[319:304])
dst[359:352] := SaturateU8(b[335:320])
dst[367:360] := SaturateU8(b[351:336])
dst[375:368] := SaturateU8(b[367:352])
dst[383:376] := SaturateU8(b[383:368])
dst[391:384] := SaturateU8(a[399:384])
dst[399:392] := SaturateU8(a[415:400])
dst[407:400] := SaturateU8(a[431:416])
dst[415:408] := SaturateU8(a[447:432])
dst[423:416] := SaturateU8(a[463:448])
dst[431:424] := SaturateU8(a[479:464])
dst[439:432] := SaturateU8(a[495:480])
dst[447:440] := SaturateU8(a[511:496])
dst[455:448] := SaturateU8(b[399:384])
dst[463:456] := SaturateU8(b[415:400])
dst[471:464] := SaturateU8(b[431:416])
dst[479:472] := SaturateU8(b[447:432])
dst[487:480] := SaturateU8(b[463:448])
dst[495:488] := SaturateU8(b[479:464])
dst[503:496] := SaturateU8(b[495:480])
dst[511:504] := SaturateU8(b[511:496])
dst[MAX:512] := 0

AVX512BW
immintrin.h
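The pair of entries above appears to correspond to `_mm512_packus_epi16` and its zero-masked form (VPACKUSWB): within each 128-bit lane, eight words from "a" and eight from "b" are narrowed to bytes with unsigned saturation. A minimal sketch of that behaviour through the Rust bindings; the function name and values are illustrative, and it assumes a nightly toolchain where the unstable AVX512BW intrinsics are available and the CPU supports avx512bw:

use core::arch::x86_64::*;

// Sketch: assumes nightly AVX512BW bindings; values are illustrative.
#[target_feature(enable = "avx512bw")]
unsafe fn packus_demo() {
    let a = _mm512_set1_epi16(-1); // negative saturates to 0 as u8
    let b = _mm512_set1_epi16(300); // above 255 saturates to 255
    let packed = _mm512_packus_epi16(a, b);
    let bytes: [u8; 64] = core::mem::transmute(packed);
    // Each 128-bit lane holds 8 bytes from `a` followed by 8 bytes from `b`.
    assert_eq!(bytes[0], 0);
    assert_eq!(bytes[8], 255);
    // The maskz variant additionally zeroes bytes whose mask bit is clear.
    let masked = _mm512_maskz_packus_epi16(0xFF, a, b);
    let bytes: [u8; 64] = core::mem::transmute(masked);
    assert_eq!(bytes[8], 0); // mask bit 8 is not set, so the byte is zeroed
}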
+ Convert
- - - Convert packed signed 16-bit integers in "a" to packed 8-bit integers with - signed saturation, and store the results in "dst". - - FOR j := 0 to 31 - i := 16*j - l := 8*j - dst[l+7:l] := Saturate8(a[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW -
immintrin.h
- Convert + + + Convert packed signed 16-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst". + +FOR j := 0 to 31 + i := 16*j + l := 8*j + dst[l+7:l] := Saturate8(a[i+15:i]) +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW +
immintrin.h
+ Convert
- - - - - Convert packed signed 16-bit integers in "a" to packed 8-bit integers with - signed saturation, and store the results in "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 31 - i := 16*j - l := 8*j - IF k[j] - dst[l+7:l] := Saturate8(a[i+15:i]) - ELSE - dst[l+7:l] := src[l+7:l] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW -
immintrin.h
- Convert + + + + + Convert packed signed 16-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := 16*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate8(a[i+15:i]) + ELSE + dst[l+7:l] := src[l+7:l] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW +
immintrin.h
+ Convert
- Store - - - - - Convert packed signed 16-bit integers in "a" to packed 8-bit integers with - signed saturation, and store the active results (those with their respective bit set in - writemask "k") to unaligned memory at "base_addr". - - FOR j := 0 to 31 - i := 16*j - l := 8*j - IF k[j] - MEM[base_addr+l+7:base_addr+l] := Saturate8(a[i+15:i]) - FI - ENDFOR - - - AVX512BW -
immintrin.h
- Convert + Store + + + + + Convert packed signed 16-bit integers in "a" to packed 8-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 31 + i := 16*j + l := 8*j + IF k[j] + MEM[base_addr+l+7:base_addr+l] := Saturate8(a[i+15:i]) + FI +ENDFOR + + + AVX512BW +
immintrin.h
+ Convert
- - - - Convert packed signed 16-bit integers in "a" to packed 8-bit integers with - signed saturation, and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 31 - i := 16*j - l := 8*j - IF k[j] - dst[l+7:l] := Saturate8(a[i+15:i]) - ELSE - dst[l+7:l] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW -
immintrin.h
- Convert + + + + Convert packed signed 16-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := 16*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate8(a[i+15:i]) + ELSE + dst[l+7:l] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW +
immintrin.h
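The four entries above cover the signed-saturating 16-to-8-bit narrowing (VPMOVSWB) in plain, merge-masked, masked-store, and zero-masked forms. A sketch of the plain and store forms under the same nightly-toolchain assumptions; the buffer layout follows my reading of the pseudocode:

use core::arch::x86_64::*;

// Sketch: assumes nightly AVX512BW bindings; values are illustrative.
#[target_feature(enable = "avx512bw")]
unsafe fn narrow_signed_demo() {
    let a = _mm512_set1_epi16(1000); // outside the i8 range
    let bytes: [i8; 32] = core::mem::transmute(_mm512_cvtsepi16_epi8(a));
    assert!(bytes.iter().all(|&x| x == 127)); // Saturate8(1000) == 127
    // The storeu form writes only the bytes whose mask bit is set.
    let mut buf = [0i8; 32];
    _mm512_mask_cvtsepi16_storeu_epi8(buf.as_mut_ptr(), 0x0000_000F, a);
    assert_eq!(&buf[..4], &[127; 4]);
    assert_eq!(buf[4], 0); // inactive bytes are left untouched
}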
+ Convert
- - - Sign extend packed 8-bit integers in "a" to packed 16-bit integers, and store - the results in "dst". - - FOR j := 0 to 31 - i := j*8 - l := j*16 - dst[l+15:l] := SignExtend16(a[i+7:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512BW -
immintrin.h
- Convert + + + Sign extend packed 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst". + +FOR j := 0 to 31 + i := j*8 + l := j*16 + dst[l+15:l] := SignExtend16(a[i+7:i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512BW +
immintrin.h
+ Convert
- - - - - Sign extend packed 8-bit integers in "a" to packed 16-bit integers, and store - the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 31 - i := j*8 - l := j*16 - IF k[j] - dst[l+15:l] := SignExtend16(a[i+7:i]) - ELSE - dst[l+15:l] := src[l+15:l] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512BW -
immintrin.h
- Convert + + + + + Sign extend packed 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + l := j*16 + IF k[j] + dst[l+15:l] := SignExtend16(a[i+7:i]) + ELSE + dst[l+15:l] := src[l+15:l] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512BW +
immintrin.h
+ Convert
- - - - Sign extend packed 8-bit integers in "a" to packed 16-bit integers, and store - the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding - mask bit is not set). - - FOR j := 0 to 31 - i := j*8 - l := j*16 - IF k[j] - dst[l+15:l] := SignExtend16(a[i+7:i]) - ELSE - dst[l+15:l] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512BW -
immintrin.h
- Convert + + + + Sign extend packed 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + l := j*16 + IF k[j] + dst[l+15:l] := SignExtend16(a[i+7:i]) + ELSE + dst[l+15:l] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512BW +
immintrin.h
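The three entries above describe 8-to-16-bit sign extension (VPMOVSXBW) in plain, merge-masked, and zero-masked forms. A brief sketch, same nightly assumptions as the earlier examples:

use core::arch::x86_64::*;

// Sketch: assumes nightly AVX512BW bindings; values are illustrative.
#[target_feature(enable = "avx512bw")]
unsafe fn sign_extend_demo() {
    let a = _mm256_set1_epi8(-5);
    let words: [i16; 32] = core::mem::transmute(_mm512_cvtepi8_epi16(a));
    assert!(words.iter().all(|&w| w == -5)); // the sign bit is propagated
    // maskz form: lanes whose mask bit is clear become zero.
    let words: [i16; 32] = core::mem::transmute(_mm512_maskz_cvtepi8_epi16(0x1, a));
    assert_eq!((words[0], words[1]), (-5, 0));
}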
+ Convert
- - - Convert packed unsigned 16-bit integers in "a" to packed unsigned 8-bit - integers with unsigned saturation, and store the results in "dst". - - FOR j := 0 to 31 - i := 16*j - l := 8*j - dst[l+7:l] := SaturateU8(a[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW -
immintrin.h
- Convert + + + Convert packed unsigned 16-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst". + +FOR j := 0 to 31 + i := 16*j + l := 8*j + dst[l+7:l] := SaturateU8(a[i+15:i]) +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW +
immintrin.h
+ Convert
- - - - - Convert packed unsigned 16-bit integers in "a" to packed unsigned 8-bit - integers with unsigned saturation, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 31 - i := 16*j - l := 8*j - IF k[j] - dst[l+7:l] := SaturateU8(a[i+15:i]) - ELSE - dst[l+7:l] := src[l+7:l] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW -
immintrin.h
- Convert + + + + + Convert packed unsigned 16-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := 16*j + l := 8*j + IF k[j] + dst[l+7:l] := SaturateU8(a[i+15:i]) + ELSE + dst[l+7:l] := src[l+7:l] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW +
immintrin.h
+ Convert
- Store - - - - - Convert packed unsigned 16-bit integers in "a" to packed unsigned 8-bit - integers with unsigned saturation, and store the active results (those with their - respective bit set in writemask "k") to unaligned memory at "base_addr". - - FOR j := 0 to 31 - i := 16*j - l := 8*j - IF k[j] - MEM[base_addr+l+7:base_addr+l] := SaturateU8(a[i+15:i]) - FI - ENDFOR - - - AVX512BW -
immintrin.h
- Convert + Store + + + + + Convert packed unsigned 16-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 31 + i := 16*j + l := 8*j + IF k[j] + MEM[base_addr+l+7:base_addr+l] := SaturateU8(a[i+15:i]) + FI +ENDFOR + + + AVX512BW +
immintrin.h
+ Convert
- - - - Convert packed unsigned 16-bit integers in "a" to packed unsigned 8-bit - integers with unsigned saturation, and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 31 - i := 16*j - l := 8*j - IF k[j] - dst[l+7:l] := SaturateU8(a[i+15:i]) - ELSE - dst[l+7:l] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW -
immintrin.h
- Convert + + + + Convert packed unsigned 16-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := 16*j + l := 8*j + IF k[j] + dst[l+7:l] := SaturateU8(a[i+15:i]) + ELSE + dst[l+7:l] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW +
immintrin.h
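The entries above are the unsigned-saturating counterpart (VPMOVUSWB) of the narrowing shown earlier. A one-case sketch under the same assumptions:

use core::arch::x86_64::*;

// Sketch: assumes nightly AVX512BW bindings; values are illustrative.
#[target_feature(enable = "avx512bw")]
unsafe fn narrow_unsigned_demo() {
    let a = _mm512_set1_epi16(0x1FF); // 511, above u8::MAX
    let bytes: [u8; 32] = core::mem::transmute(_mm512_cvtusepi16_epi8(a));
    assert!(bytes.iter().all(|&x| x == 255)); // SaturateU8(511) == 255
}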
+ Convert
- - - Convert packed 16-bit integers in "a" to packed 8-bit integers with truncation, - and store the results in "dst". - - FOR j := 0 to 31 - i := 16*j - l := 8*j - dst[l+7:l] := Truncate8(a[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW -
immintrin.h
- Convert + + + Convert packed 16-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 31 + i := 16*j + l := 8*j + dst[l+7:l] := Truncate8(a[i+15:i]) +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW +
immintrin.h
+ Convert
- - - - - Convert packed 16-bit integers in "a" to packed 8-bit integers with truncation, - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 31 - i := 16*j - l := 8*j - IF k[j] - dst[l+7:l] := Truncate8(a[i+15:i]) - ELSE - dst[l+7:l] := src[l+7:l] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW -
immintrin.h
- Convert + + + + + Convert packed 16-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := 16*j + l := 8*j + IF k[j] + dst[l+7:l] := Truncate8(a[i+15:i]) + ELSE + dst[l+7:l] := src[l+7:l] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW +
immintrin.h
+ Convert
- Store - - - - - Convert packed 16-bit integers in "a" to packed 8-bit integers with truncation, - and store the active results (those with their respective bit set in writemask "k") to - unaligned memory at "base_addr". - - FOR j := 0 to 31 - i := 16*j - l := 8*j - IF k[j] - MEM[base_addr+l+7:base_addr+l] := Truncate8(a[i+15:i]) - FI - ENDFOR - - - AVX512BW -
immintrin.h
- Convert + Store + + + + + Convert packed 16-bit integers in "a" to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 31 + i := 16*j + l := 8*j + IF k[j] + MEM[base_addr+l+7:base_addr+l] := Truncate8(a[i+15:i]) + FI +ENDFOR + + + AVX512BW +
immintrin.h
+ Convert
- - - - Convert packed 16-bit integers in "a" to packed 8-bit integers with truncation, - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 31 - i := 16*j - l := 8*j - IF k[j] - dst[l+7:l] := Truncate8(a[i+15:i]) - ELSE - dst[l+7:l] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512BW -
immintrin.h
- Convert + + + + Convert packed 16-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := 16*j + l := 8*j + IF k[j] + dst[l+7:l] := Truncate8(a[i+15:i]) + ELSE + dst[l+7:l] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512BW +
immintrin.h
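The entries above narrow by truncation (VPMOVWB) rather than saturation, so out-of-range words wrap instead of clamping. A sketch contrasting it with the saturating forms, same assumptions:

use core::arch::x86_64::*;

// Sketch: assumes nightly AVX512BW bindings; values are illustrative.
#[target_feature(enable = "avx512bw")]
unsafe fn truncate_demo() {
    let a = _mm512_set1_epi16(0x1234);
    let bytes: [u8; 32] = core::mem::transmute(_mm512_cvtepi16_epi8(a));
    assert!(bytes.iter().all(|&x| x == 0x34)); // high byte simply dropped
}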
+ Convert
- - - Zero extend packed unsigned 8-bit integers in "a" to packed 16-bit integers, - and store the results in "dst". - - FOR j := 0 to 31 - i := j*8 - l := j*16 - dst[l+15:l] := ZeroExtend16(a[i+7:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512BW -
immintrin.h
- Convert + + + Zero extend packed unsigned 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst". + +FOR j := 0 to 31 + i := j*8 + l := j*16 + dst[l+15:l] := ZeroExtend16(a[i+7:i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512BW +
immintrin.h
+ Convert
- - - - - Zero extend packed unsigned 8-bit integers in "a" to packed 16-bit integers, - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 31 - i := j*8 - l := j*16 - IF k[j] - dst[l+15:l] := ZeroExtend16(a[i+7:i]) - ELSE - dst[l+15:l] := src[l+15:l] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512BW -
immintrin.h
- Convert + + + + + Zero extend packed unsigned 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + l := j*16 + IF k[j] + dst[l+15:l] := ZeroExtend16(a[i+7:i]) + ELSE + dst[l+15:l] := src[l+15:l] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512BW +
immintrin.h
+ Convert
- - - - Zero extend packed unsigned 8-bit integers in "a" to packed 16-bit integers, - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 31 - i := j*8 - l := j*16 - IF k[j] - dst[l+15:l] := ZeroExtend16(a[i+7:i]) - ELSE - dst[l+15:l] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512BW -
immintrin.h
- Convert + + + + Zero extend packed unsigned 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + l := j*16 + IF k[j] + dst[l+15:l] := ZeroExtend16(a[i+7:i]) + ELSE + dst[l+15:l] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512BW +
immintrin.h
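The entries above are the zero-extending widening (VPMOVZXBW); unlike the sign-extending form earlier, the upper byte of each result is always zero. A sketch, same assumptions:

use core::arch::x86_64::*;

// Sketch: assumes nightly AVX512BW bindings; values are illustrative.
#[target_feature(enable = "avx512bw")]
unsafe fn zero_extend_demo() {
    let a = _mm256_set1_epi8(-1); // bit pattern 0xFF
    let words: [u16; 32] = core::mem::transmute(_mm512_cvtepu8_epi16(a));
    assert!(words.iter().all(|&w| w == 0x00FF)); // zero-, not sign-extended
}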
+ Convert
- - - - - Broadcast 8-bit integer "a" to all elements of "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 63 - i := j*8 - IF k[j] - dst[i+7:i] := a[7:0] - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512BW -
immintrin.h
- Set + + + + + Broadcast 8-bit integer "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := a[7:0] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512BW +
immintrin.h
+ Set
- - - - Broadcast 8-bit integer "a" to all elements of "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 63 - i := j*8 - IF k[j] - dst[i+7:i] := a[7:0] - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512BW -
immintrin.h
- Set + + + + Broadcast 8-bit integer "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := a[7:0] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512BW +
immintrin.h
+ Set
- - - - - Broadcast 16-bit integer "a" to all elements of "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := a[15:0] - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512BW -
immintrin.h
- Set + + + + + Broadcast 16-bit integer "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := a[15:0] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512BW +
immintrin.h
+ Set
- - - - Broadcast the low packed 16-bit integer from "a" to all elements of "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := a[15:0] - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512BW -
immintrin.h
- Set + + + + Broadcast the low packed 16-bit integer from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := a[15:0] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512BW +
immintrin.h
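The four entries above describe the masked byte and word broadcasts (VPBROADCASTB/VPBROADCASTW): merge masking keeps inactive lanes from "src", zero masking clears them. A sketch of the byte forms under the same nightly assumptions:

use core::arch::x86_64::*;

// Sketch: assumes nightly AVX512BW bindings; values are illustrative.
#[target_feature(enable = "avx512bw")]
unsafe fn broadcast_demo() {
    let src = _mm512_set1_epi8(7);
    let merged: [i8; 64] = core::mem::transmute(_mm512_mask_set1_epi8(src, 0x1, 42));
    assert_eq!((merged[0], merged[1]), (42, 7)); // lane 1 kept from src
    let zeroed: [i8; 64] = core::mem::transmute(_mm512_maskz_set1_epi8(0x1, 42));
    assert_eq!((zeroed[0], zeroed[1]), (42, 0)); // lane 1 zeroed
}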
+ Set
- - - - - Compare packed signed 8-bit integers in "a" and "b" based on the comparison - operand specified by "imm8", and store the results in mask vector "k". - CASE (imm8[2:0]) OF - 0: OP := _MM_CMPINT_EQ - 1: OP := _MM_CMPINT_LT - 2: OP := _MM_CMPINT_LE - 3: OP := _MM_CMPINT_FALSE - 4: OP := _MM_CMPINT_NE - 5: OP := _MM_CMPINT_NLT - 6: OP := _MM_CMPINT_NLE - 7: OP := _MM_CMPINT_TRUE - ESAC - FOR j := 0 to 63 - i := j*8 - k[j] := ( a[i+7:i] OP b[i+7:i] ) ? 1 : 0 - ENDFOR - k[MAX:64] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + + Compare packed signed 8-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + CASE (imm8[2:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 63 + i := j*8 + k[j] := ( a[i+7:i] OP b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:64] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - Compare packed signed 8-bit integers in "a" and "b" for equality, and store the - results in mask vector "k". - - FOR j := 0 to 63 - i := j*8 - k[j] := ( a[i+7:i] == b[i+7:i] ) ? 1 : 0 - ENDFOR - k[MAX:64] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + Compare packed signed 8-bit integers in "a" and "b" for equality, and store the results in mask vector "k". + +FOR j := 0 to 63 + i := j*8 + k[j] := ( a[i+7:i] == b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:64] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - Compare packed signed 8-bit integers in "a" and "b" for greater-than-or-equal, - and store the results in mask vector "k". - - FOR j := 0 to 63 - i := j*8 - k[j] := ( a[i+7:i] >= b[i+7:i] ) ? 1 : 0 - ENDFOR - k[MAX:64] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + Compare packed signed 8-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 63 + i := j*8 + k[j] := ( a[i+7:i] >= b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:64] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - Compare packed signed 8-bit integers in "a" and "b" for greater-than, and store - the results in mask vector "k". - - FOR j := 0 to 63 - i := j*8 - k[j] := ( a[i+7:i] > b[i+7:i] ) ? 1 : 0 - ENDFOR - k[MAX:64] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + Compare packed signed 8-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k". + +FOR j := 0 to 63 + i := j*8 + k[j] := ( a[i+7:i] > b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:64] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - Compare packed signed 8-bit integers in "a" and "b" for less-than-or-equal, and - store the results in mask vector "k". - - FOR j := 0 to 63 - i := j*8 - k[j] := ( a[i+7:i] <= b[i+7:i] ) ? 1 : 0 - ENDFOR - k[MAX:64] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + Compare packed signed 8-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 63 + i := j*8 + k[j] := ( a[i+7:i] <= b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:64] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - Compare packed signed 8-bit integers in "a" and "b" for less-than, and store - the results in mask vector "k". - - FOR j := 0 to 63 - i := j*8 - k[j] := ( a[i+7:i] < b[i+7:i] ) ? 1 : 0 - ENDFOR - k[MAX:64] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + Compare packed signed 8-bit integers in "a" and "b" for less-than, and store the results in mask vector "k". + +FOR j := 0 to 63 + i := j*8 + k[j] := ( a[i+7:i] < b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:64] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - Compare packed signed 8-bit integers in "a" and "b" for not-equal, and store - the results in mask vector "k". - - FOR j := 0 to 63 - i := j*8 - k[j] := ( a[i+7:i] != b[i+7:i] ) ? 1 : 0 - ENDFOR - k[MAX:64] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + Compare packed signed 8-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k". + +FOR j := 0 to 63 + i := j*8 + k[j] := ( a[i+7:i] != b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:64] := 0 + + + AVX512BW +
immintrin.h
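The entries above expose the signed byte compare (VPCMPB) both as a general form taking an _MM_CMPINT_* predicate in imm8 and as fixed-predicate shorthands (eq, ge, gt, le, lt, neq). A sketch showing that the two spellings agree, same nightly assumptions:

use core::arch::x86_64::*;

// Sketch: assumes nightly AVX512BW bindings; values are illustrative.
#[target_feature(enable = "avx512bw")]
unsafe fn compare_demo() {
    let a = _mm512_set1_epi8(-1);
    let b = _mm512_set1_epi8(0);
    // imm8 selects the predicate; _MM_CMPINT_LT is the signed less-than case.
    let k: __mmask64 = _mm512_cmp_epi8_mask::<_MM_CMPINT_LT>(a, b);
    assert_eq!(k, u64::MAX); // -1 < 0 holds in all 64 lanes
    // The fixed-predicate shorthand computes the same mask.
    assert_eq!(_mm512_cmplt_epi8_mask(a, b), k);
}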
+ Compare
- - - - - - Compare packed signed 8-bit integers in "a" and "b" based on the comparison - operand specified by "imm8", and store the results in mask vector "k" using zeromask - "k1" (elements are zeroed out when the corresponding mask bit is not set). - CASE (imm8[2:0]) OF - 0: OP := _MM_CMPINT_EQ - 1: OP := _MM_CMPINT_LT - 2: OP := _MM_CMPINT_LE - 3: OP := _MM_CMPINT_FALSE - 4: OP := _MM_CMPINT_NE - 5: OP := _MM_CMPINT_NLT - 6: OP := _MM_CMPINT_NLE - 7: OP := _MM_CMPINT_TRUE - ESAC - FOR j := 0 to 63 - i := j*8 - IF k1[j] - k[j] := ( a[i+7:i] OP b[i+7:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:64] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + + + Compare packed signed 8-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + CASE (imm8[2:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 63 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] OP b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:64] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - - Compare packed signed 8-bit integers in "a" and "b" for equality, and store the - results in mask vector "k" using zeromask "k1" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 63 - i := j*8 - IF k1[j] - k[j] := ( a[i+7:i] == b[i+7:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:64] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + + Compare packed signed 8-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] == b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:64] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - - Compare packed signed 8-bit integers in "a" and "b" for greater-than-or-equal, - and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out - when the corresponding mask bit is not set). - - FOR j := 0 to 63 - i := j*8 - IF k1[j] - k[j] := ( a[i+7:i] >= b[i+7:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:64] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + + Compare packed signed 8-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] >= b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:64] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - - Compare packed signed 8-bit integers in "a" and "b" for greater-than, and store - the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 63 - i := j*8 - IF k1[j] - k[j] := ( a[i+7:i] > b[i+7:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:64] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + + Compare packed signed 8-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] > b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:64] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - - Compare packed signed 8-bit integers in "a" and "b" for less-than-or-equal, and - store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when - the corresponding mask bit is not set). - - FOR j := 0 to 63 - i := j*8 - IF k1[j] - k[j] := ( a[i+7:i] <= b[i+7:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:64] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + + Compare packed signed 8-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] <= b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:64] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - - Compare packed signed 8-bit integers in "a" and "b" for less-than, and store - the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 63 - i := j*8 - IF k1[j] - k[j] := ( a[i+7:i] < b[i+7:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:64] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + + Compare packed signed 8-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] < b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:64] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - - Compare packed signed 8-bit integers in "a" and "b" for not-equal, and store - the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 63 - i := j*8 - IF k1[j] - k[j] := ( a[i+7:i] != b[i+7:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:64] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + + Compare packed signed 8-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] != b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:64] := 0 + + + AVX512BW +
immintrin.h
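The entries above are the same compares gated by an incoming zeromask "k1"; a result bit can only be set where the corresponding k1 bit is set, which is how mask chains are built. A sketch, same assumptions:

use core::arch::x86_64::*;

// Sketch: assumes nightly AVX512BW bindings; values are illustrative.
#[target_feature(enable = "avx512bw")]
unsafe fn masked_compare_demo() {
    let a = _mm512_set1_epi8(3);
    let b = _mm512_set1_epi8(3);
    let k1: __mmask64 = 0x0F; // only lanes 0..=3 participate
    let k = _mm512_mask_cmpeq_epi8_mask(k1, a, b);
    assert_eq!(k, 0x0F); // equality holds everywhere, but k1 gates the result
}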
+ Compare
- - - - - Compare packed unsigned 8-bit integers in "a" and "b" based on the comparison - operand specified by "imm8", and store the results in mask vector "k". - CASE (imm8[2:0]) OF - 0: OP := _MM_CMPINT_EQ - 1: OP := _MM_CMPINT_LT - 2: OP := _MM_CMPINT_LE - 3: OP := _MM_CMPINT_FALSE - 4: OP := _MM_CMPINT_NE - 5: OP := _MM_CMPINT_NLT - 6: OP := _MM_CMPINT_NLE - 7: OP := _MM_CMPINT_TRUE - ESAC - FOR j := 0 to 63 - i := j*8 - k[j] := ( a[i+7:i] OP b[i+7:i] ) ? 1 : 0 - ENDFOR - k[MAX:64] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + + Compare packed unsigned 8-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + CASE (imm8[2:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 63 + i := j*8 + k[j] := ( a[i+7:i] OP b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:64] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - Compare packed unsigned 8-bit integers in "a" and "b" for equality, and store - the results in mask vector "k". - - FOR j := 0 to 63 - i := j*8 - k[j] := ( a[i+7:i] == b[i+7:i] ) ? 1 : 0 - ENDFOR - k[MAX:64] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + Compare packed unsigned 8-bit integers in "a" and "b" for equality, and store the results in mask vector "k". + +FOR j := 0 to 63 + i := j*8 + k[j] := ( a[i+7:i] == b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:64] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - Compare packed unsigned 8-bit integers in "a" and "b" for - greater-than-or-equal, and store the results in mask vector "k". - - FOR j := 0 to 63 - i := j*8 - k[j] := ( a[i+7:i] >= b[i+7:i] ) ? 1 : 0 - ENDFOR - k[MAX:64] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + Compare packed unsigned 8-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 63 + i := j*8 + k[j] := ( a[i+7:i] >= b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:64] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - Compare packed unsigned 8-bit integers in "a" and "b" for greater-than, and - store the results in mask vector "k". - - FOR j := 0 to 63 - i := j*8 - k[j] := ( a[i+7:i] > b[i+7:i] ) ? 1 : 0 - ENDFOR - k[MAX:64] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + Compare packed unsigned 8-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k". + +FOR j := 0 to 63 + i := j*8 + k[j] := ( a[i+7:i] > b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:64] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - Compare packed unsigned 8-bit integers in "a" and "b" for less-than-or-equal, - and store the results in mask vector "k". - - FOR j := 0 to 63 - i := j*8 - k[j] := ( a[i+7:i] <= b[i+7:i] ) ? 1 : 0 - ENDFOR - k[MAX:64] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + Compare packed unsigned 8-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 63 + i := j*8 + k[j] := ( a[i+7:i] <= b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:64] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - Compare packed unsigned 8-bit integers in "a" and "b" for less-than, and store - the results in mask vector "k". - - FOR j := 0 to 63 - i := j*8 - k[j] := ( a[i+7:i] < b[i+7:i] ) ? 1 : 0 - ENDFOR - k[MAX:64] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + Compare packed unsigned 8-bit integers in "a" and "b" for less-than, and store the results in mask vector "k". + +FOR j := 0 to 63 + i := j*8 + k[j] := ( a[i+7:i] < b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:64] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - Compare packed unsigned 8-bit integers in "a" and "b" for not-equal, and store - the results in mask vector "k". - - FOR j := 0 to 63 - i := j*8 - k[j] := ( a[i+7:i] != b[i+7:i] ) ? 1 : 0 - ENDFOR - k[MAX:64] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + Compare packed unsigned 8-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k". + +FOR j := 0 to 63 + i := j*8 + k[j] := ( a[i+7:i] != b[i+7:i] ) ? 1 : 0 +ENDFOR +k[MAX:64] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - - - Compare packed unsigned 8-bit integers in "a" and "b" based on the comparison - operand specified by "imm8", and store the results in mask vector "k" using zeromask - "k1" (elements are zeroed out when the corresponding mask bit is not set). - CASE (imm8[2:0]) OF - 0: OP := _MM_CMPINT_EQ - 1: OP := _MM_CMPINT_LT - 2: OP := _MM_CMPINT_LE - 3: OP := _MM_CMPINT_FALSE - 4: OP := _MM_CMPINT_NE - 5: OP := _MM_CMPINT_NLT - 6: OP := _MM_CMPINT_NLE - 7: OP := _MM_CMPINT_TRUE - ESAC - FOR j := 0 to 63 - i := j*8 - IF k1[j] - k[j] := ( a[i+7:i] OP b[i+7:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:64] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + + + Compare packed unsigned 8-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + CASE (imm8[2:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 63 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] OP b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:64] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - - Compare packed unsigned 8-bit integers in "a" and "b" for equality, and store - the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 63 - i := j*8 - IF k1[j] - k[j] := ( a[i+7:i] == b[i+7:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:64] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + + Compare packed unsigned 8-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] == b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:64] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - - Compare packed unsigned 8-bit integers in "a" and "b" for - greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 63 - i := j*8 - IF k1[j] - k[j] := ( a[i+7:i] >= b[i+7:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:64] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + + Compare packed unsigned 8-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] >= b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:64] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - - Compare packed unsigned 8-bit integers in "a" and "b" for greater-than, and - store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when - the corresponding mask bit is not set). - - FOR j := 0 to 63 - i := j*8 - IF k1[j] - k[j] := ( a[i+7:i] > b[i+7:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:64] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + + Compare packed unsigned 8-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] > b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:64] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - - Compare packed unsigned 8-bit integers in "a" and "b" for less-than-or-equal, - and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out - when the corresponding mask bit is not set). - - FOR j := 0 to 63 - i := j*8 - IF k1[j] - k[j] := ( a[i+7:i] <= b[i+7:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:64] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + + Compare packed unsigned 8-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] <= b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:64] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - - Compare packed unsigned 8-bit integers in "a" and "b" for less-than, and store - the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 63 - i := j*8 - IF k1[j] - k[j] := ( a[i+7:i] < b[i+7:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:64] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + + Compare packed unsigned 8-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] < b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:64] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - - Compare packed unsigned 8-bit integers in "a" and "b" for not-equal, and store - the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 63 - i := j*8 - IF k1[j] - k[j] := ( a[i+7:i] != b[i+7:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:64] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + + Compare packed unsigned 8-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k1[j] + k[j] := ( a[i+7:i] != b[i+7:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:64] := 0 + + + AVX512BW +
immintrin.h
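The entries above repeat the byte compares for unsigned operands (VPCMPUB); the predicate is evaluated on the same bit patterns but with unsigned ordering. A sketch of why the distinction matters, same assumptions:

use core::arch::x86_64::*;

// Sketch: assumes nightly AVX512BW bindings; values are illustrative.
#[target_feature(enable = "avx512bw")]
unsafe fn signedness_demo() {
    let a = _mm512_set1_epi8(-128); // bit pattern 0x80
    let b = _mm512_set1_epi8(1);
    assert_eq!(_mm512_cmpgt_epi8_mask(a, b), 0); // -128 > 1 is false
    assert_eq!(_mm512_cmpgt_epu8_mask(a, b), u64::MAX); // 128 > 1 is true
}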
+ Compare
- - - - - Compare packed unsigned 16-bit integers in "a" and "b" based on the comparison - operand specified by "imm8", and store the results in mask vector "k". - CASE (imm8[2:0]) OF - 0: OP := _MM_CMPINT_EQ - 1: OP := _MM_CMPINT_LT - 2: OP := _MM_CMPINT_LE - 3: OP := _MM_CMPINT_FALSE - 4: OP := _MM_CMPINT_NE - 5: OP := _MM_CMPINT_NLT - 6: OP := _MM_CMPINT_NLE - 7: OP := _MM_CMPINT_TRUE - ESAC - FOR j := 0 to 31 - i := j*16 - k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0 - ENDFOR - k[MAX:32] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + + Compare packed unsigned 16-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + CASE (imm8[2:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 31 + i := j*16 + k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - Compare packed unsigned 16-bit integers in "a" and "b" for equality, and store - the results in mask vector "k". - - FOR j := 0 to 31 - i := j*16 - k[j] := ( a[i+15:i] == b[i+15:i] ) ? 1 : 0 - ENDFOR - k[MAX:32] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + Compare packed unsigned 16-bit integers in "a" and "b" for equality, and store the results in mask vector "k". + +FOR j := 0 to 31 + i := j*16 + k[j] := ( a[i+15:i] == b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - Compare packed unsigned 16-bit integers in "a" and "b" for - greater-than-or-equal, and store the results in mask vector "k". - - FOR j := 0 to 31 - i := j*16 - k[j] := ( a[i+15:i] >= b[i+15:i] ) ? 1 : 0 - ENDFOR - k[MAX:32] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + Compare packed unsigned 16-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 31 + i := j*16 + k[j] := ( a[i+15:i] >= b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - Compare packed unsigned 16-bit integers in "a" and "b" for greater-than, and - store the results in mask vector "k". - - FOR j := 0 to 31 - i := j*16 - k[j] := ( a[i+15:i] > b[i+15:i] ) ? 1 : 0 - ENDFOR - k[MAX:32] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + Compare packed unsigned 16-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k". + +FOR j := 0 to 31 + i := j*16 + k[j] := ( a[i+15:i] > b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - Compare packed unsigned 16-bit integers in "a" and "b" for less-than-or-equal, - and store the results in mask vector "k". - - FOR j := 0 to 31 - i := j*16 - k[j] := ( a[i+15:i] <= b[i+15:i] ) ? 1 : 0 - ENDFOR - k[MAX:32] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + Compare packed unsigned 16-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 31 + i := j*16 + k[j] := ( a[i+15:i] <= b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - Compare packed unsigned 16-bit integers in "a" and "b" for less-than, and store - the results in mask vector "k". - - FOR j := 0 to 31 - i := j*16 - k[j] := ( a[i+15:i] < b[i+15:i] ) ? 1 : 0 - ENDFOR - k[MAX:32] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + Compare packed unsigned 16-bit integers in "a" and "b" for less-than, and store the results in mask vector "k". + +FOR j := 0 to 31 + i := j*16 + k[j] := ( a[i+15:i] < b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - Compare packed unsigned 16-bit integers in "a" and "b" for not-equal, and store - the results in mask vector "k". - - FOR j := 0 to 31 - i := j*16 - k[j] := ( a[i+15:i] != b[i+15:i] ) ? 1 : 0 - ENDFOR - k[MAX:32] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + Compare packed unsigned 16-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k". + +FOR j := 0 to 31 + i := j*16 + k[j] := ( a[i+15:i] != b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - - - Compare packed unsigned 16-bit integers in "a" and "b" based on the comparison - operand specified by "imm8", and store the results in mask vector "k" using zeromask - "k1" (elements are zeroed out when the corresponding mask bit is not set). - CASE (imm8[2:0]) OF - 0: OP := _MM_CMPINT_EQ - 1: OP := _MM_CMPINT_LT - 2: OP := _MM_CMPINT_LE - 3: OP := _MM_CMPINT_FALSE - 4: OP := _MM_CMPINT_NE - 5: OP := _MM_CMPINT_NLT - 6: OP := _MM_CMPINT_NLE - 7: OP := _MM_CMPINT_TRUE - ESAC - FOR j := 0 to 31 - i := j*16 - IF k1[j] - k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:32] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + + + Compare packed unsigned 16-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + CASE (imm8[2:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 31 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - - Compare packed unsigned 16-bit integers in "a" and "b" for equality, and store - the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 31 - i := j*16 - IF k1[j] - k[j] := ( a[i+15:i] == b[i+15:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:32] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + + Compare packed unsigned 16-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] == b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - - Compare packed unsigned 16-bit integers in "a" and "b" for - greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 31 - i := j*16 - IF k1[j] - k[j] := ( a[i+15:i] >= b[i+15:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:32] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + + Compare packed unsigned 16-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] >= b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - - Compare packed unsigned 16-bit integers in "a" and "b" for greater-than, and - store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when - the corresponding mask bit is not set). - - FOR j := 0 to 31 - i := j*16 - IF k1[j] - k[j] := ( a[i+15:i] > b[i+15:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:32] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + + Compare packed unsigned 16-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] > b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - - Compare packed unsigned 16-bit integers in "a" and "b" for less-than-or-equal, - and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out - when the corresponding mask bit is not set). - - FOR j := 0 to 31 - i := j*16 - IF k1[j] - k[j] := ( a[i+15:i] <= b[i+15:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:32] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + + Compare packed unsigned 16-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] <= b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - - Compare packed unsigned 16-bit integers in "a" and "b" for less-than, and store - the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 31 - i := j*16 - IF k1[j] - k[j] := ( a[i+15:i] < b[i+15:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:32] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + + Compare packed unsigned 16-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] < b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - - Compare packed unsigned 16-bit integers in "a" and "b" for not-equal, and store - the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 31 - i := j*16 - IF k1[j] - k[j] := ( a[i+15:i] != b[i+15:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:32] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + + Compare packed unsigned 16-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] != b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + + AVX512BW +
immintrin.h
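The entries above are the unsigned word compares (VPCMPUW); note that with 32 lanes of 16 bits the result is a 32-bit mask rather than the 64-bit byte mask. A sketch, same assumptions:

use core::arch::x86_64::*;

// Sketch: assumes nightly AVX512BW bindings; values are illustrative.
#[target_feature(enable = "avx512bw")]
unsafe fn word_compare_demo() {
    let a = _mm512_set1_epi16(40000u16 as i16); // 0x9C40, large as unsigned
    let b = _mm512_set1_epi16(1);
    let k: __mmask32 = _mm512_cmpge_epu16_mask(a, b);
    assert_eq!(k, u32::MAX); // 40000 >= 1 in all 32 word lanes
}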
+ Compare
- - - - - Compare packed signed 16-bit integers in "a" and "b" based on the comparison - operand specified by "imm8", and store the results in mask vector "k". - CASE (imm8[2:0]) OF - 0: OP := _MM_CMPINT_EQ - 1: OP := _MM_CMPINT_LT - 2: OP := _MM_CMPINT_LE - 3: OP := _MM_CMPINT_FALSE - 4: OP := _MM_CMPINT_NE - 5: OP := _MM_CMPINT_NLT - 6: OP := _MM_CMPINT_NLE - 7: OP := _MM_CMPINT_TRUE - ESAC - FOR j := 0 to 31 - i := j*16 - k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0 - ENDFOR - k[MAX:32] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + + Compare packed signed 16-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + CASE (imm8[2:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 31 + i := j*16 + k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - Compare packed signed 16-bit integers in "a" and "b" for equality, and store - the results in mask vector "k". - - FOR j := 0 to 31 - i := j*16 - k[j] := ( a[i+15:i] == b[i+15:i] ) ? 1 : 0 - ENDFOR - k[MAX:32] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + Compare packed signed 16-bit integers in "a" and "b" for equality, and store the results in mask vector "k". + +FOR j := 0 to 31 + i := j*16 + k[j] := ( a[i+15:i] == b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - Compare packed signed 16-bit integers in "a" and "b" for greater-than-or-equal, - and store the results in mask vector "k". - - FOR j := 0 to 31 - i := j*16 - k[j] := ( a[i+15:i] >= b[i+15:i] ) ? 1 : 0 - ENDFOR - k[MAX:32] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + Compare packed signed 16-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 31 + i := j*16 + k[j] := ( a[i+15:i] >= b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - Compare packed signed 16-bit integers in "a" and "b" for greater-than, and - store the results in mask vector "k". - - FOR j := 0 to 31 - i := j*16 - k[j] := ( a[i+15:i] > b[i+15:i] ) ? 1 : 0 - ENDFOR - k[MAX:32] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + Compare packed signed 16-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k". + +FOR j := 0 to 31 + i := j*16 + k[j] := ( a[i+15:i] > b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - Compare packed signed 16-bit integers in "a" and "b" for less-than-or-equal, - and store the results in mask vector "k". - - FOR j := 0 to 31 - i := j*16 - k[j] := ( a[i+15:i] <= b[i+15:i] ) ? 1 : 0 - ENDFOR - k[MAX:32] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + Compare packed signed 16-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 31 + i := j*16 + k[j] := ( a[i+15:i] <= b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - Compare packed signed 16-bit integers in "a" and "b" for less-than, and store - the results in mask vector "k". - - FOR j := 0 to 31 - i := j*16 - k[j] := ( a[i+15:i] < b[i+15:i] ) ? 1 : 0 - ENDFOR - k[MAX:32] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + Compare packed signed 16-bit integers in "a" and "b" for less-than, and store the results in mask vector "k". + +FOR j := 0 to 31 + i := j*16 + k[j] := ( a[i+15:i] < b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - Compare packed signed 16-bit integers in "a" and "b" for not-equal, and store - the results in mask vector "k". - - FOR j := 0 to 31 - i := j*16 - k[j] := ( a[i+15:i] != b[i+15:i] ) ? 1 : 0 - ENDFOR - k[MAX:32] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + Compare packed signed 16-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k". + +FOR j := 0 to 31 + i := j*16 + k[j] := ( a[i+15:i] != b[i+15:i] ) ? 1 : 0 +ENDFOR +k[MAX:32] := 0 + + + AVX512BW +
immintrin.h
+ Compare
- - - - - - Compare packed signed 16-bit integers in "a" and "b" based on the comparison - operand specified by "imm8", and store the results in mask vector "k" using zeromask - "k1" (elements are zeroed out when the corresponding mask bit is not set). - CASE (imm8[2:0]) OF - 0: OP := _MM_CMPINT_EQ - 1: OP := _MM_CMPINT_LT - 2: OP := _MM_CMPINT_LE - 3: OP := _MM_CMPINT_FALSE - 4: OP := _MM_CMPINT_NE - 5: OP := _MM_CMPINT_NLT - 6: OP := _MM_CMPINT_NLE - 7: OP := _MM_CMPINT_TRUE - ESAC - FOR j := 0 to 31 - i := j*16 - IF k1[j] - k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:32] := 0 - - - AVX512BW -
immintrin.h
- Compare + + + + + + Compare packed signed 16-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + CASE (imm8[2:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 31 + i := j*16 + IF k1[j] + k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + + AVX512BW +
immintrin.h
+ Compare
Compare packed signed 16-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 31
    i := j*16
    IF k1[j]
        k[j] := ( a[i+15:i] == b[i+15:i] ) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:32] := 0

AVX512BW
immintrin.h
Compare

Compare packed signed 16-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 31
    i := j*16
    IF k1[j]
        k[j] := ( a[i+15:i] >= b[i+15:i] ) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:32] := 0

AVX512BW
immintrin.h
Compare

Compare packed signed 16-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 31
    i := j*16
    IF k1[j]
        k[j] := ( a[i+15:i] > b[i+15:i] ) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:32] := 0

AVX512BW
immintrin.h
Compare

Compare packed signed 16-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 31
    i := j*16
    IF k1[j]
        k[j] := ( a[i+15:i] <= b[i+15:i] ) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:32] := 0

AVX512BW
immintrin.h
Compare

Compare packed signed 16-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 31
    i := j*16
    IF k1[j]
        k[j] := ( a[i+15:i] < b[i+15:i] ) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:32] := 0

AVX512BW
immintrin.h
Compare

Compare packed signed 16-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 31
    i := j*16
    IF k1[j]
        k[j] := ( a[i+15:i] != b[i+15:i] ) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:32] := 0

AVX512BW
immintrin.h
Compare
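The six fixed-predicate compares above are specializations of the same loop; a minimal Rust sketch of the zeromasked equality case (hypothetical scalar helper; the real intrinsic operates on __m512i and __mmask32):

/// Bit j of the result is the comparison outcome when k1[j] is set,
/// and 0 otherwise (zeromasking).
fn cmpeq_epi16_mask(k1: u32, a: &[i16; 32], b: &[i16; 32]) -> u32 {
    let mut k = 0u32;
    for j in 0..32 {
        if (k1 >> j) & 1 == 1 && a[j] == b[j] {
            k |= 1 << j;
        }
    }
    k
}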
Compute the bitwise AND of packed 8-bit integers in "a" and "b", producing intermediate 8-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k1") if the intermediate value is non-zero.

FOR j := 0 to 63
    i := j*8
    IF k1[j]
        k[j] := ((a[i+7:i] AND b[i+7:i]) != 0) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:64] := 0

AVX512BW
immintrin.h
Compare

Compute the bitwise AND of packed 8-bit integers in "a" and "b", producing intermediate 8-bit values, and set the corresponding bit in result mask "k" if the intermediate value is non-zero.

FOR j := 0 to 63
    i := j*8
    k[j] := ((a[i+7:i] AND b[i+7:i]) != 0) ? 1 : 0
ENDFOR
k[MAX:64] := 0

AVX512BW
immintrin.h
Compare

Compute the bitwise AND of packed 16-bit integers in "a" and "b", producing intermediate 16-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k1") if the intermediate value is non-zero.

FOR j := 0 to 31
    i := j*16
    IF k1[j]
        k[j] := ((a[i+15:i] AND b[i+15:i]) != 0) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:32] := 0

AVX512BW
immintrin.h
Compare

Compute the bitwise AND of packed 16-bit integers in "a" and "b", producing intermediate 16-bit values, and set the corresponding bit in result mask "k" if the intermediate value is non-zero.

FOR j := 0 to 31
    i := j*16
    k[j] := ((a[i+15:i] AND b[i+15:i]) != 0) ? 1 : 0
ENDFOR
k[MAX:32] := 0

AVX512BW
immintrin.h
Compare

Compute the bitwise NAND of packed 8-bit integers in "a" and "b", producing intermediate 8-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k1") if the intermediate value is zero.

FOR j := 0 to 63
    i := j*8
    IF k1[j]
        k[j] := ((a[i+7:i] AND b[i+7:i]) == 0) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:64] := 0

AVX512BW
immintrin.h
Compare

Compute the bitwise NAND of packed 8-bit integers in "a" and "b", producing intermediate 8-bit values, and set the corresponding bit in result mask "k" if the intermediate value is zero.

FOR j := 0 to 63
    i := j*8
    k[j] := ((a[i+7:i] AND b[i+7:i]) == 0) ? 1 : 0
ENDFOR
k[MAX:64] := 0

AVX512BW
immintrin.h
Compare

Compute the bitwise NAND of packed 16-bit integers in "a" and "b", producing intermediate 16-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k1") if the intermediate value is zero.

FOR j := 0 to 31
    i := j*16
    IF k1[j]
        k[j] := ((a[i+15:i] AND b[i+15:i]) == 0) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:32] := 0

AVX512BW
immintrin.h
Compare

Compute the bitwise NAND of packed 16-bit integers in "a" and "b", producing intermediate 16-bit values, and set the corresponding bit in result mask "k" if the intermediate value is zero.

FOR j := 0 to 31
    i := j*16
    k[j] := ((a[i+15:i] AND b[i+15:i]) == 0) ? 1 : 0
ENDFOR
k[MAX:32] := 0

AVX512BW
immintrin.h
Compare
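The test/testn pairs above differ only in the sense of the zero test; a minimal Rust sketch of the unmasked 16-bit case (hypothetical helper, sketch only):

/// testm sets bit j when (a AND b) is non-zero in element j,
/// testn when it is zero; the two masks are exact complements here.
fn test_epi16_mask(a: &[u16; 32], b: &[u16; 32]) -> (u32, u32) {
    let (mut testm, mut testn) = (0u32, 0u32);
    for j in 0..32 {
        if a[j] & b[j] != 0 { testm |= 1 << j } else { testn |= 1 << j }
    }
    (testm, testn)
}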
Shift 128-bit lanes in "a" left by "imm8" bytes while shifting in zeros, and store the results in "dst".

tmp := imm8[7:0]
IF tmp > 15
    tmp := 16
FI
dst[127:0] := a[127:0] << (tmp*8)
dst[255:128] := a[255:128] << (tmp*8)
dst[383:256] := a[383:256] << (tmp*8)
dst[511:384] := a[511:384] << (tmp*8)
dst[MAX:512] := 0

AVX512BW
immintrin.h
Shift
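The byte shift above operates on each 128-bit lane independently, with the count saturating to 16 bytes (a cleared lane); a minimal Rust sketch using one u128 per lane (an illustrative model, not the intrinsic's signature):

/// Each lane shifts on its own; imm8 >= 16 empties the lane.
fn bslli_epi128_lanes(lanes: [u128; 4], imm8: u8) -> [u128; 4] {
    let tmp = (imm8 as u32).min(16);
    lanes.map(|lane| if tmp == 16 { 0 } else { lane << (tmp * 8) })
}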
Shift packed 16-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 31
    i := j*16
    IF k[j]
        IF count[i+15:i] < 16
            dst[i+15:i] := ZeroExtend16(a[i+15:i] << count[i+15:i])
        ELSE
            dst[i+15:i] := 0
        FI
    ELSE
        dst[i+15:i] := src[i+15:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Shift

Shift packed 16-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 31
    i := j*16
    IF k[j]
        IF count[i+15:i] < 16
            dst[i+15:i] := ZeroExtend16(a[i+15:i] << count[i+15:i])
        ELSE
            dst[i+15:i] := 0
        FI
    ELSE
        dst[i+15:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Shift

Shift packed 16-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst".

FOR j := 0 to 31
    i := j*16
    IF count[i+15:i] < 16
        dst[i+15:i] := ZeroExtend16(a[i+15:i] << count[i+15:i])
    ELSE
        dst[i+15:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Shift

Shift packed 16-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 31
    i := j*16
    IF k[j]
        IF count[63:0] > 15
            dst[i+15:i] := 0
        ELSE
            dst[i+15:i] := ZeroExtend16(a[i+15:i] << count[63:0])
        FI
    ELSE
        dst[i+15:i] := src[i+15:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Shift

Shift packed 16-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 31
    i := j*16
    IF k[j]
        IF imm8[7:0] > 15
            dst[i+15:i] := 0
        ELSE
            dst[i+15:i] := ZeroExtend16(a[i+15:i] << imm8[7:0])
        FI
    ELSE
        dst[i+15:i] := src[i+15:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Shift

Shift packed 16-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 31
    i := j*16
    IF k[j]
        IF count[63:0] > 15
            dst[i+15:i] := 0
        ELSE
            dst[i+15:i] := ZeroExtend16(a[i+15:i] << count[63:0])
        FI
    ELSE
        dst[i+15:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Shift

Shift packed 16-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 31
    i := j*16
    IF k[j]
        IF imm8[7:0] > 15
            dst[i+15:i] := 0
        ELSE
            dst[i+15:i] := ZeroExtend16(a[i+15:i] << imm8[7:0])
        FI
    ELSE
        dst[i+15:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Shift

Shift packed 16-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst".

FOR j := 0 to 31
    i := j*16
    IF count[63:0] > 15
        dst[i+15:i] := 0
    ELSE
        dst[i+15:i] := ZeroExtend16(a[i+15:i] << count[63:0])
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Shift

Shift packed 16-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst".

FOR j := 0 to 31
    i := j*16
    IF imm8[7:0] > 15
        dst[i+15:i] := 0
    ELSE
        dst[i+15:i] := ZeroExtend16(a[i+15:i] << imm8[7:0])
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Shift
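All of the 16-bit left shifts above share one quirk worth calling out: any count of 16 or more produces 0 rather than being reduced modulo the element width. A one-line Rust model of the per-element step (hedged sketch, hypothetical helper):

/// Unlike a bare Rust `<<`, which panics in debug builds for counts >= 16,
/// the intrinsics define oversized counts to yield 0.
fn sll_epi16(a: u16, count: u64) -> u16 {
    if count > 15 { 0 } else { a << count }
}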
Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 31
    i := j*16
    IF k[j]
        IF count[i+15:i] < 16
            dst[i+15:i] := SignExtend16(a[i+15:i] >> count[i+15:i])
        ELSE
            dst[i+15:i] := (a[i+15] ? 0xFFFF : 0)
        FI
    ELSE
        dst[i+15:i] := src[i+15:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Shift

Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 31
    i := j*16
    IF k[j]
        IF count[i+15:i] < 16
            dst[i+15:i] := SignExtend16(a[i+15:i] >> count[i+15:i])
        ELSE
            dst[i+15:i] := (a[i+15] ? 0xFFFF : 0)
        FI
    ELSE
        dst[i+15:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Shift

Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst".

FOR j := 0 to 31
    i := j*16
    IF count[i+15:i] < 16
        dst[i+15:i] := SignExtend16(a[i+15:i] >> count[i+15:i])
    ELSE
        dst[i+15:i] := (a[i+15] ? 0xFFFF : 0)
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Shift

Shift packed 16-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 31
    i := j*16
    IF k[j]
        IF count[63:0] > 15
            dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0)
        ELSE
            dst[i+15:i] := SignExtend16(a[i+15:i] >> count[63:0])
        FI
    ELSE
        dst[i+15:i] := src[i+15:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Shift

Shift packed 16-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 31
    i := j*16
    IF k[j]
        IF imm8[7:0] > 15
            dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0)
        ELSE
            dst[i+15:i] := SignExtend16(a[i+15:i] >> imm8[7:0])
        FI
    ELSE
        dst[i+15:i] := src[i+15:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Shift

Shift packed 16-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 31
    i := j*16
    IF k[j]
        IF count[63:0] > 15
            dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0)
        ELSE
            dst[i+15:i] := SignExtend16(a[i+15:i] >> count[63:0])
        FI
    ELSE
        dst[i+15:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Shift

Shift packed 16-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 31
    i := j*16
    IF k[j]
        IF imm8[7:0] > 15
            dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0)
        ELSE
            dst[i+15:i] := SignExtend16(a[i+15:i] >> imm8[7:0])
        FI
    ELSE
        dst[i+15:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Shift

Shift packed 16-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst".

FOR j := 0 to 31
    i := j*16
    IF count[63:0] > 15
        dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0)
    ELSE
        dst[i+15:i] := SignExtend16(a[i+15:i] >> count[63:0])
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Shift

Shift packed 16-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst".

FOR j := 0 to 31
    i := j*16
    IF imm8[7:0] > 15
        dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0)
    ELSE
        dst[i+15:i] := SignExtend16(a[i+15:i] >> imm8[7:0])
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Shift
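The arithmetic right shifts above fill with copies of the sign bit, so an oversized count converges to 0xFFFF for negative elements and 0 otherwise, which is the same as clamping the count to 15 on a signed 16-bit value; a minimal Rust model (sketch only, hypothetical helper):

/// Shifting i16 by min(count, 15) reproduces the "all sign bits" result
/// the pseudocode spells out for counts above 15.
fn sra_epi16(a: i16, count: u64) -> i16 {
    a >> count.min(15)
}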
Shift 128-bit lanes in "a" right by "imm8" bytes while shifting in zeros, and store the results in "dst".

tmp := imm8[7:0]
IF tmp > 15
    tmp := 16
FI
dst[127:0] := a[127:0] >> (tmp*8)
dst[255:128] := a[255:128] >> (tmp*8)
dst[383:256] := a[383:256] >> (tmp*8)
dst[511:384] := a[511:384] >> (tmp*8)
dst[MAX:512] := 0

AVX512BW
immintrin.h
Shift

Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 31
    i := j*16
    IF k[j]
        IF count[i+15:i] < 16
            dst[i+15:i] := ZeroExtend16(a[i+15:i] >> count[i+15:i])
        ELSE
            dst[i+15:i] := 0
        FI
    ELSE
        dst[i+15:i] := src[i+15:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Shift

Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 31
    i := j*16
    IF k[j]
        IF count[i+15:i] < 16
            dst[i+15:i] := ZeroExtend16(a[i+15:i] >> count[i+15:i])
        ELSE
            dst[i+15:i] := 0
        FI
    ELSE
        dst[i+15:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Shift

Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst".

FOR j := 0 to 31
    i := j*16
    IF count[i+15:i] < 16
        dst[i+15:i] := ZeroExtend16(a[i+15:i] >> count[i+15:i])
    ELSE
        dst[i+15:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Shift

Shift packed 16-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 31
    i := j*16
    IF k[j]
        IF count[63:0] > 15
            dst[i+15:i] := 0
        ELSE
            dst[i+15:i] := ZeroExtend16(a[i+15:i] >> count[63:0])
        FI
    ELSE
        dst[i+15:i] := src[i+15:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Shift

Shift packed 16-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 31
    i := j*16
    IF k[j]
        IF imm8[7:0] > 15
            dst[i+15:i] := 0
        ELSE
            dst[i+15:i] := ZeroExtend16(a[i+15:i] >> imm8[7:0])
        FI
    ELSE
        dst[i+15:i] := src[i+15:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Shift

Shift packed 16-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 31
    i := j*16
    IF k[j]
        IF count[63:0] > 15
            dst[i+15:i] := 0
        ELSE
            dst[i+15:i] := ZeroExtend16(a[i+15:i] >> count[63:0])
        FI
    ELSE
        dst[i+15:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Shift

Shift packed 16-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 31
    i := j*16
    IF k[j]
        IF imm8[7:0] > 15
            dst[i+15:i] := 0
        ELSE
            dst[i+15:i] := ZeroExtend16(a[i+15:i] >> imm8[7:0])
        FI
    ELSE
        dst[i+15:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Shift

Shift packed 16-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst".

FOR j := 0 to 31
    i := j*16
    IF count[63:0] > 15
        dst[i+15:i] := 0
    ELSE
        dst[i+15:i] := ZeroExtend16(a[i+15:i] >> count[63:0])
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Shift

Shift packed 16-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst".

FOR j := 0 to 31
    i := j*16
    IF imm8[7:0] > 15
        dst[i+15:i] := 0
    ELSE
        dst[i+15:i] := ZeroExtend16(a[i+15:i] >> imm8[7:0])
    FI
ENDFOR
dst[MAX:512] := 0

AVX512BW
immintrin.h
Shift
Add 32-bit masks in "a" and "b", and store the result in "k".

k[31:0] := a[31:0] + b[31:0]
k[MAX:32] := 0

AVX512BW
immintrin.h
Mask

Add 64-bit masks in "a" and "b", and store the result in "k".

k[63:0] := a[63:0] + b[63:0]
k[MAX:64] := 0

AVX512BW
immintrin.h
Mask

Compute the bitwise AND of 32-bit masks "a" and "b", and store the result in "k".

k[31:0] := a[31:0] AND b[31:0]
k[MAX:32] := 0

AVX512BW
immintrin.h
Mask

Compute the bitwise AND of 64-bit masks "a" and "b", and store the result in "k".

k[63:0] := a[63:0] AND b[63:0]
k[MAX:64] := 0

AVX512BW
immintrin.h
Mask

Compute the bitwise NOT of 32-bit mask "a" and then AND with "b", and store the result in "k".

k[31:0] := (NOT a[31:0]) AND b[31:0]
k[MAX:32] := 0

AVX512BW
immintrin.h
Mask

Compute the bitwise NOT of 64-bit mask "a" and then AND with "b", and store the result in "k".

k[63:0] := (NOT a[63:0]) AND b[63:0]
k[MAX:64] := 0

AVX512BW
immintrin.h
Mask

Compute the bitwise NOT of 32-bit mask "a", and store the result in "k".

k[31:0] := NOT a[31:0]
k[MAX:32] := 0

AVX512BW
immintrin.h
Mask

Compute the bitwise NOT of 64-bit mask "a", and store the result in "k".

k[63:0] := NOT a[63:0]
k[MAX:64] := 0

AVX512BW
immintrin.h
Mask

Compute the bitwise OR of 32-bit masks "a" and "b", and store the result in "k".

k[31:0] := a[31:0] OR b[31:0]
k[MAX:32] := 0

AVX512BW
immintrin.h
Mask

Compute the bitwise OR of 64-bit masks "a" and "b", and store the result in "k".

k[63:0] := a[63:0] OR b[63:0]
k[MAX:64] := 0

AVX512BW
immintrin.h
Mask

Compute the bitwise XNOR of 32-bit masks "a" and "b", and store the result in "k".

k[31:0] := NOT (a[31:0] XOR b[31:0])
k[MAX:32] := 0

AVX512BW
immintrin.h
Mask

Compute the bitwise XNOR of 64-bit masks "a" and "b", and store the result in "k".

k[63:0] := NOT (a[63:0] XOR b[63:0])
k[MAX:64] := 0

AVX512BW
immintrin.h
Mask

Compute the bitwise XOR of 32-bit masks "a" and "b", and store the result in "k".

k[31:0] := a[31:0] XOR b[31:0]
k[MAX:32] := 0

AVX512BW
immintrin.h
Mask

Compute the bitwise XOR of 64-bit masks "a" and "b", and store the result in "k".

k[63:0] := a[63:0] XOR b[63:0]
k[MAX:64] := 0

AVX512BW
immintrin.h
Mask
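Since a 32-bit mask is just a 32-bit integer, the mask operations above are ordinary integer arithmetic; a minimal Rust sketch of three of them on a u32 model of __mmask32 (illustrative helper names):

/// kadd is a plain wrapping add of the two mask registers;
/// kandn and kxnor are the usual bitwise identities.
fn kadd_mask32(a: u32, b: u32) -> u32 { a.wrapping_add(b) }
fn kandn_mask32(a: u32, b: u32) -> u32 { !a & b }
fn kxnor_mask32(a: u32, b: u32) -> u32 { !(a ^ b) }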
Shift the bits of 32-bit mask "a" left by "count" while shifting in zeros, and store the least significant 32 bits of the result in "k".

k[MAX:0] := 0
IF count[7:0] <= 31
    k[31:0] := a[31:0] << count[7:0]
FI

AVX512BW
immintrin.h
Mask

Shift the bits of 64-bit mask "a" left by "count" while shifting in zeros, and store the least significant 64 bits of the result in "k".

k[MAX:0] := 0
IF count[7:0] <= 63
    k[63:0] := a[63:0] << count[7:0]
FI

AVX512BW
immintrin.h
Mask

Shift the bits of 32-bit mask "a" right by "count" while shifting in zeros, and store the least significant 32 bits of the result in "k".

k[MAX:0] := 0
IF count[7:0] <= 31
    k[31:0] := a[31:0] >> count[7:0]
FI

AVX512BW
immintrin.h
Mask

Shift the bits of 64-bit mask "a" right by "count" while shifting in zeros, and store the least significant 64 bits of the result in "k".

k[MAX:0] := 0
IF count[7:0] <= 63
    k[63:0] := a[63:0] >> count[7:0]
FI

AVX512BW
immintrin.h
Mask
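Note that the mask shifts above do not wrap the count: anything past the mask width yields an all-zero mask, as in this Rust sketch (hypothetical helper):

/// A bare `a << count` would panic (debug) or wrap the count (release)
/// for count > 31; the intrinsic instead defines the result as 0.
fn kshiftli_mask32(a: u32, count: u8) -> u32 {
    if count <= 31 { a << count } else { 0 }
}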
Compute the bitwise OR of 32-bit masks "a" and "b". If the result is all zeros, store 1 in "dst", otherwise store 0 in "dst". If the result is all ones, store 1 in "all_ones", otherwise store 0 in "all_ones".

tmp[31:0] := a[31:0] OR b[31:0]
IF tmp[31:0] == 0x0
    dst := 1
ELSE
    dst := 0
FI
IF tmp[31:0] == 0xFFFFFFFF
    MEM[all_ones+7:all_ones] := 1
ELSE
    MEM[all_ones+7:all_ones] := 0
FI

AVX512BW
immintrin.h
Mask

Compute the bitwise OR of 32-bit masks "a" and "b". If the result is all zeroes, store 1 in "dst", otherwise store 0 in "dst".

tmp[31:0] := a[31:0] OR b[31:0]
IF tmp[31:0] == 0x0
    dst := 1
ELSE
    dst := 0
FI

AVX512BW
immintrin.h
Mask

Compute the bitwise OR of 32-bit masks "a" and "b". If the result is all ones, store 1 in "dst", otherwise store 0 in "dst".

tmp[31:0] := a[31:0] OR b[31:0]
IF tmp[31:0] == 0xFFFFFFFF
    dst := 1
ELSE
    dst := 0
FI

AVX512BW
immintrin.h
Mask

Compute the bitwise OR of 64-bit masks "a" and "b". If the result is all zeros, store 1 in "dst", otherwise store 0 in "dst". If the result is all ones, store 1 in "all_ones", otherwise store 0 in "all_ones".

tmp[63:0] := a[63:0] OR b[63:0]
IF tmp[63:0] == 0x0
    dst := 1
ELSE
    dst := 0
FI
IF tmp[63:0] == 0xFFFFFFFFFFFFFFFF
    MEM[all_ones+7:all_ones] := 1
ELSE
    MEM[all_ones+7:all_ones] := 0
FI

AVX512BW
immintrin.h
Mask

Compute the bitwise OR of 64-bit masks "a" and "b". If the result is all zeroes, store 1 in "dst", otherwise store 0 in "dst".

tmp[63:0] := a[63:0] OR b[63:0]
IF tmp[63:0] == 0x0
    dst := 1
ELSE
    dst := 0
FI

AVX512BW
immintrin.h
Mask

Compute the bitwise OR of 64-bit masks "a" and "b". If the result is all ones, store 1 in "dst", otherwise store 0 in "dst".

tmp[63:0] := a[63:0] OR b[63:0]
IF tmp[63:0] == 0xFFFFFFFFFFFFFFFF
    dst := 1
ELSE
    dst := 0
FI

AVX512BW
immintrin.h
Mask
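One OR feeds both kortest outputs, the all-zeros test and the all-ones test; a minimal Rust model returning the pair (sketch only, not the intrinsic's out-parameter shape):

/// Returns (all_zeros, all_ones) of a OR b, mirroring the two results
/// the variants above expose individually.
fn kortest_mask32(a: u32, b: u32) -> (bool, bool) {
    let tmp = a | b;
    (tmp == 0, tmp == u32::MAX)
}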
Compute the bitwise AND of 32-bit masks "a" and "b", and if the result is all zeros, store 1 in "dst", otherwise store 0 in "dst". Compute the bitwise NOT of "a" and then AND with "b", if the result is all zeros, store 1 in "and_not", otherwise store 0 in "and_not".

tmp1[31:0] := a[31:0] AND b[31:0]
IF tmp1[31:0] == 0x0
    dst := 1
ELSE
    dst := 0
FI
tmp2[31:0] := (NOT a[31:0]) AND b[31:0]
IF tmp2[31:0] == 0x0
    MEM[and_not+7:and_not] := 1
ELSE
    MEM[and_not+7:and_not] := 0
FI

AVX512BW
immintrin.h
Mask

Compute the bitwise AND of 32-bit masks "a" and "b", and if the result is all zeros, store 1 in "dst", otherwise store 0 in "dst".

tmp[31:0] := a[31:0] AND b[31:0]
IF tmp[31:0] == 0x0
    dst := 1
ELSE
    dst := 0
FI

AVX512BW
immintrin.h
Mask

Compute the bitwise NOT of 32-bit mask "a" and then AND with "b", if the result is all zeroes, store 1 in "dst", otherwise store 0 in "dst".

tmp[31:0] := (NOT a[31:0]) AND b[31:0]
IF tmp[31:0] == 0x0
    dst := 1
ELSE
    dst := 0
FI

AVX512BW
immintrin.h
Mask

Compute the bitwise AND of 64-bit masks "a" and "b", and if the result is all zeros, store 1 in "dst", otherwise store 0 in "dst". Compute the bitwise NOT of "a" and then AND with "b", if the result is all zeros, store 1 in "and_not", otherwise store 0 in "and_not".

tmp1[63:0] := a[63:0] AND b[63:0]
IF tmp1[63:0] == 0x0
    dst := 1
ELSE
    dst := 0
FI
tmp2[63:0] := (NOT a[63:0]) AND b[63:0]
IF tmp2[63:0] == 0x0
    MEM[and_not+7:and_not] := 1
ELSE
    MEM[and_not+7:and_not] := 0
FI

AVX512BW
immintrin.h
Mask

Compute the bitwise AND of 64-bit masks "a" and "b", and if the result is all zeros, store 1 in "dst", otherwise store 0 in "dst".

tmp[63:0] := a[63:0] AND b[63:0]
IF tmp[63:0] == 0x0
    dst := 1
ELSE
    dst := 0
FI

AVX512BW
immintrin.h
Mask

Compute the bitwise NOT of 64-bit mask "a" and then AND with "b", if the result is all zeroes, store 1 in "dst", otherwise store 0 in "dst".

tmp[63:0] := (NOT a[63:0]) AND b[63:0]
IF tmp[63:0] == 0x0
    dst := 1
ELSE
    dst := 0
FI

AVX512BW
immintrin.h
Mask
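The ktest family above pairs an AND test with an AND-NOT test over the same operands; a minimal Rust model (sketch only, hypothetical helper):

/// Returns (and_is_zero, andn_is_zero): whether a AND b, and
/// (NOT a) AND b, are all zeros respectively.
fn ktest_mask32(a: u32, b: u32) -> (bool, bool) {
    ((a & b) == 0, (!a & b) == 0)
}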
Convert 32-bit mask "a" into an integer value, and store the result in "dst".

dst := ZeroExtend32(a[31:0])

AVX512BW
immintrin.h
Mask

Convert 64-bit mask "a" into an integer value, and store the result in "dst".

dst := ZeroExtend64(a[63:0])

AVX512BW
immintrin.h
Mask

Convert integer value "a" into a 32-bit mask, and store the result in "k".

k := ZeroExtend32(a[31:0])

AVX512BW
immintrin.h
Mask
Convert integer value "a" into a 64-bit mask, and store the result in "k".

k := ZeroExtend64(a[63:0])

AVX512BW
immintrin.h
Mask

Broadcast the low 8-bits from input mask "k" to all 64-bit elements of "dst".

FOR j := 0 to 3
    i := j*64
    dst[i+63:i] := ZeroExtend64(k[7:0])
ENDFOR
dst[MAX:256] := 0

AVX512CD
AVX512VL
immintrin.h
Miscellaneous
Broadcast the low 8-bits from input mask "k" to all 64-bit elements of "dst".

FOR j := 0 to 1
    i := j*64
    dst[i+63:i] := ZeroExtend64(k[7:0])
ENDFOR
dst[MAX:128] := 0

AVX512CD
AVX512VL
immintrin.h
Miscellaneous

Broadcast the low 16-bits from input mask "k" to all 32-bit elements of "dst".

FOR j := 0 to 7
    i := j*32
    dst[i+31:i] := ZeroExtend32(k[15:0])
ENDFOR
dst[MAX:256] := 0

AVX512CD
AVX512VL
immintrin.h
Miscellaneous

Broadcast the low 16-bits from input mask "k" to all 32-bit elements of "dst".

FOR j := 0 to 3
    i := j*32
    dst[i+31:i] := ZeroExtend32(k[15:0])
ENDFOR
dst[MAX:128] := 0

AVX512CD
AVX512VL
immintrin.h
Miscellaneous
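Each broadcastm variant zero-extends the low mask bits into every element; for the 256-bit word form this is just a splat, as in this Rust sketch (illustrative helper, not the intrinsic's signature):

/// The low 16 mask bits, zero-extended to 32 bits, fill all 8 elements.
fn broadcastmw_epi32(k: u16) -> [u32; 8] {
    [k as u32; 8]
}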
Test each 32-bit element of "a" for equality with all other elements in "a" closer to the least significant bit. Each element's comparison forms a zero extended bit vector in "dst".

FOR j := 0 to 7
    i := j*32
    FOR k := 0 to j-1
        m := k*32
        dst[i+k] := (a[i+31:i] == a[m+31:m]) ? 1 : 0
    ENDFOR
    dst[i+31:i+j] := 0
ENDFOR
dst[MAX:256] := 0

AVX512CD
AVX512VL
immintrin.h
Compare
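The conflict detection loop compares each element only against lower-indexed elements, so result element j is a bit vector whose bit l flags a match with element l; a minimal Rust sketch of the unmasked 256-bit form (hypothetical helper):

/// dst[j] gets bit l set when a[j] == a[l] for every l < j; higher
/// bits of each element stay zero, matching the pseudocode above.
fn conflict_epi32(a: &[u32; 8]) -> [u32; 8] {
    let mut dst = [0u32; 8];
    for j in 0..8 {
        for l in 0..j {
            if a[j] == a[l] {
                dst[j] |= 1 << l;
            }
        }
    }
    dst
}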
Test each 32-bit element of "a" for equality with all other elements in "a" closer to the least significant bit using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in "dst".

FOR j := 0 to 7
    i := j*32
    IF k[j]
        FOR l := 0 to j-1
            m := l*32
            dst[i+l] := (a[i+31:i] == a[m+31:m]) ? 1 : 0
        ENDFOR
        dst[i+31:i+j] := 0
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:256] := 0

AVX512CD
AVX512VL
immintrin.h
Compare

Test each 32-bit element of "a" for equality with all other elements in "a" closer to the least significant bit using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in "dst".

FOR j := 0 to 7
    i := j*32
    IF k[j]
        FOR l := 0 to j-1
            m := l*32
            dst[i+l] := (a[i+31:i] == a[m+31:m]) ? 1 : 0
        ENDFOR
        dst[i+31:i+j] := 0
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

AVX512CD
AVX512VL
immintrin.h
Compare

Test each 32-bit element of "a" for equality with all other elements in "a" closer to the least significant bit. Each element's comparison forms a zero extended bit vector in "dst".

FOR j := 0 to 3
    i := j*32
    FOR k := 0 to j-1
        m := k*32
        dst[i+k] := (a[i+31:i] == a[m+31:m]) ? 1 : 0
    ENDFOR
    dst[i+31:i+j] := 0
ENDFOR
dst[MAX:128] := 0

AVX512CD
AVX512VL
immintrin.h
Compare

Test each 32-bit element of "a" for equality with all other elements in "a" closer to the least significant bit using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in "dst".

FOR j := 0 to 3
    i := j*32
    IF k[j]
        FOR l := 0 to j-1
            m := l*32
            dst[i+l] := (a[i+31:i] == a[m+31:m]) ? 1 : 0
        ENDFOR
        dst[i+31:i+j] := 0
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:128] := 0

AVX512CD
AVX512VL
immintrin.h
Compare

Test each 32-bit element of "a" for equality with all other elements in "a" closer to the least significant bit using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in "dst".

FOR j := 0 to 3
    i := j*32
    IF k[j]
        FOR l := 0 to j-1
            m := l*32
            dst[i+l] := (a[i+31:i] == a[m+31:m]) ? 1 : 0
        ENDFOR
        dst[i+31:i+j] := 0
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

AVX512CD
AVX512VL
immintrin.h
Compare

Test each 64-bit element of "a" for equality with all other elements in "a" closer to the least significant bit. Each element's comparison forms a zero extended bit vector in "dst".

FOR j := 0 to 3
    i := j*64
    FOR k := 0 to j-1
        m := k*64
        dst[i+k] := (a[i+63:i] == a[m+63:m]) ? 1 : 0
    ENDFOR
    dst[i+63:i+j] := 0
ENDFOR
dst[MAX:256] := 0

AVX512CD
AVX512VL
immintrin.h
Compare

Test each 64-bit element of "a" for equality with all other elements in "a" closer to the least significant bit using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in "dst".

FOR j := 0 to 3
    i := j*64
    IF k[j]
        FOR l := 0 to j-1
            m := l*64
            dst[i+l] := (a[i+63:i] == a[m+63:m]) ? 1 : 0
        ENDFOR
        dst[i+63:i+j] := 0
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:256] := 0

AVX512CD
AVX512VL
immintrin.h
Compare

Test each 64-bit element of "a" for equality with all other elements in "a" closer to the least significant bit using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in "dst".

FOR j := 0 to 3
    i := j*64
    IF k[j]
        FOR l := 0 to j-1
            m := l*64
            dst[i+l] := (a[i+63:i] == a[m+63:m]) ? 1 : 0
        ENDFOR
        dst[i+63:i+j] := 0
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

AVX512CD
AVX512VL
immintrin.h
Compare

Test each 64-bit element of "a" for equality with all other elements in "a" closer to the least significant bit. Each element's comparison forms a zero extended bit vector in "dst".

FOR j := 0 to 1
    i := j*64
    FOR k := 0 to j-1
        m := k*64
        dst[i+k] := (a[i+63:i] == a[m+63:m]) ? 1 : 0
    ENDFOR
    dst[i+63:i+j] := 0
ENDFOR
dst[MAX:128] := 0

AVX512CD
AVX512VL
immintrin.h
Compare
- - - - - Test each 64-bit element of "a" for equality with all other elements in "a" - closer to the least significant bit using writemask "k" (elements are copied from "src" - when the corresponding mask bit is not set). Each element's comparison forms a zero - extended bit vector in "dst". - - FOR j := 0 to 1 - i := j*64 - IF k[j] - FOR l := 0 to j-1 + + + + + Test each 64-bit element of "a" for equality with all other elements in "a" closer to the least significant bit using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in "dst". + +FOR j := 0 to 1 + i := j*64 + IF k[j] + FOR l := 0 to j-1 m := l*64 dst[i+l] := (a[i+63:i] == a[m+63:m]) ? 1 : 0 - ENDFOR - dst[i+63:i+j] := 0 - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512CD - AVX512VL -
immintrin.h
- Compare + ENDFOR + dst[i+63:i+j] := 0 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512CD + AVX512VL +
immintrin.h
+ Compare
- - - - Test each 64-bit element of "a" for equality with all other elements in "a" - closer to the least significant bit using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). Each element's comparison forms a zero extended bit - vector in "dst". - - FOR j := 0 to 1 - i := j*64 - IF k[j] - FOR l := 0 to j-1 + + + + Test each 64-bit element of "a" for equality with all other elements in "a" closer to the least significant bit using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in "dst". + +FOR j := 0 to 1 + i := j*64 + IF k[j] + FOR l := 0 to j-1 m := l*64 dst[i+l] := (a[i+63:i] == a[m+63:m]) ? 1 : 0 - ENDFOR - dst[i+63:i+j] := 0 - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512CD - AVX512VL -
immintrin.h
- Compare + ENDFOR + dst[i+63:i+j] := 0 + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512CD + AVX512VL +
immintrin.h
+ Compare
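The conflict-detection entries above are easier to check against a scalar model than against the packed pseudocode. A minimal sketch, assuming plain arrays stand in for the vector registers (the function name is invented here; this is not the stdarch implementation):

    // Scalar model of the conflict-detection pseudocode: bit l of dst[j] is set
    // iff a[j] == a[l] for l < j; bits j and above stay zero, matching
    // dst[i+31:i+j] := 0 in the entries above.
    fn conflict_epi32<const N: usize>(a: [u32; N]) -> [u32; N] {
        let mut dst = [0u32; N];
        for j in 0..N {
            for l in 0..j {
                if a[j] == a[l] {
                    dst[j] |= 1 << l;
                }
            }
        }
        dst
    }

    fn main() {
        // Element 2 equals elements 0 and 1, so its bit vector is 0b011.
        assert_eq!(conflict_epi32([7, 7, 7, 9]), [0b000, 0b001, 0b011, 0b000]);
    }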
- - - Counts the number of leading zero bits in each packed 32-bit integer in "a", - and store the results in "dst". - - FOR j := 0 to 7 - i := j*32 - tmp := 31 - dst[i+31:i] := 0 - DO WHILE (tmp >= 0 AND a[i+tmp] == 0) - tmp := tmp - 1 - dst[i+31:i] := dst[i+31:i] + 1 - OD - ENDFOR - dst[MAX:256] := 0 - - - AVX512CD - AVX512VL -
immintrin.h
- Bit Manipulation + + + Counts the number of leading zero bits in each packed 32-bit integer in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + tmp := 31 + dst[i+31:i] := 0 + DO WHILE (tmp >= 0 AND a[i+tmp] == 0) + tmp := tmp - 1 + dst[i+31:i] := dst[i+31:i] + 1 + OD +ENDFOR +dst[MAX:256] := 0 + + + AVX512CD + AVX512VL +
immintrin.h
+ Bit Manipulation
- - - - - Counts the number of leading zero bits in each packed 32-bit integer in "a", - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - tmp := 31 - dst[i+31:i] := 0 - DO WHILE (tmp >= 0 AND a[i+tmp] == 0) + + + + + Counts the number of leading zero bits in each packed 32-bit integer in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + tmp := 31 + dst[i+31:i] := 0 + DO WHILE (tmp >= 0 AND a[i+tmp] == 0) tmp := tmp - 1 dst[i+31:i] := dst[i+31:i] + 1 - OD - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512CD - AVX512VL -
immintrin.h
- Bit Manipulation + OD + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512CD + AVX512VL +
immintrin.h
+ Bit Manipulation
- - - - Counts the number of leading zero bits in each packed 32-bit integer in "a", - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - tmp := 31 - dst[i+31:i] := 0 - DO WHILE (tmp >= 0 AND a[i+tmp] == 0) + + + + Counts the number of leading zero bits in each packed 32-bit integer in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + tmp := 31 + dst[i+31:i] := 0 + DO WHILE (tmp >= 0 AND a[i+tmp] == 0) tmp := tmp - 1 dst[i+31:i] := dst[i+31:i] + 1 - OD - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512CD - AVX512VL -
immintrin.h
- Bit Manipulation + OD + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512CD + AVX512VL +
immintrin.h
+ Bit Manipulation
- - - Counts the number of leading zero bits in each packed 32-bit integer in "a", - and store the results in "dst". - - FOR j := 0 to 3 - i := j*32 - tmp := 31 - dst[i+31:i] := 0 - DO WHILE (tmp >= 0 AND a[i+tmp] == 0) - tmp := tmp - 1 - dst[i+31:i] := dst[i+31:i] + 1 - OD - ENDFOR - dst[MAX:128] := 0 - - - AVX512CD - AVX512VL -
immintrin.h
- Bit Manipulation + + + Counts the number of leading zero bits in each packed 32-bit integer in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + tmp := 31 + dst[i+31:i] := 0 + DO WHILE (tmp >= 0 AND a[i+tmp] == 0) + tmp := tmp - 1 + dst[i+31:i] := dst[i+31:i] + 1 + OD +ENDFOR +dst[MAX:128] := 0 + + + AVX512CD + AVX512VL +
immintrin.h
+ Bit Manipulation
- - - - - Counts the number of leading zero bits in each packed 32-bit integer in "a", - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - tmp := 31 - dst[i+31:i] := 0 - DO WHILE (tmp >= 0 AND a[i+tmp] == 0) + + + + + Counts the number of leading zero bits in each packed 32-bit integer in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + tmp := 31 + dst[i+31:i] := 0 + DO WHILE (tmp >= 0 AND a[i+tmp] == 0) tmp := tmp - 1 dst[i+31:i] := dst[i+31:i] + 1 - OD - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512CD - AVX512VL -
immintrin.h
- Bit Manipulation + OD + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512CD + AVX512VL +
immintrin.h
+ Bit Manipulation
- - - - Counts the number of leading zero bits in each packed 32-bit integer in "a", - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - tmp := 31 - dst[i+31:i] := 0 - DO WHILE (tmp >= 0 AND a[i+tmp] == 0) + + + + Counts the number of leading zero bits in each packed 32-bit integer in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + tmp := 31 + dst[i+31:i] := 0 + DO WHILE (tmp >= 0 AND a[i+tmp] == 0) tmp := tmp - 1 dst[i+31:i] := dst[i+31:i] + 1 - OD - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512CD - AVX512VL -
immintrin.h
- Bit Manipulation + OD + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512CD + AVX512VL +
immintrin.h
+ Bit Manipulation
- - - Counts the number of leading zero bits in each packed 64-bit integer in "a", - and store the results in "dst". - - FOR j := 0 to 3 - i := j*64 - tmp := 63 - dst[i+63:i] := 0 - DO WHILE (tmp >= 0 AND a[i+tmp] == 0) - tmp := tmp - 1 - dst[i+63:i] := dst[i+63:i] + 1 - OD - ENDFOR - dst[MAX:256] := 0 - - - AVX512CD - AVX512VL -
immintrin.h
- Bit Manipulation + + + Counts the number of leading zero bits in each packed 64-bit integer in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + tmp := 63 + dst[i+63:i] := 0 + DO WHILE (tmp >= 0 AND a[i+tmp] == 0) + tmp := tmp - 1 + dst[i+63:i] := dst[i+63:i] + 1 + OD +ENDFOR +dst[MAX:256] := 0 + + + AVX512CD + AVX512VL +
immintrin.h
+ Bit Manipulation
- - - - - Counts the number of leading zero bits in each packed 64-bit integer in "a", - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - tmp := 63 - dst[i+63:i] := 0 - DO WHILE (tmp >= 0 AND a[i+tmp] == 0) + + + + + Counts the number of leading zero bits in each packed 64-bit integer in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + tmp := 63 + dst[i+63:i] := 0 + DO WHILE (tmp >= 0 AND a[i+tmp] == 0) tmp := tmp - 1 dst[i+63:i] := dst[i+63:i] + 1 - OD - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512CD - AVX512VL -
immintrin.h
- Bit Manipulation + OD + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512CD + AVX512VL +
immintrin.h
+ Bit Manipulation
- - - - Counts the number of leading zero bits in each packed 64-bit integer in "a", - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - tmp := 63 - dst[i+63:i] := 0 - DO WHILE (tmp >= 0 AND a[i+tmp] == 0) + + + + Counts the number of leading zero bits in each packed 64-bit integer in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + tmp := 63 + dst[i+63:i] := 0 + DO WHILE (tmp >= 0 AND a[i+tmp] == 0) tmp := tmp - 1 dst[i+63:i] := dst[i+63:i] + 1 - OD - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512CD - AVX512VL -
immintrin.h
- Bit Manipulation + OD + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512CD + AVX512VL +
immintrin.h
+ Bit Manipulation
- - - Counts the number of leading zero bits in each packed 64-bit integer in "a", - and store the results in "dst". - - FOR j := 0 to 1 - i := j*64 - tmp := 63 - dst[i+63:i] := 0 - DO WHILE (tmp >= 0 AND a[i+tmp] == 0) - tmp := tmp - 1 - dst[i+63:i] := dst[i+63:i] + 1 - OD - ENDFOR - dst[MAX:128] := 0 - - - AVX512CD - AVX512VL -
immintrin.h
- Bit Manipulation + + + Counts the number of leading zero bits in each packed 64-bit integer in "a", and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + tmp := 63 + dst[i+63:i] := 0 + DO WHILE (tmp >= 0 AND a[i+tmp] == 0) + tmp := tmp - 1 + dst[i+63:i] := dst[i+63:i] + 1 + OD +ENDFOR +dst[MAX:128] := 0 + + + AVX512CD + AVX512VL +
immintrin.h
+ Bit Manipulation
- - - - - Counts the number of leading zero bits in each packed 64-bit integer in "a", - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - tmp := 63 - dst[i+63:i] := 0 - DO WHILE (tmp >= 0 AND a[i+tmp] == 0) + + + + + Counts the number of leading zero bits in each packed 64-bit integer in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + tmp := 63 + dst[i+63:i] := 0 + DO WHILE (tmp >= 0 AND a[i+tmp] == 0) tmp := tmp - 1 dst[i+63:i] := dst[i+63:i] + 1 - OD - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512CD - AVX512VL -
immintrin.h
- Bit Manipulation + OD + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512CD + AVX512VL +
immintrin.h
+ Bit Manipulation
- - - - Counts the number of leading zero bits in each packed 64-bit integer in "a", - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - tmp := 63 - dst[i+63:i] := 0 - DO WHILE (tmp >= 0 AND a[i+tmp] == 0) + + + + Counts the number of leading zero bits in each packed 64-bit integer in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + tmp := 63 + dst[i+63:i] := 0 + DO WHILE (tmp >= 0 AND a[i+tmp] == 0) tmp := tmp - 1 dst[i+63:i] := dst[i+63:i] + 1 - OD - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512CD - AVX512VL -
immintrin.h
- Bit Manipulation -
- - - - - - Broadcast the low 8-bits from input mask "k" to all 64-bit elements of "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := ZeroExtend64(k[7:0]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512CD -
immintrin.h
- Swizzle + OD + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512CD + AVX512VL +
immintrin.h
+ Bit Manipulation +
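The DO WHILE loop in the leading-zero-count entries above is exactly a per-element leading_zeros(), with an all-zero element producing the full element width (32 or 64). A minimal sketch of the zeromask form, with invented names (not the stdarch API):

    // Scalar model of the masked 32-bit leading-zero count: counting down from
    // bit 31 while bits are zero is u32::leading_zeros(), and lzcnt(0) == 32.
    fn maskz_lzcnt_epi32<const N: usize>(k: u16, a: [u32; N]) -> [u32; N] {
        let mut dst = [0u32; N];
        for j in 0..N {
            if (k >> j) & 1 == 1 {
                dst[j] = a[j].leading_zeros();
            } // else: the zeromask form leaves dst[j] at 0
        }
        dst
    }

    fn main() {
        assert_eq!(maskz_lzcnt_epi32(0b1011, [1, 0, u32::MAX, 8]), [31, 32, 0, 28]);
    }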
+ + + + + + Broadcast the low 8 bits from input mask "k" to all 64-bit elements of "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := ZeroExtend64(k[7:0]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512CD + 
immintrin.h
+ Swizzle
- - - Broadcast the low 16-bits from input mask "k" to all 32-bit elements of "dst". - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := ZeroExtend32(k[15:0]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512CD -
immintrin.h
- Swizzle + + + Broadcast the low 16 bits from input mask "k" to all 32-bit elements of "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := ZeroExtend32(k[15:0]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512CD + 
immintrin.h
+ Swizzle
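The two mask-broadcast entries above (by their pseudocode, Intel's _mm512_broadcastmb_epi64 and _mm512_broadcastmw_epi32) only zero-extend the low bits of the mask register into every element. A one-line scalar model of the byte form (illustrative only):

    // The low 8 bits of mask k, zero-extended into each 64-bit element.
    fn broadcastmb_epi64(k: u8) -> [u64; 8] {
        [k as u64; 8]
    }

    fn main() {
        assert_eq!(broadcastmb_epi64(0b1010_0001), [0xA1; 8]);
    }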
- - - Test each 32-bit element of "a" for equality with all other elements in "a" - closer to the least significant bit. Each element's comparison forms a zero extended bit - vector in "dst". - - FOR j := 0 to 15 - i := j*32 - FOR k := 0 to j-1 - m := k*32 - dst[i+k] := (a[i+31:i] == a[m+31:m]) ? 1 : 0 - ENDFOR - dst[i+31:i+j] := 0 - ENDFOR - dst[MAX:512] := 0 - - - AVX512CD -
immintrin.h
- Compare + + + Test each 32-bit element of "a" for equality with all other elements in "a" closer to the least significant bit. Each element's comparison forms a zero extended bit vector in "dst". + +FOR j := 0 to 15 + i := j*32 + FOR k := 0 to j-1 + m := k*32 + dst[i+k] := (a[i+31:i] == a[m+31:m]) ? 1 : 0 + ENDFOR + dst[i+31:i+j] := 0 +ENDFOR +dst[MAX:512] := 0 + + + AVX512CD +
immintrin.h
+ Compare
- - - - - Test each 32-bit element of "a" for equality with all other elements in "a" - closer to the least significant bit using writemask "k" (elements are copied from "src" - when the corresponding mask bit is not set). Each element's comparison forms a zero - extended bit vector in "dst". - - FOR j := 0 to 15 - i := j*32 - IF k[j] - FOR l := 0 to j-1 + + + + + Test each 32-bit element of "a" for equality with all other elements in "a" closer to the least significant bit using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in "dst". + +FOR j := 0 to 15 + i := j*32 + IF k[j] + FOR l := 0 to j-1 m := l*32 dst[i+l] := (a[i+31:i] == a[m+31:m]) ? 1 : 0 - ENDFOR - dst[i+31:i+j] := 0 - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512CD -
immintrin.h
- Compare + ENDFOR + dst[i+31:i+j] := 0 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512CD +
immintrin.h
+ Compare
- - - - Test each 32-bit element of "a" for equality with all other elements in "a" - closer to the least significant bit using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). Each element's comparison forms a zero extended bit - vector in "dst". - - FOR j := 0 to 15 - i := j*32 - IF k[j] - FOR l := 0 to j-1 + + + + Test each 32-bit element of "a" for equality with all other elements in "a" closer to the least significant bit using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in "dst". + +FOR j := 0 to 15 + i := j*32 + IF k[j] + FOR l := 0 to j-1 m := l*32 dst[i+l] := (a[i+31:i] == a[m+31:m]) ? 1 : 0 - ENDFOR - dst[i+31:i+j] := 0 - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512CD -
immintrin.h
- Compare + ENDFOR + dst[i+31:i+j] := 0 + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512CD +
immintrin.h
+ Compare
- - - Test each 64-bit element of "a" for equality with all other elements in "a" - closer to the least significant bit. Each element's comparison forms a zero extended bit - vector in "dst". - - FOR j := 0 to 7 - i := j*64 - FOR k := 0 to j-1 - m := k*64 - dst[i+k] := (a[i+63:i] == a[m+63:m]) ? 1 : 0 - ENDFOR - dst[i+63:i+j] := 0 - ENDFOR - dst[MAX:512] := 0 - - - AVX512CD -
immintrin.h
- Compare + + + Test each 64-bit element of "a" for equality with all other elements in "a" closer to the least significant bit. Each element's comparison forms a zero extended bit vector in "dst". + +FOR j := 0 to 7 + i := j*64 + FOR k := 0 to j-1 + m := k*64 + dst[i+k] := (a[i+63:i] == a[m+63:m]) ? 1 : 0 + ENDFOR + dst[i+63:i+j] := 0 +ENDFOR +dst[MAX:512] := 0 + + + AVX512CD +
immintrin.h
+ Compare
- - - - - Test each 64-bit element of "a" for equality with all other elements in "a" - closer to the least significant bit using writemask "k" (elements are copied from "src" - when the corresponding mask bit is not set). Each element's comparison forms a zero - extended bit vector in "dst". - - FOR j := 0 to 7 - i := j*64 - IF k[j] - FOR l := 0 to j-1 + + + + + Test each 64-bit element of "a" for equality with all other elements in "a" closer to the least significant bit using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in "dst". + +FOR j := 0 to 7 + i := j*64 + IF k[j] + FOR l := 0 to j-1 m := l*64 dst[i+l] := (a[i+63:i] == a[m+63:m]) ? 1 : 0 - ENDFOR - dst[i+63:i+j] := 0 - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512CD -
immintrin.h
- Compare + ENDFOR + dst[i+63:i+j] := 0 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512CD +
immintrin.h
+ Compare
- - - - Test each 64-bit element of "a" for equality with all other elements in "a" - closer to the least significant bit using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). Each element's comparison forms a zero extended bit - vector in "dst". - - FOR j := 0 to 7 - i := j*64 - IF k[j] - FOR l := 0 to j-1 + + + + Test each 64-bit element of "a" for equality with all other elements in "a" closer to the least significant bit using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in "dst". + +FOR j := 0 to 7 + i := j*64 + IF k[j] + FOR l := 0 to j-1 m := l*64 dst[i+l] := (a[i+63:i] == a[m+63:m]) ? 1 : 0 - ENDFOR - dst[i+63:i+j] := 0 - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512CD -
immintrin.h
- Compare + ENDFOR + dst[i+63:i+j] := 0 + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512CD +
immintrin.h
+ Compare
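Every masked entry in this section varies only in its ELSE arm: writemask ("mask_") forms fall back to "src", zeromask ("maskz_") forms to zero. That recurring selection step can be modeled once; a sketch with invented names, shown for 32-bit elements:

    // Writemask: keep src where the mask bit is clear.
    fn mask_select<const N: usize>(k: u16, src: [u32; N], result: [u32; N]) -> [u32; N] {
        let mut dst = src;
        for j in 0..N {
            if (k >> j) & 1 == 1 {
                dst[j] = result[j];
            }
        }
        dst
    }

    // Zeromask: the same selection against an all-zero fallback.
    fn maskz_select<const N: usize>(k: u16, result: [u32; N]) -> [u32; N] {
        mask_select(k, [0u32; N], result)
    }

    fn main() {
        let r = [10, 11, 12, 13];
        assert_eq!(mask_select(0b0101, [0, 1, 2, 3], r), [10, 1, 12, 3]);
        assert_eq!(maskz_select(0b0101, r), [10, 0, 12, 0]);
    }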
- - - Counts the number of leading zero bits in each packed 32-bit integer in "a", - and store the results in "dst". - - FOR j := 0 to 15 - i := j*32 - tmp := 31 - dst[i+31:i] := 0 - DO WHILE (tmp >= 0 AND a[i+tmp] == 0) - tmp := tmp - 1 - dst[i+31:i] := dst[i+31:i] + 1 - OD - ENDFOR - dst[MAX:512] := 0 - - - AVX512CD -
immintrin.h
- Bit Manipulation + + + Counts the number of leading zero bits in each packed 32-bit integer in "a", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + tmp := 31 + dst[i+31:i] := 0 + DO WHILE (tmp >= 0 AND a[i+tmp] == 0) + tmp := tmp - 1 + dst[i+31:i] := dst[i+31:i] + 1 + OD +ENDFOR +dst[MAX:512] := 0 + + + AVX512CD +
immintrin.h
+ Bit Manipulation
- - - - - Counts the number of leading zero bits in each packed 32-bit integer in "a", - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - tmp := 31 - dst[i+31:i] := 0 - DO WHILE (tmp >= 0 AND a[i+tmp] == 0) + + + + + Counts the number of leading zero bits in each packed 32-bit integer in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + tmp := 31 + dst[i+31:i] := 0 + DO WHILE (tmp >= 0 AND a[i+tmp] == 0) tmp := tmp - 1 dst[i+31:i] := dst[i+31:i] + 1 - OD - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512CD -
immintrin.h
- Bit Manipulation + OD + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512CD +
immintrin.h
+ Bit Manipulation
- - - - Counts the number of leading zero bits in each packed 32-bit integer in "a", - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - tmp := 31 - dst[i+31:i] := 0 - DO WHILE (tmp >= 0 AND a[i+tmp] == 0) + + + + Counts the number of leading zero bits in each packed 32-bit integer in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + tmp := 31 + dst[i+31:i] := 0 + DO WHILE (tmp >= 0 AND a[i+tmp] == 0) tmp := tmp - 1 dst[i+31:i] := dst[i+31:i] + 1 - OD - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512CD -
immintrin.h
- Bit Manipulation + OD + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512CD +
immintrin.h
+ Bit Manipulation
- - - Counts the number of leading zero bits in each packed 64-bit integer in "a", - and store the results in "dst". - - FOR j := 0 to 7 - i := j*64 - tmp := 63 - dst[i+63:i] := 0 - DO WHILE (tmp >= 0 AND a[i+tmp] == 0) - tmp := tmp - 1 - dst[i+63:i] := dst[i+63:i] + 1 - OD - ENDFOR - dst[MAX:512] := 0 - - - AVX512CD -
immintrin.h
- Bit Manipulation + + + Counts the number of leading zero bits in each packed 64-bit integer in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + tmp := 63 + dst[i+63:i] := 0 + DO WHILE (tmp >= 0 AND a[i+tmp] == 0) + tmp := tmp - 1 + dst[i+63:i] := dst[i+63:i] + 1 + OD +ENDFOR +dst[MAX:512] := 0 + + + AVX512CD +
immintrin.h
+ Bit Manipulation
- - - - - Counts the number of leading zero bits in each packed 64-bit integer in "a", - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - tmp := 63 - dst[i+63:i] := 0 - DO WHILE (tmp >= 0 AND a[i+tmp] == 0) + + + + + Counts the number of leading zero bits in each packed 64-bit integer in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + tmp := 63 + dst[i+63:i] := 0 + DO WHILE (tmp >= 0 AND a[i+tmp] == 0) tmp := tmp - 1 dst[i+63:i] := dst[i+63:i] + 1 - OD - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512CD -
immintrin.h
- Bit Manipulation + OD + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512CD +
immintrin.h
+ Bit Manipulation
- - - - Counts the number of leading zero bits in each packed 64-bit integer in "a", - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - tmp := 63 - dst[i+63:i] := 0 - DO WHILE (tmp >= 0 AND a[i+tmp] == 0) + + + + Counts the number of leading zero bits in each packed 64-bit integer in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + tmp := 63 + dst[i+63:i] := 0 + DO WHILE (tmp >= 0 AND a[i+tmp] == 0) tmp := tmp - 1 dst[i+63:i] := dst[i+63:i] + 1 - OD - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512CD -
immintrin.h
- Bit Manipulation -
- - - - - - - - - Compute the bitwise NOT of packed double-precision (64-bit) floating-point - elements in "a" and then AND with "b", and store the results in "dst" using writemask - "k" (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Logical + OD + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512CD +
immintrin.h
+ Bit Manipulation +
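Conflict detection and leading-zero count are meant to be combined: 31 - lzcnt(conflict bit vector) yields the index of the nearest earlier duplicate of each element, or -1 when the bit vector is empty. A scalar sketch of that derivation, folding the two pseudocode loops above together (illustrative only):

    // For each element, the highest set bit of its conflict bit vector is the
    // closest earlier duplicate; 31 - leading_zeros finds it, and an empty
    // bit vector maps to 31 - 32 == -1.
    fn nearest_earlier_duplicate<const N: usize>(a: [u32; N]) -> [i32; N] {
        let mut out = [-1i32; N];
        for j in 0..N {
            let mut bits = 0u32;
            for l in 0..j {
                if a[j] == a[l] {
                    bits |= 1 << l; // the conflict comparison from the entries above
                }
            }
            out[j] = 31 - bits.leading_zeros() as i32;
        }
        out
    }

    fn main() {
        assert_eq!(nearest_earlier_duplicate([5, 6, 5, 5]), [-1, -1, 0, 2]);
    }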
+ + + + + + + + + Compute the bitwise NOT of packed double-precision (64-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Logical
- - - - - Compute the bitwise NOT of packed double-precision (64-bit) floating-point - elements in "a" and then AND with "b", and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Logical + + + + + Compute the bitwise NOT of packed double-precision (64-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Logical
- - - - - - Compute the bitwise NOT of packed double-precision (64-bit) floating-point - elements in "a" and then AND with "b", and store the results in "dst" using writemask - "k" (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Logical + + + + + + Compute the bitwise NOT of packed double-precision (64-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Logical
- - - - - Compute the bitwise NOT of packed double-precision (64-bit) floating-point - elements in "a" and then AND with "b", and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Logical + + + + + Compute the bitwise NOT of packed double-precision (64-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Logical
- - - - - - Compute the bitwise NOT of packed single-precision (32-bit) floating-point - elements in "a" and then AND with "b", and store the results in "dst" using writemask - "k" (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Logical + + + + + + Compute the bitwise NOT of packed single-precision (32-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Logical
- - - - - Compute the bitwise NOT of packed single-precision (32-bit) floating-point - elements in "a" and then AND with "b", and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Logical + + + + + Compute the bitwise NOT of packed single-precision (32-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Logical
- - - - - - Compute the bitwise NOT of packed single-precision (32-bit) floating-point - elements in "a" and then AND with "b", and store the results in "dst" using writemask - "k" (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Logical + + + + + + Compute the bitwise NOT of packed single-precision (32-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Logical
- - - - - Compute the bitwise NOT of packed single-precision (32-bit) floating-point - elements in "a" and then AND with "b", and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Logical + + + + + Compute the bitwise NOT of packed single-precision (32-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Logical
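The floating-point logical entries act on raw bit patterns, so a Rust model needs to_bits/from_bits round-trips; note the operand order, (NOT a) AND b, with the complement applied to the first operand. A per-lane sketch of the 64-bit operation (illustrative only; the AND, OR and XOR entries that follow differ just in the combining operator):

    // One 64-bit lane of the (NOT a) AND b operation above, on f64 bit patterns.
    fn andnot_pd_lane(a: f64, b: f64) -> f64 {
        f64::from_bits(!a.to_bits() & b.to_bits())
    }

    fn main() {
        // An all-ones first operand complements to zero and clears b entirely.
        let all_ones = f64::from_bits(u64::MAX);
        assert_eq!(andnot_pd_lane(all_ones, -1.5).to_bits(), 0);
        // Classic use: clear the sign bit (abs) by ANDN with a sign-only mask.
        let sign_only = f64::from_bits(1u64 << 63);
        assert_eq!(andnot_pd_lane(sign_only, -2.0), 2.0);
    }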
- - - - - - Compute the bitwise AND of packed double-precision (64-bit) floating-point - elements in "a" and "b", and store the results in "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := (a[i+63:i] AND b[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Logical + + + + + + Compute the bitwise AND of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] AND b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Logical
- - - - - Compute the bitwise AND of packed double-precision (64-bit) floating-point - elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := (a[i+63:i] AND b[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Logical + + + + + Compute the bitwise AND of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] AND b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Logical
- - - - - - Compute the bitwise AND of packed double-precision (64-bit) floating-point - elements in "a" and "b", and store the results in "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := (a[i+63:i] AND b[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Logical + + + + + + Compute the bitwise AND of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] AND b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Logical
- - - - - Compute the bitwise AND of packed double-precision (64-bit) floating-point - elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := (a[i+63:i] AND b[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Logical + + + + + Compute the bitwise AND of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] AND b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Logical
- - - - - - Compute the bitwise AND of packed single-precision (32-bit) floating-point - elements in "a" and "b", and store the results in "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := (a[i+31:i] AND b[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Logical + + + + + + Compute the bitwise AND of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] AND b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Logical
- - - - - Compute the bitwise AND of packed single-precision (32-bit) floating-point - elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := (a[i+31:i] AND b[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Logical + + + + + Compute the bitwise AND of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] AND b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Logical
- - - - - - Compute the bitwise AND of packed single-precision (32-bit) floating-point - elements in "a" and "b", and store the results in "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := (a[i+31:i] AND b[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Logical + + + + + + Compute the bitwise AND of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] AND b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Logical
- - - - - Compute the bitwise AND of packed single-precision (32-bit) floating-point - elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := (a[i+31:i] AND b[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Logical + + + + + Compute the bitwise AND of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] AND b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Logical
- - - - - - Compute the bitwise OR of packed double-precision (64-bit) floating-point - elements in "a" and "b", and store the results in "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] OR b[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Logical + + + + + + Compute the bitwise OR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] OR b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Logical
- - - - - Compute the bitwise OR of packed double-precision (64-bit) floating-point - elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] OR b[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Logical + + + + + Compute the bitwise OR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] OR b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Logical
- - - - - - Compute the bitwise OR of packed double-precision (64-bit) floating-point - elements in "a" and "b", and store the results in "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] OR b[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Logical + + + + + + Compute the bitwise OR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] OR b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Logical
- - - - - Compute the bitwise OR of packed double-precision (64-bit) floating-point - elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] OR b[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Logical + + + + + Compute the bitwise OR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] OR b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Logical
- - - - - - Compute the bitwise OR of packed single-precision (32-bit) floating-point - elements in "a" and "b", and store the results in "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] OR b[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Logical + + + + + + Compute the bitwise OR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] OR b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Logical
- - - - - Compute the bitwise OR of packed single-precision (32-bit) floating-point - elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] OR b[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Logical + + + + + Compute the bitwise OR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] OR b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Logical
- - - - - - Compute the bitwise OR of packed single-precision (32-bit) floating-point - elements in "a" and "b", and store the results in "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] OR b[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Logical + + + + + + Compute the bitwise OR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] OR b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Logical
- - - - - Compute the bitwise OR of packed single-precision (32-bit) floating-point - elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] OR b[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Logical + + + + + Compute the bitwise OR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] OR b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Logical
- - - - - - Compute the bitwise XOR of packed double-precision (64-bit) floating-point - elements in "a" and "b", and store the results in "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] XOR b[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Logical + + + + + + Compute the bitwise XOR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] XOR b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Logical
- - - - - Compute the bitwise XOR of packed double-precision (64-bit) floating-point - elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] XOR b[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Logical + + + + + Compute the bitwise XOR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] XOR b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Logical
- - - - - - Compute the bitwise XOR of packed double-precision (64-bit) floating-point - elements in "a" and "b", and store the results in "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] XOR b[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Logical + + + + + + Compute the bitwise XOR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] XOR b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Logical
- - - - - Compute the bitwise XOR of packed double-precision (64-bit) floating-point - elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] XOR b[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Logical + + + + + Compute the bitwise XOR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] XOR b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Logical
- - - - - - Compute the bitwise XOR of packed single-precision (32-bit) floating-point - elements in "a" and "b", and store the results in "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] XOR b[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Logical + + + + + + Compute the bitwise XOR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] XOR b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Logical
- - - - - Compute the bitwise XOR of packed single-precision (32-bit) floating-point - elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] XOR b[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Logical + + + + + Compute the bitwise XOR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] XOR b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Logical
- - - - - - Compute the bitwise XOR of packed single-precision (32-bit) floating-point - elements in "a" and "b", and store the results in "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] XOR b[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Logical + + + + + + Compute the bitwise XOR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] XOR b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Logical
- - - - - Compute the bitwise XOR of packed single-precision (32-bit) floating-point - elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] XOR b[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Logical + + + + + Compute the bitwise XOR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] XOR b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Logical
- - - Broadcast the lower 2 packed single-precision (32-bit) floating-point elements - from "a" to all elements of "dst". - - FOR j := 0 to 7 - i := j*32 - n := (j % 2)*32 - dst[i+31:i] := a[n+31:n] - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + Broadcast the lower 2 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst". + +FOR j := 0 to 7 + i := j*32 + n := (j % 2)*32 + dst[i+31:i] := a[n+31:n] +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Broadcast the lower 2 packed single-precision (32-bit) floating-point elements - from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" - when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - n := (j % 2)*32 - IF k[j] - dst[i+31:i] := a[n+31:n] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Broadcast the lower 2 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + n := (j % 2)*32 + IF k[j] + dst[i+31:i] := a[n+31:n] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - Broadcast the lower 2 packed single-precision (32-bit) floating-point elements - from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - n := (j % 2)*32 - IF k[j] - dst[i+31:i] := a[n+31:n] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + + Broadcast the lower 2 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + n := (j % 2)*32 + IF k[j] + dst[i+31:i] := a[n+31:n] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
- - - Broadcast the 2 packed double-precision (64-bit) floating-point elements from - "a" to all elements of "dst". - - FOR j := 0 to 3 - i := j*64 - n := (j % 2)*64 - dst[i+63:i] := a[n+63:n] - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + Broadcast the 2 packed double-precision (64-bit) floating-point elements from "a" to all elements of "dst". + +FOR j := 0 to 3 + i := j*64 + n := (j % 2)*64 + dst[i+63:i] := a[n+63:n] +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Broadcast the 2 packed double-precision (64-bit) floating-point elements from - "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - n := (j % 2)*64 - IF k[j] - dst[i+63:i] := a[n+63:n] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Broadcast the 2 packed double-precision (64-bit) floating-point elements from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + n := (j % 2)*64 + IF k[j] + dst[i+63:i] := a[n+63:n] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - Broadcast the 2 packed double-precision (64-bit) floating-point elements from - "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - n := (j % 2)*64 - IF k[j] - dst[i+63:i] := a[n+63:n] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + + Broadcast the 2 packed double-precision (64-bit) floating-point elements from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + n := (j % 2)*64 + IF k[j] + dst[i+63:i] := a[n+63:n] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
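In all of the broadcast entries above the source index is "j % 2", so even destination lanes receive element 0 and odd lanes element 1. A scalar Rust sketch of the writemask variant for the single-precision case (illustrative model, made-up name):

    fn mask_broadcast_f32x2(src: [f32; 8], k: u8, a: [f32; 2]) -> [f32; 8] {
        let mut dst = src;
        for j in 0..8 {
            if (k >> j) & 1 == 1 {
                dst[j] = a[j % 2]; // n := (j % 2)*32 in the pseudocode above
            }
        }
        dst
    }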
- - - Broadcast the lower 2 packed 32-bit integers from "a" to all elements of "dst. - - FOR j := 0 to 7 - i := j*32 - n := (j % 2)*32 - dst[i+31:i] := a[n+31:n] - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + Broadcast the lower 2 packed 32-bit integers from "a" to all elements of "dst". +
+FOR j := 0 to 7
+ i := j*32
+ n := (j % 2)*32
+ dst[i+31:i] := a[n+31:n]
+ENDFOR
+dst[MAX:256] := 0
+
+
+ AVX512DQ
+ AVX512VL
+
immintrin.h
+ Miscellaneous
- - - - - Broadcast the lower 2 packed 32-bit integers from "a" to all elements of "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - FOR j := 0 to 7 - i := j*32 - n := (j % 2)*32 - IF k[j] - dst[i+31:i] := a[n+31:n] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Broadcast the lower 2 packed 32-bit integers from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + n := (j % 2)*32 + IF k[j] + dst[i+31:i] := a[n+31:n] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - Broadcast the lower 2 packed 32-bit integers from "a" to all elements of "dst" - using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - n := (j % 2)*32 - IF k[j] - dst[i+31:i] := a[n+31:n] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + + Broadcast the lower 2 packed 32-bit integers from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + n := (j % 2)*32 + IF k[j] + dst[i+31:i] := a[n+31:n] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
- - - Broadcast the lower 2 packed 32-bit integers from "a" to all elements of "dst. - - FOR j := 0 to 3 - i := j*32 - n := (j % 2)*32 - dst[i+31:i] := a[n+31:n] - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + Broadcast the lower 2 packed 32-bit integers from "a" to all elements of "dst". +
+FOR j := 0 to 3
+ i := j*32
+ n := (j % 2)*32
+ dst[i+31:i] := a[n+31:n]
+ENDFOR
+dst[MAX:128] := 0
+
+
+ AVX512DQ
+ AVX512VL
+
immintrin.h
+ Miscellaneous
- - - - - Broadcast the lower 2 packed 32-bit integers from "a" to all elements of "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - FOR j := 0 to 3 - i := j*32 - n := (j % 2)*32 - IF k[j] - dst[i+31:i] := a[n+31:n] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Broadcast the lower 2 packed 32-bit integers from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + n := (j % 2)*32 + IF k[j] + dst[i+31:i] := a[n+31:n] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - Broadcast the lower 2 packed 32-bit integers from "a" to all elements of "dst" - using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - n := (j % 2)*32 - IF k[j] - dst[i+31:i] := a[n+31:n] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + + Broadcast the lower 2 packed 32-bit integers from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + n := (j % 2)*32 + IF k[j] + dst[i+31:i] := a[n+31:n] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
- - - Broadcast the 2 packed 64-bit integers from "a" to all elements of "dst". - - FOR j := 0 to 3 - i := j*64 - n := (j % 2)*64 - dst[i+63:i] := a[n+63:n] - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + Broadcast the 2 packed 64-bit integers from "a" to all elements of "dst". + +FOR j := 0 to 3 + i := j*64 + n := (j % 2)*64 + dst[i+63:i] := a[n+63:n] +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Broadcast the 2 packed 64-bit integers from "a" to all elements of "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - FOR j := 0 to 3 - i := j*64 - n := (j % 2)*64 - IF k[j] - dst[i+63:i] := a[n+63:n] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Broadcast the 2 packed 64-bit integers from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + n := (j % 2)*64 + IF k[j] + dst[i+63:i] := a[n+63:n] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - Broadcast the 2 packed 64-bit integers from "a" to all elements of "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - n := (j % 2)*64 - IF k[j] - dst[i+63:i] := a[n+63:n] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + + Broadcast the 2 packed 64-bit integers from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + n := (j % 2)*64 + IF k[j] + dst[i+63:i] := a[n+63:n] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
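The integer broadcasts above follow the same pattern as the floating-point ones. As a concrete worked example of the zeromask form, take the 128-bit 32-bit-integer variant with k = 0b0110: only lanes 1 and 2 are written, receiving a[1] and a[0] respectively, while lanes 0 and 3 are zeroed. A hypothetical scalar model makes this checkable:

    fn maskz_broadcast_i32x2(k: u8, a: [i32; 2]) -> [i32; 4] {
        core::array::from_fn(|j| if (k >> j) & 1 == 1 { a[j % 2] } else { 0 })
    }

    fn main() {
        assert_eq!(maskz_broadcast_i32x2(0b0110, [7, 9]), [0, 9, 7, 0]);
    }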
- - - - Extract 128 bits (composed of 2 packed double-precision (64-bit) floating-point - elements) from "a", selected with "imm8", and store the result in "dst". - - CASE imm8[0] OF - 0: dst[127:0] := a[127:0] - 1: dst[127:0] := a[255:128] - ESAC - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + + Extract 128 bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "a", selected with "imm8", and store the result in "dst". + +CASE imm8[0] OF +0: dst[127:0] := a[127:0] +1: dst[127:0] := a[255:128] +ESAC +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - Extract 128 bits (composed of 2 packed double-precision (64-bit) floating-point - elements) from "a", selected with "imm8", and store the results in "dst" using writemask - "k" (elements are copied from "src" when the corresponding mask bit is not set). - - CASE imm8[0] OF - 0: tmp[127:0] := a[127:0] - 1: tmp[127:0] := a[255:128] - ESAC - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := tmp[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Extract 128 bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "a", selected with "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +CASE imm8[0] OF +0: tmp[127:0] := a[127:0] +1: tmp[127:0] := a[255:128] +ESAC +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Extract 128 bits (composed of 2 packed double-precision (64-bit) floating-point - elements) from "a", selected with "imm8", and store the results in "dst" using zeromask - "k" (elements are zeroed out when the corresponding mask bit is not set). - - CASE imm8[0] OF - 0: tmp[127:0] := a[127:0] - 1: tmp[127:0] := a[255:128] - ESAC - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := tmp[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Extract 128 bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "a", selected with "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +CASE imm8[0] OF +0: tmp[127:0] := a[127:0] +1: tmp[127:0] := a[255:128] +ESAC +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - Extract 128 bits (composed of 2 packed 64-bit integers) from "a", selected with - "imm8", and store the result in "dst". - - CASE imm8[0] OF - 0: dst[127:0] := a[127:0] - 1: dst[127:0] := a[255:128] - ESAC - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + + Extract 128 bits (composed of 2 packed 64-bit integers) from "a", selected with "imm8", and store the result in "dst". + +CASE imm8[0] OF +0: dst[127:0] := a[127:0] +1: dst[127:0] := a[255:128] +ESAC +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - Extract 128 bits (composed of 2 packed 64-bit integers) from "a", selected with - "imm8", and store the results in "dst" using writemask "k" (elements are copied from - "src" when the corresponding mask bit is not set). - - CASE imm8[0] OF - 0: tmp[127:0] := a[127:0] - 1: tmp[127:0] := a[255:128] - ESAC - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := tmp[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Extract 128 bits (composed of 2 packed 64-bit integers) from "a", selected with "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +CASE imm8[0] OF +0: tmp[127:0] := a[127:0] +1: tmp[127:0] := a[255:128] +ESAC +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Extract 128 bits (composed of 2 packed 64-bit integers) from "a", selected with - "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when - the corresponding mask bit is not set). - - CASE imm8[0] OF - 0: tmp[127:0] := a[127:0] - 1: tmp[127:0] := a[255:128] - ESAC - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := tmp[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Extract 128 bits (composed of 2 packed 64-bit integers) from "a", selected with "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +CASE imm8[0] OF +0: tmp[127:0] := a[127:0] +1: tmp[127:0] := a[255:128] +ESAC +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
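Every extract variant above first selects one 128-bit half via imm8[0] and then applies the usual mask merge to the two surviving lanes. A scalar Rust sketch of the 64-bit-integer writemask case (illustrative model, made-up name):

    fn mask_extracti64x2(src: [i64; 2], k: u8, a: [i64; 4], imm8: u8) -> [i64; 2] {
        let base = (imm8 as usize & 1) * 2; // imm8[0] picks the low or high half
        let tmp = [a[base], a[base + 1]];
        core::array::from_fn(|j| if (k >> j) & 1 == 1 { tmp[j] } else { src[j] })
    }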
- - - - Test packed double-precision (64-bit) floating-point elements in "a" for - special categories specified by "imm8", and store the results in mask vector "k". - [fpclass_note] - FOR j := 0 to 3 - i := j*64 - k[j] := CheckFPClass_FP64(a[i+63:i], imm8[7:0]) - ENDFOR - k[MAX:4] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + + Test packed double-precision (64-bit) floating-point elements in "a" for special categories specified by "imm8", and store the results in mask vector "k". + [fpclass_note] + FOR j := 0 to 3 + i := j*64 + k[j] := CheckFPClass_FP64(a[i+63:i], imm8[7:0]) +ENDFOR +k[MAX:4] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Test packed double-precision (64-bit) floating-point elements in "a" for - special categories specified by "imm8", and store the results in mask vector "k" using - zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). - [fpclass_note] - FOR j := 0 to 3 - i := j*64 - IF k1[j] - k[j] := CheckFPClass_FP64(a[i+63:i], imm8[7:0]) - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:4] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Test packed double-precision (64-bit) floating-point elements in "a" for special categories specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + [fpclass_note] + FOR j := 0 to 3 + i := j*64 + IF k1[j] + k[j] := CheckFPClass_FP64(a[i+63:i], imm8[7:0]) + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - Test packed double-precision (64-bit) floating-point elements in "a" for - special categories specified by "imm8", and store the results in mask vector "k". - [fpclass_note] - FOR j := 0 to 1 - i := j*64 - k[j] := CheckFPClass_FP64(a[i+63:i], imm8[7:0]) - ENDFOR - k[MAX:2] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + + Test packed double-precision (64-bit) floating-point elements in "a" for special categories specified by "imm8", and store the results in mask vector "k". + [fpclass_note] + FOR j := 0 to 1 + i := j*64 + k[j] := CheckFPClass_FP64(a[i+63:i], imm8[7:0]) +ENDFOR +k[MAX:2] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Test packed double-precision (64-bit) floating-point elements in "a" for - special categories specified by "imm8", and store the results in mask vector "k" using - zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). - [fpclass_note] - FOR j := 0 to 1 - i := j*64 - IF k1[j] - k[j] := CheckFPClass_FP64(a[i+63:i], imm8[7:0]) - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:2] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Test packed double-precision (64-bit) floating-point elements in "a" for special categories specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + [fpclass_note] + FOR j := 0 to 1 + i := j*64 + IF k1[j] + k[j] := CheckFPClass_FP64(a[i+63:i], imm8[7:0]) + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:2] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - Test packed single-precision (32-bit) floating-point elements in "a" for - special categories specified by "imm8", and store the results in mask vector "k". - [fpclass_note] - FOR j := 0 to 7 - i := j*32 - k[j] := CheckFPClass_FP32(a[i+31:i], imm8[7:0]) - ENDFOR - k[MAX:8] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + + Test packed single-precision (32-bit) floating-point elements in "a" for special categories specified by "imm8", and store the results in mask vector "k". + [fpclass_note] + FOR j := 0 to 7 + i := j*32 + k[j] := CheckFPClass_FP32(a[i+31:i], imm8[7:0]) +ENDFOR +k[MAX:8] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Test packed single-precision (32-bit) floating-point elements in "a" for - special categories specified by "imm8", and store the results in mask vector "k" using - zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). - [fpclass_note] - FOR j := 0 to 7 - i := j*32 - IF k1[j] - k[j] := CheckFPClass_FP32(a[i+31:i], imm8[7:0]) - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:8] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Test packed single-precision (32-bit) floating-point elements in "a" for special categories specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + [fpclass_note] + FOR j := 0 to 7 + i := j*32 + IF k1[j] + k[j] := CheckFPClass_FP32(a[i+31:i], imm8[7:0]) + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - Test packed single-precision (32-bit) floating-point elements in "a" for - special categories specified by "imm8", and store the results in mask vector "k". - [fpclass_note] - FOR j := 0 to 3 - i := j*32 - k[j] := CheckFPClass_FP32(a[i+31:i], imm8[7:0]) - ENDFOR - k[MAX:4] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + + Test packed single-precision (32-bit) floating-point elements in "a" for special categories specified by "imm8", and store the results in mask vector "k". + [fpclass_note] + FOR j := 0 to 3 + i := j*32 + k[j] := CheckFPClass_FP32(a[i+31:i], imm8[7:0]) +ENDFOR +k[MAX:4] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Test packed single-precision (32-bit) floating-point elements in "a" for - special categories specified by "imm8", and store the results in mask vector "k" using - zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). - [fpclass_note] - FOR j := 0 to 3 - i := j*32 - IF k1[j] - k[j] := CheckFPClass_FP32(a[i+31:i], imm8[7:0]) - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:4] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Test packed single-precision (32-bit) floating-point elements in "a" for special categories specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + [fpclass_note] + FOR j := 0 to 3 + i := j*32 + IF k1[j] + k[j] := CheckFPClass_FP32(a[i+31:i], imm8[7:0]) + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
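Unlike the other masked entries, the fpclass family writes a mask register, so the zeromask "k1" simply gates which lanes get classified at all. A scalar Rust sketch of that control flow, with the classifier left abstract (the mapping from imm8 bits to categories such as NaN, zero, infinity, or denormal belongs to the VFPCLASS instruction description and is not reproduced here):

    // `check` stands in for CheckFPClass_FP64(x, imm8).
    fn mask_fpclass_pd<F: Fn(f64) -> bool>(k1: u8, a: [f64; 4], check: F) -> u8 {
        let mut k = 0u8;
        for (j, &x) in a.iter().enumerate() {
            if (k1 >> j) & 1 == 1 && check(x) {
                k |= 1 << j;
            }
        }
        k
    }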
- - - - - Copy "a" to "dst", then insert 128 bits (composed of 2 packed double-precision - (64-bit) floating-point elements) from "b" into "dst" at the location specified by - "imm8". - - dst[255:0] := a[255:0] - CASE imm8[0] OF - 0: dst[127:0] := b[127:0] - 1: dst[255:128] := b[127:0] - ESAC - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Copy "a" to "dst", then insert 128 bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "b" into "dst" at the location specified by "imm8". + +dst[255:0] := a[255:0] +CASE imm8[0] OF +0: dst[127:0] := b[127:0] +1: dst[255:128] := b[127:0] +ESAC +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - - Copy "a" to "tmp", then insert 128 bits (composed of 2 packed double-precision - (64-bit) floating-point elements) from "b" into "tmp" at the location specified by - "imm8". Store "tmp" to "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - tmp[255:0] := a[255:0] - CASE (imm8[0]) OF - 0: tmp[127:0] := b[127:0] - 1: tmp[255:128] := b[127:0] - ESAC - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := tmp[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + + Copy "a" to "tmp", then insert 128 bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp[255:0] := a[255:0] +CASE (imm8[0]) OF +0: tmp[127:0] := b[127:0] +1: tmp[255:128] := b[127:0] +ESAC +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - Copy "a" to "tmp", then insert 128 bits (composed of 2 packed double-precision - (64-bit) floating-point elements) from "b" into "tmp" at the location specified by - "imm8". Store "tmp" to "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - tmp[255:0] := a[255:0] - CASE (imm8[0]) OF - 0: tmp[127:0] := b[127:0] - 1: tmp[255:128] := b[127:0] - ESAC - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := tmp[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Copy "a" to "tmp", then insert 128 bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp[255:0] := a[255:0] +CASE (imm8[0]) OF +0: tmp[127:0] := b[127:0] +1: tmp[255:128] := b[127:0] +ESAC +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Copy "a" to "dst", then insert 128 bits (composed of 2 packed 64-bit integers) - from "b" into "dst" at the location specified by "imm8". - - dst[255:0] := a[255:0] - CASE imm8[0] OF - 0: dst[127:0] := b[127:0] - 1: dst[255:128] := b[127:0] - ESAC - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Copy "a" to "dst", then insert 128 bits (composed of 2 packed 64-bit integers) from "b" into "dst" at the location specified by "imm8". + +dst[255:0] := a[255:0] +CASE imm8[0] OF +0: dst[127:0] := b[127:0] +1: dst[255:128] := b[127:0] +ESAC +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - - Copy "a" to "tmp", then insert 128 bits (composed of 2 packed 64-bit integers) - from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - tmp[255:0] := a[255:0] - CASE (imm8[0]) OF - 0: tmp[127:0] := b[127:0] - 1: tmp[255:128] := b[127:0] - ESAC - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := tmp[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + + Copy "a" to "tmp", then insert 128 bits (composed of 2 packed 64-bit integers) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp[255:0] := a[255:0] +CASE (imm8[0]) OF +0: tmp[127:0] := b[127:0] +1: tmp[255:128] := b[127:0] +ESAC +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - Copy "a" to "tmp", then insert 128 bits (composed of 2 packed 64-bit integers) - from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - tmp[255:0] := a[255:0] - CASE (imm8[0]) OF - 0: tmp[127:0] := b[127:0] - 1: tmp[255:128] := b[127:0] - ESAC - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := tmp[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Copy "a" to "tmp", then insert 128 bits (composed of 2 packed 64-bit integers) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp[255:0] := a[255:0] +CASE (imm8[0]) OF +0: tmp[127:0] := b[127:0] +1: tmp[255:128] := b[127:0] +ESAC +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
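Insertion mirrors extraction: copy "a", overwrite the imm8-selected 128-bit half with "b", then mask-merge the whole 256-bit result against "src" (note the merge covers all four lanes, not just the inserted half). A scalar Rust sketch (illustrative model):

    fn mask_inserti64x2(src: [i64; 4], k: u8, a: [i64; 4], b: [i64; 2], imm8: u8) -> [i64; 4] {
        let mut tmp = a;
        let base = (imm8 as usize & 1) * 2;
        tmp[base] = b[0];
        tmp[base + 1] = b[1];
        core::array::from_fn(|j| if (k >> j) & 1 == 1 { tmp[j] } else { src[j] })
    }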
- - - Set each bit of mask register "k" based on the most significant bit of the - corresponding packed 32-bit integer in "a". - - FOR j := 0 to 7 - i := j*32 - IF a[i+31] - k[j] := 1 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:8] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + Set each bit of mask register "k" based on the most significant bit of the corresponding packed 32-bit integer in "a". + +FOR j := 0 to 7 + i := j*32 + IF a[i+31] + k[j] := 1 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
- - - Set each bit of mask register "k" based on the most significant bit of the - corresponding packed 32-bit integer in "a". - - FOR j := 0 to 3 - i := j*32 - IF a[i+31] - k[j] := 1 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:4] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + Set each bit of mask register "k" based on the most significant bit of the corresponding packed 32-bit integer in "a". + +FOR j := 0 to 3 + i := j*32 + IF a[i+31] + k[j] := 1 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
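Because two's-complement integers are negative exactly when their most significant bit is set, the sign-bit-to-mask entries above reduce to a per-lane "< 0" test. A scalar Rust sketch of the eight-lane case:

    fn movepi32_mask(a: [i32; 8]) -> u8 {
        let mut k = 0u8;
        for (j, &x) in a.iter().enumerate() {
            if x < 0 {
                k |= 1 << j; // a[i+31] is the sign bit of lane j
            }
        }
        k
    }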
- - - Set each packed 32-bit integer in "dst" to all ones or all zeros based on the - value of the corresponding bit in "k". - - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := 0xFFFFFFFF - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + Set each packed 32-bit integer in "dst" to all ones or all zeros based on the value of the corresponding bit in "k". + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := 0xFFFFFFFF + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
- - - Set each packed 32-bit integer in "dst" to all ones or all zeros based on the - value of the corresponding bit in "k". - - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := 0xFFFFFFFF - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + Set each packed 32-bit integer in "dst" to all ones or all zeros based on the value of the corresponding bit in "k". + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := 0xFFFFFFFF + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
- - - Set each packed 64-bit integer in "dst" to all ones or all zeros based on the - value of the corresponding bit in "k". - - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := 0xFFFFFFFFFFFFFFFF - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + Set each packed 64-bit integer in "dst" to all ones or all zeros based on the value of the corresponding bit in "k". + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := 0xFFFFFFFFFFFFFFFF + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
- - - Set each packed 64-bit integer in "dst" to all ones or all zeros based on the - value of the corresponding bit in "k". - - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := 0xFFFFFFFFFFFFFFFF - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + Set each packed 64-bit integer in "dst" to all ones or all zeros based on the value of the corresponding bit in "k". + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := 0xFFFFFFFFFFFFFFFF + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
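The opposite direction, shown in the entries above, expands each mask bit into an all-ones or all-zeros lane; in two's complement the all-ones pattern 0xFFFFFFFF is -1. A scalar Rust sketch of the 32-bit case (the 64-bit variant is identical apart from the lane type and count):

    fn movm_epi32(k: u8) -> [i32; 8] {
        core::array::from_fn(|j| if (k >> j) & 1 == 1 { -1 } else { 0 })
    }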
- - - Set each bit of mask register "k" based on the most significant bit of the - corresponding packed 64-bit integer in "a". - - FOR j := 0 to 3 - i := j*64 - IF a[i+63] - k[j] := 1 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:4] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + Set each bit of mask register "k" based on the most significant bit of the corresponding packed 64-bit integer in "a". + +FOR j := 0 to 3 + i := j*64 + IF a[i+63] + k[j] := 1 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
- - - Set each bit of mask register "k" based on the most significant bit of the - corresponding packed 64-bit integer in "a". - - FOR j := 0 to 1 - i := j*64 - IF a[i+63] - k[j] := 1 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:2] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + Set each bit of mask register "k" based on the most significant bit of the corresponding packed 64-bit integer in "a". + +FOR j := 0 to 1 + i := j*64 + IF a[i+63] + k[j] := 1 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:2] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - - Calculate the max, min, absolute max, or absolute min (depending on control in - "imm8") for packed double-precision (64-bit) floating-point elements in "a" and "b", and - store the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = - absolute max. - imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, - 10 = clear sign bit, 11 = set sign bit. - - DEFINE RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) { - CASE opCtl[1:0] OF - 0: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src1[63:0] : src2[63:0] - 1: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src2[63:0] : src1[63:0] - 2: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src1[63:0] : src2[63:0] - 3: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src2[63:0] : src1[63:0] - ESAC - - CASE signSelCtl[1:0] OF - 0: dst[63:0] := (src1[63] << 63) OR (tmp[62:0]) - 1: dst[63:0] := tmp[63:0] - 2: dst[63:0] := (0 << 63) OR (tmp[62:0]) - 3: dst[63:0] := (1 << 63) OR (tmp[62:0]) - ESAC - - RETURN dst - } - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := RANGE(a[i+63:i], b[i+63:i], imm8[1:0], imm8[3:2]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = absolute max. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + +DEFINE RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) { + CASE opCtl[1:0] OF + 0: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src1[63:0] : src2[63:0] + 1: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src2[63:0] : src1[63:0] + 2: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src1[63:0] : src2[63:0] + 3: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src2[63:0] : src1[63:0] + ESAC + + CASE signSelCtl[1:0] OF + 0: dst[63:0] := (src1[63] << 63) OR (tmp[62:0]) + 1: dst[63:0] := tmp[63:0] + 2: dst[63:0] := (0 << 63) OR (tmp[62:0]) + 3: dst[63:0] := (1 << 63) OR (tmp[62:0]) + ESAC + + RETURN dst +} +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := RANGE(a[i+63:i], b[i+63:i], imm8[1:0], imm8[3:2]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - Calculate the max, min, absolute max, or absolute min (depending on control in - "imm8") for packed double-precision (64-bit) floating-point elements in "a" and "b", and - store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = - absolute max. - imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, - 10 = clear sign bit, 11 = set sign bit. - - DEFINE RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) { - CASE opCtl[1:0] OF - 0: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src1[63:0] : src2[63:0] - 1: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src2[63:0] : src1[63:0] - 2: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src1[63:0] : src2[63:0] - 3: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src2[63:0] : src1[63:0] - ESAC - - CASE signSelCtl[1:0] OF - 0: dst[63:0] := (src1[63] << 63) OR (tmp[62:0]) - 1: dst[63:0] := tmp[63:0] - 2: dst[63:0] := (0 << 63) OR (tmp[62:0]) - 3: dst[63:0] := (1 << 63) OR (tmp[62:0]) - ESAC - - RETURN dst - } - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := RANGE(a[i+63:i], b[i+63:i], imm8[1:0], imm8[3:2]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = absolute max. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + +DEFINE RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) { + CASE opCtl[1:0] OF + 0: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src1[63:0] : src2[63:0] + 1: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src2[63:0] : src1[63:0] + 2: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src1[63:0] : src2[63:0] + 3: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src2[63:0] : src1[63:0] + ESAC + + CASE signSelCtl[1:0] OF + 0: dst[63:0] := (src1[63] << 63) OR (tmp[62:0]) + 1: dst[63:0] := tmp[63:0] + 2: dst[63:0] := (0 << 63) OR (tmp[62:0]) + 3: dst[63:0] := (1 << 63) OR (tmp[62:0]) + ESAC + + RETURN dst +} +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := RANGE(a[i+63:i], b[i+63:i], imm8[1:0], imm8[3:2]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Calculate the max, min, absolute max, or absolute min (depending on control in - "imm8") for packed double-precision (64-bit) floating-point elements in "a" and "b", and - store the results in "dst". - imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = - absolute max. - imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, - 10 = clear sign bit, 11 = set sign bit. - - DEFINE RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) { - CASE opCtl[1:0] OF - 0: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src1[63:0] : src2[63:0] - 1: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src2[63:0] : src1[63:0] - 2: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src1[63:0] : src2[63:0] - 3: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src2[63:0] : src1[63:0] - ESAC - - CASE signSelCtl[1:0] OF - 0: dst[63:0] := (src1[63] << 63) OR (tmp[62:0]) - 1: dst[63:0] := tmp[63:0] - 2: dst[63:0] := (0 << 63) OR (tmp[62:0]) - 3: dst[63:0] := (1 << 63) OR (tmp[62:0]) - ESAC - - RETURN dst - } - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := RANGE(a[i+63:i], b[i+63:i], imm8[1:0], imm8[3:2]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst". + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = absolute max. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + +DEFINE RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) { + CASE opCtl[1:0] OF + 0: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src1[63:0] : src2[63:0] + 1: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src2[63:0] : src1[63:0] + 2: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src1[63:0] : src2[63:0] + 3: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src2[63:0] : src1[63:0] + ESAC + + CASE signSelCtl[1:0] OF + 0: dst[63:0] := (src1[63] << 63) OR (tmp[62:0]) + 1: dst[63:0] := tmp[63:0] + 2: dst[63:0] := (0 << 63) OR (tmp[62:0]) + 3: dst[63:0] := (1 << 63) OR (tmp[62:0]) + ESAC + + RETURN dst +} +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := RANGE(a[i+63:i], b[i+63:i], imm8[1:0], imm8[3:2]) +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - - Calculate the max, min, absolute max, or absolute min (depending on control in - "imm8") for packed double-precision (64-bit) floating-point elements in "a" and "b", and - store the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = - absolute max. - imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, - 10 = clear sign bit, 11 = set sign bit. - - DEFINE RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) { - CASE opCtl[1:0] OF - 0: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src1[63:0] : src2[63:0] - 1: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src2[63:0] : src1[63:0] - 2: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src1[63:0] : src2[63:0] - 3: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src2[63:0] : src1[63:0] - ESAC - - CASE signSelCtl[1:0] OF - 0: dst[63:0] := (src1[63] << 63) OR (tmp[62:0]) - 1: dst[63:0] := tmp[63:0] - 2: dst[63:0] := (0 << 63) OR (tmp[62:0]) - 3: dst[63:0] := (1 << 63) OR (tmp[62:0]) - ESAC - - RETURN dst - } - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := RANGE(a[i+63:i], b[i+63:i], imm8[1:0], imm8[3:2]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = absolute max. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + +DEFINE RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) { + CASE opCtl[1:0] OF + 0: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src1[63:0] : src2[63:0] + 1: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src2[63:0] : src1[63:0] + 2: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src1[63:0] : src2[63:0] + 3: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src2[63:0] : src1[63:0] + ESAC + + CASE signSelCtl[1:0] OF + 0: dst[63:0] := (src1[63] << 63) OR (tmp[62:0]) + 1: dst[63:0] := tmp[63:0] + 2: dst[63:0] := (0 << 63) OR (tmp[62:0]) + 3: dst[63:0] := (1 << 63) OR (tmp[62:0]) + ESAC + + RETURN dst +} +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := RANGE(a[i+63:i], b[i+63:i], imm8[1:0], imm8[3:2]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - Calculate the max, min, absolute max, or absolute min (depending on control in - "imm8") for packed double-precision (64-bit) floating-point elements in "a" and "b", and - store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = - absolute max. - imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, - 10 = clear sign bit, 11 = set sign bit. - - DEFINE RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) { - CASE opCtl[1:0] OF - 0: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src1[63:0] : src2[63:0] - 1: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src2[63:0] : src1[63:0] - 2: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src1[63:0] : src2[63:0] - 3: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src2[63:0] : src1[63:0] - ESAC - - CASE signSelCtl[1:0] OF - 0: dst[63:0] := (src1[63] << 63) OR (tmp[62:0]) - 1: dst[63:0] := tmp[63:0] - 2: dst[63:0] := (0 << 63) OR (tmp[62:0]) - 3: dst[63:0] := (1 << 63) OR (tmp[62:0]) - ESAC - - RETURN dst - } - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := RANGE(a[i+63:i], b[i+63:i], imm8[1:0], imm8[3:2]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = absolute max. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + +DEFINE RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) { + CASE opCtl[1:0] OF + 0: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src1[63:0] : src2[63:0] + 1: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src2[63:0] : src1[63:0] + 2: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src1[63:0] : src2[63:0] + 3: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src2[63:0] : src1[63:0] + ESAC + + CASE signSelCtl[1:0] OF + 0: dst[63:0] := (src1[63] << 63) OR (tmp[62:0]) + 1: dst[63:0] := tmp[63:0] + 2: dst[63:0] := (0 << 63) OR (tmp[62:0]) + 3: dst[63:0] := (1 << 63) OR (tmp[62:0]) + ESAC + + RETURN dst +} +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := RANGE(a[i+63:i], b[i+63:i], imm8[1:0], imm8[3:2]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Calculate the max, min, absolute max, or absolute min (depending on control in - "imm8") for packed double-precision (64-bit) floating-point elements in "a" and "b", and - store the results in "dst". - imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = - absolute max. - imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, - 10 = clear sign bit, 11 = set sign bit. - - DEFINE RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) { - CASE opCtl[1:0] OF - 0: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src1[63:0] : src2[63:0] - 1: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src2[63:0] : src1[63:0] - 2: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src1[63:0] : src2[63:0] - 3: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src2[63:0] : src1[63:0] - ESAC - - CASE signSelCtl[1:0] OF - 0: dst[63:0] := (src1[63] << 63) OR (tmp[62:0]) - 1: dst[63:0] := tmp[63:0] - 2: dst[63:0] := (0 << 63) OR (tmp[62:0]) - 3: dst[63:0] := (1 << 63) OR (tmp[62:0]) - ESAC - - RETURN dst - } - FOR j := 0 to 1 - i := j*64 - dst[i+63:i] := RANGE(a[i+63:i], b[i+63:i], imm8[1:0], imm8[3:2]) - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst". + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = absolute max. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + +DEFINE RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) { + CASE opCtl[1:0] OF + 0: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src1[63:0] : src2[63:0] + 1: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src2[63:0] : src1[63:0] + 2: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src1[63:0] : src2[63:0] + 3: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src2[63:0] : src1[63:0] + ESAC + + CASE signSelCtl[1:0] OF + 0: dst[63:0] := (src1[63] << 63) OR (tmp[62:0]) + 1: dst[63:0] := tmp[63:0] + 2: dst[63:0] := (0 << 63) OR (tmp[62:0]) + 3: dst[63:0] := (1 << 63) OR (tmp[62:0]) + ESAC + + RETURN dst +} +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := RANGE(a[i+63:i], b[i+63:i], imm8[1:0], imm8[3:2]) +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
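The RANGE helper used throughout these entries composes two independent selections: opCtl chooses which operand's value survives (min, max, absolute min, absolute max) and signSelCtl chooses where the result's sign bit comes from. A scalar Rust sketch of one double-precision lane, following the pseudocode above (the masked loops layer on top exactly as in the other entries):

    fn range_f64(src1: f64, src2: f64, op_ctl: u8, sign_sel_ctl: u8) -> f64 {
        let tmp = match op_ctl & 0b11 {
            0 => if src1 <= src2 { src1 } else { src2 },             // min
            1 => if src1 <= src2 { src2 } else { src1 },             // max
            2 => if src1.abs() <= src2.abs() { src1 } else { src2 }, // absolute min
            _ => if src1.abs() <= src2.abs() { src2 } else { src1 }, // absolute max
        };
        const SIGN: u64 = 1 << 63;
        let magnitude = tmp.to_bits() & !SIGN;
        let bits = match sign_sel_ctl & 0b11 {
            0 => (src1.to_bits() & SIGN) | magnitude, // sign from src1
            1 => tmp.to_bits(),                       // sign from the compare result
            2 => magnitude,                           // clear sign bit
            _ => SIGN | magnitude,                    // set sign bit
        };
        f64::from_bits(bits)
    }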
- - - - - - - Calculate the max, min, absolute max, or absolute min (depending on control in - "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and - store the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = - absolute max. - imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, - 10 = clear sign bit, 11 = set sign bit. - - DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) { - CASE opCtl[1:0] OF - 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] - 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] - 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] - 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] - ESAC - - CASE signSelCtl[1:0] OF - 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) - 1: dst[31:0] := tmp[63:0] - 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) - 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) - ESAC - - RETURN dst - } - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = absolute max.
+ imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit.
+
+DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) {
+ CASE opCtl[1:0] OF
+ 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0]
+ 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0]
+ 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0]
+ 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0]
+ ESAC
+
+ CASE signSelCtl[1:0] OF
+ 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0])
+ 1: dst[31:0] := tmp[31:0]
+ 2: dst[31:0] := (0 << 31) OR (tmp[30:0])
+ 3: dst[31:0] := (1 << 31) OR (tmp[30:0])
+ ESAC
+
+ RETURN dst
+}
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+
+
+ AVX512DQ
+ AVX512VL
+
immintrin.h
+ Miscellaneous
- - - - - - Calculate the max, min, absolute max, or absolute min (depending on control in - "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and - store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = - absolute max. - imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, - 10 = clear sign bit, 11 = set sign bit. - - DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) { - CASE opCtl[1:0] OF - 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] - 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] - 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] - 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] - ESAC - - CASE signSelCtl[1:0] OF - 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) - 1: dst[31:0] := tmp[63:0] - 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) - 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) - ESAC - - RETURN dst - } - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = absolute max.
+ imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit.
+
+DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) {
+ CASE opCtl[1:0] OF
+ 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0]
+ 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0]
+ 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0]
+ 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0]
+ ESAC
+
+ CASE signSelCtl[1:0] OF
+ 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0])
+ 1: dst[31:0] := tmp[31:0]
+ 2: dst[31:0] := (0 << 31) OR (tmp[30:0])
+ 3: dst[31:0] := (1 << 31) OR (tmp[30:0])
+ ESAC
+
+ RETURN dst
+}
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+
+
+ AVX512DQ
+ AVX512VL
+
immintrin.h
+ Miscellaneous
- - - - - Calculate the max, min, absolute max, or absolute min (depending on control in - "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and - store the results in "dst". - imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = - absolute max. - imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, - 10 = clear sign bit, 11 = set sign bit. - - DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) { - CASE opCtl[1:0] OF - 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] - 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] - 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] - 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] - ESAC - - CASE signSelCtl[1:0] OF - 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) - 1: dst[31:0] := tmp[63:0] - 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) - 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) - ESAC - - RETURN dst - } - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst".
+ imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = absolute max.
+ imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit.
+
+DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) {
+ CASE opCtl[1:0] OF
+ 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0]
+ 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0]
+ 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0]
+ 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0]
+ ESAC
+
+ CASE signSelCtl[1:0] OF
+ 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0])
+ 1: dst[31:0] := tmp[31:0]
+ 2: dst[31:0] := (0 << 31) OR (tmp[30:0])
+ 3: dst[31:0] := (1 << 31) OR (tmp[30:0])
+ ESAC
+
+ RETURN dst
+}
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2])
+ENDFOR
+dst[MAX:256] := 0
+
+
+ AVX512DQ
+ AVX512VL
+
immintrin.h
+ Miscellaneous
- - - - - - - Calculate the max, min, absolute max, or absolute min (depending on control in - "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and - store the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = - absolute max. - imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, - 10 = clear sign bit, 11 = set sign bit. - - DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) { - CASE opCtl[1:0] OF - 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] - 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] - 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] - 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] - ESAC - - CASE signSelCtl[1:0] OF - 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) - 1: dst[31:0] := tmp[63:0] - 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) - 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) - ESAC - - RETURN dst - } - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = absolute max. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + +DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) { + CASE opCtl[1:0] OF + 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] + 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] + 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] + 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] + ESAC + + CASE signSelCtl[1:0] OF + 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) + 1: dst[31:0] := tmp[63:0] + 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) + 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) + ESAC + + RETURN dst +} +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - Calculate the max, min, absolute max, or absolute min (depending on control in - "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and - store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = - absolute max. - imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, - 10 = clear sign bit, 11 = set sign bit. - - DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) { - CASE opCtl[1:0] OF - 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] - 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] - 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] - 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] - ESAC - - CASE signSelCtl[1:0] OF - 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) - 1: dst[31:0] := tmp[63:0] - 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) - 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) - ESAC - - RETURN dst - } - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = absolute max. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + +DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) { + CASE opCtl[1:0] OF + 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] + 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] + 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] + 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] + ESAC + + CASE signSelCtl[1:0] OF + 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) + 1: dst[31:0] := tmp[63:0] + 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) + 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) + ESAC + + RETURN dst +} +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Calculate the max, min, absolute max, or absolute min (depending on control in - "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and - store the results in "dst". - imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = - absolute max. - imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, - 10 = clear sign bit, 11 = set sign bit. - - DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) { - CASE opCtl[1:0] OF - 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] - 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] - 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] - 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] - ESAC - - CASE signSelCtl[1:0] OF - 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) - 1: dst[31:0] := tmp[63:0] - 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) - 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) - ESAC - - RETURN dst - } - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2]) - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst". + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = absolute max. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + +DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) { + CASE opCtl[1:0] OF + 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] + 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] + 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] + 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] + ESAC + + CASE signSelCtl[1:0] OF + 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) + 1: dst[31:0] := tmp[63:0] + 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) + 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) + ESAC + + RETURN dst +} +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2]) +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Miscellaneous
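The RANGE helper is an ordinary compare followed by pure sign-bit manipulation. A minimal scalar sketch of one f32 lane in Rust; range_f32 is a hypothetical name for illustration, not an intrinsic from this patch, and NaN propagation of the real instruction is not modeled:

    fn range_f32(src1: f32, src2: f32, op_ctl: u8, sign_sel_ctl: u8) -> f32 {
        // opCtl[1:0]: 00 = min, 01 = max, 10 = absolute min, 11 = absolute max
        let tmp = match op_ctl & 0b11 {
            0 => if src1 <= src2 { src1 } else { src2 },
            1 => if src1 <= src2 { src2 } else { src1 },
            2 => if src1.abs() <= src2.abs() { src1 } else { src2 },
            _ => if src1.abs() <= src2.abs() { src2 } else { src1 },
        };
        // signSelCtl[1:0]: 00 = sign from src1, 01 = sign from compare result,
        // 10 = clear sign bit, 11 = set sign bit
        let bits = match sign_sel_ctl & 0b11 {
            0 => (src1.to_bits() & 0x8000_0000) | (tmp.to_bits() & 0x7FFF_FFFF),
            1 => tmp.to_bits(),
            2 => tmp.to_bits() & 0x7FFF_FFFF,
            _ => tmp.to_bits() | 0x8000_0000,
        };
        f32::from_bits(bits)
    }

    fn main() {
        // imm8 = 0b0011: absolute max, sign taken from "a"
        assert_eq!(range_f32(-1.0, 2.0, 0b11, 0b00), -2.0);
    }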
Extract the reduced argument of packed double-precision (64-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note]

DEFINE ReduceArgumentPD(src1[63:0], imm8[7:0]) {
    m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
    tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
    tmp[63:0] := src1[63:0] - tmp[63:0]
    IF IsInf(tmp[63:0])
        tmp[63:0] := FP64(0.0)
    FI
    RETURN tmp[63:0]
}
FOR j := 0 to 3
    i := j*64
    IF k[j]
        dst[i+63:i] := ReduceArgumentPD(a[i+63:i], imm8[7:0])
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:256] := 0

AVX512DQ
AVX512VL
immintrin.h
Miscellaneous

Extract the reduced argument of packed double-precision (64-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note]

DEFINE ReduceArgumentPD(src1[63:0], imm8[7:0]) {
    m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
    tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
    tmp[63:0] := src1[63:0] - tmp[63:0]
    IF IsInf(tmp[63:0])
        tmp[63:0] := FP64(0.0)
    FI
    RETURN tmp[63:0]
}
FOR j := 0 to 3
    i := j*64
    IF k[j]
        dst[i+63:i] := ReduceArgumentPD(a[i+63:i], imm8[7:0])
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

AVX512DQ
AVX512VL
immintrin.h
Miscellaneous

Extract the reduced argument of packed double-precision (64-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst". [round_imm_note]

DEFINE ReduceArgumentPD(src1[63:0], imm8[7:0]) {
    m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
    tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
    tmp[63:0] := src1[63:0] - tmp[63:0]
    IF IsInf(tmp[63:0])
        tmp[63:0] := FP64(0.0)
    FI
    RETURN tmp[63:0]
}
FOR j := 0 to 3
    i := j*64
    dst[i+63:i] := ReduceArgumentPD(a[i+63:i], imm8[7:0])
ENDFOR
dst[MAX:256] := 0

AVX512DQ
AVX512VL
immintrin.h
Miscellaneous

Extract the reduced argument of packed double-precision (64-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note]

DEFINE ReduceArgumentPD(src1[63:0], imm8[7:0]) {
    m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
    tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
    tmp[63:0] := src1[63:0] - tmp[63:0]
    IF IsInf(tmp[63:0])
        tmp[63:0] := FP64(0.0)
    FI
    RETURN tmp[63:0]
}
FOR j := 0 to 1
    i := j*64
    IF k[j]
        dst[i+63:i] := ReduceArgumentPD(a[i+63:i], imm8[7:0])
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:128] := 0

AVX512DQ
AVX512VL
immintrin.h
Miscellaneous

Extract the reduced argument of packed double-precision (64-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note]

DEFINE ReduceArgumentPD(src1[63:0], imm8[7:0]) {
    m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
    tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
    tmp[63:0] := src1[63:0] - tmp[63:0]
    IF IsInf(tmp[63:0])
        tmp[63:0] := FP64(0.0)
    FI
    RETURN tmp[63:0]
}
FOR j := 0 to 1
    i := j*64
    IF k[j]
        dst[i+63:i] := ReduceArgumentPD(a[i+63:i], imm8[7:0])
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

AVX512DQ
AVX512VL
immintrin.h
Miscellaneous

Extract the reduced argument of packed double-precision (64-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst". [round_imm_note]

DEFINE ReduceArgumentPD(src1[63:0], imm8[7:0]) {
    m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
    tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
    tmp[63:0] := src1[63:0] - tmp[63:0]
    IF IsInf(tmp[63:0])
        tmp[63:0] := FP64(0.0)
    FI
    RETURN tmp[63:0]
}
FOR j := 0 to 1
    i := j*64
    dst[i+63:i] := ReduceArgumentPD(a[i+63:i], imm8[7:0])
ENDFOR
dst[MAX:128] := 0

AVX512DQ
AVX512VL
immintrin.h
Miscellaneous
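Numerically, ReduceArgumentPD splits a value at a binary point: imm8[7:4] picks how many fraction bits survive the ROUND, and the subtraction returns what was rounded away. A scalar Rust sketch, assuming imm8[1:0] uses the usual _MM_FROUND encoding (0 nearest, 1 down, 2 up, 3 truncate) and ignoring the SAE/MXCSR bits:

    fn reduce_argument_f64(src: f64, imm8: u8) -> f64 {
        let m = (imm8 >> 4) as i32;       // imm8[7:4]: fraction bits to preserve
        let scale = (m as f64).exp2();    // POW(2.0, m)
        let x = src * scale;
        let rounded = match imm8 & 0b11 {
            0 => x.round_ties_even(),
            1 => x.floor(),
            2 => x.ceil(),
            _ => x.trunc(),
        } / scale;
        let tmp = src - rounded;
        // An infinite input leaves an infinite difference; the pseudocode
        // flushes that to +0.0.
        if tmp.is_infinite() { 0.0 } else { tmp }
    }

    fn main() {
        // Keep 4 fraction bits, truncating: 1.7890625 = 1.75 + 0.0390625
        assert_eq!(reduce_argument_f64(1.7890625, (4 << 4) | 3), 0.0390625);
    }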
Extract the reduced argument of packed single-precision (32-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note]

DEFINE ReduceArgumentPS(src1[31:0], imm8[7:0]) {
    m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
    tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
    tmp[31:0] := src1[31:0] - tmp[31:0]
    IF IsInf(tmp[31:0])
        tmp[31:0] := FP32(0.0)
    FI
    RETURN tmp[31:0]
}
FOR j := 0 to 7
    i := j*32
    IF k[j]
        dst[i+31:i] := ReduceArgumentPS(a[i+31:i], imm8[7:0])
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:256] := 0

AVX512DQ
AVX512VL
immintrin.h
Miscellaneous

Extract the reduced argument of packed single-precision (32-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note]

DEFINE ReduceArgumentPS(src1[31:0], imm8[7:0]) {
    m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
    tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
    tmp[31:0] := src1[31:0] - tmp[31:0]
    IF IsInf(tmp[31:0])
        tmp[31:0] := FP32(0.0)
    FI
    RETURN tmp[31:0]
}
FOR j := 0 to 7
    i := j*32
    IF k[j]
        dst[i+31:i] := ReduceArgumentPS(a[i+31:i], imm8[7:0])
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

AVX512DQ
AVX512VL
immintrin.h
Miscellaneous

Extract the reduced argument of packed single-precision (32-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst". [round_imm_note]

DEFINE ReduceArgumentPS(src1[31:0], imm8[7:0]) {
    m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
    tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
    tmp[31:0] := src1[31:0] - tmp[31:0]
    IF IsInf(tmp[31:0])
        tmp[31:0] := FP32(0.0)
    FI
    RETURN tmp[31:0]
}
FOR j := 0 to 7
    i := j*32
    dst[i+31:i] := ReduceArgumentPS(a[i+31:i], imm8[7:0])
ENDFOR
dst[MAX:256] := 0

AVX512DQ
AVX512VL
immintrin.h
Miscellaneous

Extract the reduced argument of packed single-precision (32-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note]

DEFINE ReduceArgumentPS(src1[31:0], imm8[7:0]) {
    m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
    tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
    tmp[31:0] := src1[31:0] - tmp[31:0]
    IF IsInf(tmp[31:0])
        tmp[31:0] := FP32(0.0)
    FI
    RETURN tmp[31:0]
}
FOR j := 0 to 3
    i := j*32
    IF k[j]
        dst[i+31:i] := ReduceArgumentPS(a[i+31:i], imm8[7:0])
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:128] := 0

AVX512DQ
AVX512VL
immintrin.h
Miscellaneous

Extract the reduced argument of packed single-precision (32-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note]

DEFINE ReduceArgumentPS(src1[31:0], imm8[7:0]) {
    m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
    tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
    tmp[31:0] := src1[31:0] - tmp[31:0]
    IF IsInf(tmp[31:0])
        tmp[31:0] := FP32(0.0)
    FI
    RETURN tmp[31:0]
}
FOR j := 0 to 3
    i := j*32
    IF k[j]
        dst[i+31:i] := ReduceArgumentPS(a[i+31:i], imm8[7:0])
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

AVX512DQ
AVX512VL
immintrin.h
Miscellaneous

Extract the reduced argument of packed single-precision (32-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst". [round_imm_note]

DEFINE ReduceArgumentPS(src1[31:0], imm8[7:0]) {
    m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
    tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
    tmp[31:0] := src1[31:0] - tmp[31:0]
    IF IsInf(tmp[31:0])
        tmp[31:0] := FP32(0.0)
    FI
    RETURN tmp[31:0]
}
FOR j := 0 to 3
    i := j*32
    dst[i+31:i] := ReduceArgumentPS(a[i+31:i], imm8[7:0])
ENDFOR
dst[MAX:128] := 0

AVX512DQ
AVX512VL
immintrin.h
Miscellaneous
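Every masked form in this file follows one of the same two patterns, so it is worth spelling them out once. A sketch over four f32 lanes; k plays the role of the mask-register argument:

    fn mask_select(src: [f32; 4], result: [f32; 4], k: u8) -> [f32; 4] {
        // writemask: keep the old `src` lane where the mask bit is clear
        core::array::from_fn(|j| if (k >> j) & 1 == 1 { result[j] } else { src[j] })
    }

    fn maskz_select(result: [f32; 4], k: u8) -> [f32; 4] {
        // zeromask: zero the lane where the mask bit is clear
        core::array::from_fn(|j| if (k >> j) & 1 == 1 { result[j] } else { 0.0 })
    }

    fn main() {
        let src = [9.0; 4];
        let res = [1.0, 2.0, 3.0, 4.0];
        assert_eq!(mask_select(src, res, 0b0101), [1.0, 9.0, 3.0, 9.0]);
        assert_eq!(maskz_select(res, 0b0101), [1.0, 0.0, 3.0, 0.0]);
    }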
Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst".

FOR j := 0 to 3
    i := j*64
    dst[i+63:i] := Convert_FP64_To_Int64(a[i+63:i])
ENDFOR
dst[MAX:256] := 0

AVX512DQ
AVX512VL
immintrin.h
Convert

Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 3
    i := j*64
    IF k[j]
        dst[i+63:i] := Convert_FP64_To_Int64(a[i+63:i])
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:256] := 0

AVX512DQ
AVX512VL
immintrin.h
Convert

Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 3
    i := j*64
    IF k[j]
        dst[i+63:i] := Convert_FP64_To_Int64(a[i+63:i])
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

AVX512DQ
AVX512VL
immintrin.h
Convert

Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst".

FOR j := 0 to 1
    i := j*64
    dst[i+63:i] := Convert_FP64_To_Int64(a[i+63:i])
ENDFOR
dst[MAX:128] := 0

AVX512DQ
AVX512VL
immintrin.h
Convert

Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 1
    i := j*64
    IF k[j]
        dst[i+63:i] := Convert_FP64_To_Int64(a[i+63:i])
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:128] := 0

AVX512DQ
AVX512VL
immintrin.h
Convert

Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 1
    i := j*64
    IF k[j]
        dst[i+63:i] := Convert_FP64_To_Int64(a[i+63:i])
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

AVX512DQ
AVX512VL
immintrin.h
Convert
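Convert_FP64_To_Int64 honors the current rounding mode, which is round-to-nearest-even under the default MXCSR. A scalar sketch of just that default; out-of-range and NaN inputs, where the instruction instead returns the integer indefinite value, are not modeled:

    fn cvt_f64_i64(x: f64) -> i64 {
        x.round_ties_even() as i64
    }

    fn main() {
        assert_eq!(cvt_f64_i64(2.5), 2); // ties round to the even integer
        assert_eq!(cvt_f64_i64(3.5), 4);
        assert_eq!(cvt_f64_i64(-1.25), -1);
    }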
Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst".

FOR j := 0 to 3
    i := j*64
    dst[i+63:i] := Convert_FP64_To_UInt64(a[i+63:i])
ENDFOR
dst[MAX:256] := 0

AVX512DQ
AVX512VL
immintrin.h
Convert

Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 3
    i := j*64
    IF k[j]
        dst[i+63:i] := Convert_FP64_To_UInt64(a[i+63:i])
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:256] := 0

AVX512DQ
AVX512VL
immintrin.h
Convert

Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 3
    i := j*64
    IF k[j]
        dst[i+63:i] := Convert_FP64_To_UInt64(a[i+63:i])
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

AVX512DQ
AVX512VL
immintrin.h
Convert

Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst".

FOR j := 0 to 1
    i := j*64
    dst[i+63:i] := Convert_FP64_To_UInt64(a[i+63:i])
ENDFOR
dst[MAX:128] := 0

AVX512DQ
AVX512VL
immintrin.h
Convert

Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 1
    i := j*64
    IF k[j]
        dst[i+63:i] := Convert_FP64_To_UInt64(a[i+63:i])
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:128] := 0

AVX512DQ
AVX512VL
immintrin.h
Convert

Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 1
    i := j*64
    IF k[j]
        dst[i+63:i] := Convert_FP64_To_UInt64(a[i+63:i])
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

AVX512DQ
AVX512VL
immintrin.h
Convert
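The unsigned variant differs only in its destination range, but one divergence matters when writing tests: Rust's `as` conversion saturates, while the hardware returns the all-ones pattern for inputs that do not fit. A sketch of the in-range behavior only:

    fn cvt_f64_u64(x: f64) -> u64 {
        // In-range, non-negative inputs round under the current mode
        // (nearest-even by default); negative or oversized inputs are
        // out of scope for this sketch.
        x.round_ties_even() as u64
    }

    fn main() {
        assert_eq!(cvt_f64_u64(0.5), 0);
        assert_eq!(cvt_f64_u64(1.5), 2);
    }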
Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst".

FOR j := 0 to 3
    i := j*64
    l := j*32
    dst[i+63:i] := Convert_FP32_To_Int64(a[l+31:l])
ENDFOR
dst[MAX:256] := 0

AVX512DQ
AVX512VL
immintrin.h
Convert

Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 3
    i := j*64
    l := j*32
    IF k[j]
        dst[i+63:i] := Convert_FP32_To_Int64(a[l+31:l])
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:256] := 0

AVX512DQ
AVX512VL
immintrin.h
Convert

Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 3
    i := j*64
    l := j*32
    IF k[j]
        dst[i+63:i] := Convert_FP32_To_Int64(a[l+31:l])
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

AVX512DQ
AVX512VL
immintrin.h
Convert

Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst".

FOR j := 0 to 1
    i := j*64
    l := j*32
    dst[i+63:i] := Convert_FP32_To_Int64(a[l+31:l])
ENDFOR
dst[MAX:128] := 0

AVX512DQ
AVX512VL
immintrin.h
Convert

Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 1
    i := j*64
    l := j*32
    IF k[j]
        dst[i+63:i] := Convert_FP32_To_Int64(a[l+31:l])
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:128] := 0

AVX512DQ
AVX512VL
immintrin.h
Convert

Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 1
    i := j*64
    l := j*32
    IF k[j]
        dst[i+63:i] := Convert_FP32_To_Int64(a[l+31:l])
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

AVX512DQ
AVX512VL
immintrin.h
Convert
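The twin counters above (i := j*64, l := j*32) simply say that 64-bit result lane j is fed from 32-bit source lane j, so only the low half of the source register is consumed. In plain Rust terms:

    fn cvt_f32x4_i64x4(a: [f32; 4]) -> [i64; 4] {
        // lane j of the output comes from lane j of the input,
        // rounded under the default nearest-even mode
        core::array::from_fn(|j| a[j].round_ties_even() as i64)
    }

    fn main() {
        assert_eq!(cvt_f32x4_i64x4([0.5, 1.5, -2.5, 3.0]), [0, 2, -2, 3]);
    }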
Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst".

FOR j := 0 to 3
    i := j*64
    l := j*32
    dst[i+63:i] := Convert_FP32_To_UInt64(a[l+31:l])
ENDFOR
dst[MAX:256] := 0

AVX512DQ
AVX512VL
immintrin.h
Convert

Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 3
    i := j*64
    l := j*32
    IF k[j]
        dst[i+63:i] := Convert_FP32_To_UInt64(a[l+31:l])
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:256] := 0

AVX512DQ
AVX512VL
immintrin.h
Convert

Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 3
    i := j*64
    l := j*32
    IF k[j]
        dst[i+63:i] := Convert_FP32_To_UInt64(a[l+31:l])
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

AVX512DQ
AVX512VL
immintrin.h
Convert

Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst".

FOR j := 0 to 1
    i := j*64
    l := j*32
    dst[i+63:i] := Convert_FP32_To_UInt64(a[l+31:l])
ENDFOR
dst[MAX:128] := 0

AVX512DQ
AVX512VL
immintrin.h
Convert

Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 1
    i := j*64
    l := j*32
    IF k[j]
        dst[i+63:i] := Convert_FP32_To_UInt64(a[l+31:l])
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:128] := 0

AVX512DQ
AVX512VL
immintrin.h
Convert

Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 1
    i := j*64
    l := j*32
    IF k[j]
        dst[i+63:i] := Convert_FP32_To_UInt64(a[l+31:l])
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

AVX512DQ
AVX512VL
immintrin.h
Convert
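Combining the two previous points, the masked widening conversions read a 32-bit lane, widen, then apply the mask on the 64-bit side. A sketch of the writemask form, where src supplies the 64-bit lanes that survive a clear mask bit:

    fn mask_cvt_f32x4_u64x4(src: [u64; 4], k: u8, a: [f32; 4]) -> [u64; 4] {
        core::array::from_fn(|j| {
            if (k >> j) & 1 == 1 {
                a[j].round_ties_even() as u64 // converted lane
            } else {
                src[j] // lane kept from `src`
            }
        })
    }

    fn main() {
        let r = mask_cvt_f32x4_u64x4([7; 4], 0b0011, [1.5, 2.5, 3.5, 4.5]);
        assert_eq!(r, [2, 2, 7, 7]);
    }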
Convert packed signed 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst".

FOR j := 0 to 3
    i := j*64
    dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i])
ENDFOR
dst[MAX:256] := 0

AVX512DQ
AVX512VL
immintrin.h
Convert

Convert packed signed 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 3
    i := j*64
    IF k[j]
        dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i])
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:256] := 0

AVX512DQ
AVX512VL
immintrin.h
Convert

Convert packed signed 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 3
    i := j*64
    IF k[j]
        dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i])
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

AVX512DQ
AVX512VL
immintrin.h
Convert

Convert packed signed 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst".

FOR j := 0 to 1
    i := j*64
    dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i])
ENDFOR
dst[MAX:128] := 0

AVX512DQ
AVX512VL
immintrin.h
Convert

Convert packed signed 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 1
    i := j*64
    IF k[j]
        dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i])
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:128] := 0

AVX512DQ
AVX512VL
immintrin.h
Convert

Convert packed signed 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 1
    i := j*64
    IF k[j]
        dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i])
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

AVX512DQ
AVX512VL
immintrin.h
Convert
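Convert_Int64_To_FP64 is exact only up to 2^53; beyond that the result rounds to the nearest representable double. A sketch making the first inexact case visible:

    fn cvt_i64_f64(x: i64) -> f64 {
        x as f64 // rounds to nearest for magnitudes above 2^53
    }

    fn main() {
        let exact = 1i64 << 53;
        assert_eq!(cvt_i64_f64(exact), 9007199254740992.0);
        // 2^53 + 1 is not representable as f64 and rounds back down:
        assert_eq!(cvt_i64_f64(exact + 1), 9007199254740992.0);
    }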
Convert packed signed 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".

FOR j := 0 to 3
    i := j*64
    l := j*32
    dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i])
ENDFOR
dst[MAX:128] := 0

AVX512DQ
AVX512VL
immintrin.h
Convert

Convert packed signed 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 3
    i := j*64
    l := j*32
    IF k[j]
        dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i])
    ELSE
        dst[l+31:l] := src[l+31:l]
    FI
ENDFOR
dst[MAX:128] := 0

AVX512DQ
AVX512VL
immintrin.h
Convert

Convert packed signed 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 3
    i := j*64
    l := j*32
    IF k[j]
        dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i])
    ELSE
        dst[l+31:l] := 0
    FI
ENDFOR
dst[MAX:128] := 0

AVX512DQ
AVX512VL
immintrin.h
Convert

Convert packed signed 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".

FOR j := 0 to 1
    i := j*64
    l := j*32
    dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i])
ENDFOR
dst[MAX:64] := 0

AVX512DQ
AVX512VL
immintrin.h
Convert

Convert packed signed 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 1
    i := j*64
    l := j*32
    IF k[j]
        dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i])
    ELSE
        dst[l+31:l] := src[l+31:l]
    FI
ENDFOR
dst[MAX:64] := 0

AVX512DQ
AVX512VL
immintrin.h
Convert

Convert packed signed 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 1
    i := j*64
    l := j*32
    IF k[j]
        dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i])
    ELSE
        dst[l+31:l] := 0
    FI
ENDFOR
dst[MAX:64] := 0

AVX512DQ
AVX512VL
immintrin.h
Convert
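These narrowing conversions produce half as many result bits as they consume, which is why the 128-bit source form ends with dst[MAX:64] := 0: only the low two f32 lanes carry data. Modeled on a full 128-bit f32 destination:

    fn cvt_i64x2_f32x4(a: [i64; 2]) -> [f32; 4] {
        // two i64 lanes become the low two f32 lanes; the rest is zeroed
        [a[0] as f32, a[1] as f32, 0.0, 0.0]
    }

    fn main() {
        assert_eq!(cvt_i64x2_f32x4([1, -2]), [1.0, -2.0, 0.0, 0.0]);
    }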
- - - Convert packed double-precision (64-bit) floating-point elements in "a" to - packed 64-bit integers with truncation, and store the results in "dst". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := Convert_FP64_To_Int64_Truncate(a[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := Convert_FP64_To_Int64_Truncate(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed double-precision (64-bit) floating-point elements in "a" to - packed 64-bit integers with truncation, and store the results in "dst" using writemask - "k" (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := Convert_FP64_To_Int64_Truncate(a[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_FP64_To_Int64_Truncate(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed double-precision (64-bit) floating-point elements in "a" to - packed 64-bit integers with truncation, and store the results in "dst" using zeromask - "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := Convert_FP64_To_Int64_Truncate(a[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_FP64_To_Int64_Truncate(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed double-precision (64-bit) floating-point elements in "a" to - packed 64-bit integers with truncation, and store the results in "dst". - - FOR j := 0 to 1 - i := j*64 - dst[i+63:i] := Convert_FP64_To_Int64_Truncate(a[i+63:i]) - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := Convert_FP64_To_Int64_Truncate(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed double-precision (64-bit) floating-point elements in "a" to - packed 64-bit integers with truncation, and store the results in "dst" using writemask - "k" (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := Convert_FP64_To_Int64_Truncate(a[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_FP64_To_Int64_Truncate(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed double-precision (64-bit) floating-point elements in "a" to - packed 64-bit integers with truncation, and store the results in "dst" using zeromask - "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := Convert_FP64_To_Int64_Truncate(a[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_FP64_To_Int64_Truncate(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Convert
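The `Convert_FP64_To_Int64_Truncate` steps above round toward zero. A scalar sketch of the unmasked 256-bit form (hypothetical helper name; note that on out-of-range inputs the hardware returns the integer-indefinite value 0x8000_0000_0000_0000, while Rust's `as` cast saturates, so the model is exact only for in-range values):

    // Truncating f64 -> i64 conversion, four lanes of a 256-bit vector.
    fn cvttpd_epi64(a: [f64; 4]) -> [i64; 4] {
        let mut dst = [0i64; 4];
        for j in 0..4 {
            dst[j] = a[j] as i64; // `as` truncates toward zero
        }
        dst
    }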
- - - Convert packed double-precision (64-bit) floating-point elements in "a" to - packed unsigned 64-bit integers with truncation, and store the results in "dst". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := Convert_FP64_To_UInt64_Truncate(a[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := Convert_FP64_To_UInt64_Truncate(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed double-precision (64-bit) floating-point elements in "a" to - packed unsigned 64-bit integers with truncation, and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := Convert_FP64_To_UInt64_Truncate(a[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_FP64_To_UInt64_Truncate(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed double-precision (64-bit) floating-point elements in "a" to - packed unsigned 64-bit integers with truncation, and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := Convert_FP64_To_UInt64_Truncate(a[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_FP64_To_UInt64_Truncate(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed double-precision (64-bit) floating-point elements in "a" to - packed unsigned 64-bit integers with truncation, and store the results in "dst". - - FOR j := 0 to 1 - i := j*64 - dst[i+63:i] := Convert_FP64_To_UInt64_Truncate(a[i+63:i]) - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := Convert_FP64_To_UInt64_Truncate(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed double-precision (64-bit) floating-point elements in "a" to - packed unsigned 64-bit integers with truncation, and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := Convert_FP64_To_UInt64_Truncate(a[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_FP64_To_UInt64_Truncate(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed double-precision (64-bit) floating-point elements in "a" to - packed unsigned 64-bit integers with truncation, and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := Convert_FP64_To_UInt64_Truncate(a[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_FP64_To_UInt64_Truncate(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Convert
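The unsigned forms differ only in the destination type and the out-of-range result (0xFFFF_FFFF_FFFF_FFFF on hardware; Rust's `as u64` instead saturates negatives to 0). A minimal sketch of the 128-bit form, again with a hypothetical name:

    // Truncating f64 -> u64 conversion, two lanes of a 128-bit vector.
    fn cvttpd_epu64(a: [f64; 2]) -> [u64; 2] {
        [a[0] as u64, a[1] as u64]
    }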
- - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed 64-bit integers with truncation, and store the results in "dst". - - FOR j := 0 to 3 - i := j*64 - l := j*32 - dst[i+63:i] := Convert_FP32_To_Int64_Truncate(a[l+31:l]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + l := j*32 + dst[i+63:i] := Convert_FP32_To_Int64_Truncate(a[l+31:l]) +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed 64-bit integers with truncation, and store the results in "dst" using writemask - "k" (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - l := j*32 - IF k[j] - dst[i+63:i] := Convert_FP32_To_Int64_Truncate(a[l+31:l]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := Convert_FP32_To_Int64_Truncate(a[l+31:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed 64-bit integers with truncation, and store the results in "dst" using zeromask - "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - l := j*32 - IF k[j] - dst[i+63:i] := Convert_FP32_To_Int64_Truncate(a[l+31:l]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := Convert_FP32_To_Int64_Truncate(a[l+31:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed 64-bit integers with truncation, and store the results in "dst". - - FOR j := 0 to 1 - i := j*64 - l := j*32 - dst[i+63:i] := Convert_FP32_To_Int64_Truncate(a[l+31:l]) - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + l := j*32 + dst[i+63:i] := Convert_FP32_To_Int64_Truncate(a[l+31:l]) +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed 64-bit integers with truncation, and store the results in "dst" using writemask - "k" (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - l := j*32 - IF k[j] - dst[i+63:i] := Convert_FP32_To_Int64_Truncate(a[l+31:l]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := Convert_FP32_To_Int64_Truncate(a[l+31:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed 64-bit integers with truncation, and store the results in "dst" using zeromask - "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - l := j*32 - IF k[j] - dst[i+63:i] := Convert_FP32_To_Int64_Truncate(a[l+31:l]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := Convert_FP32_To_Int64_Truncate(a[l+31:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Convert
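The dual indices `i := j*64` and `l := j*32` in the entries above express a widening conversion: each 64-bit destination lane is fed from a 32-bit source lane, so the 128-bit form consumes only the low 64 bits of its `f32` source. A sketch under the same naming caveat:

    // Widening truncating f32 -> i64 conversion (128-bit form).
    fn cvttps_epi64(a: [f32; 4]) -> [i64; 2] {
        // Only a[0] and a[1] participate; the upper lanes are ignored.
        [a[0] as i64, a[1] as i64]
    }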
- - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed unsigned 64-bit integers with truncation, and store the results in "dst". - - FOR j := 0 to 3 - i := j*64 - l := j*32 - dst[i+63:i] := Convert_FP32_To_UInt64_Truncate(a[l+31:l]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + l := j*32 + dst[i+63:i] := Convert_FP32_To_UInt64_Truncate(a[l+31:l]) +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed unsigned 64-bit integers with truncation, and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - FOR j := 0 to 3 - i := j*64 - l := j*32 - IF k[j] - dst[i+63:i] := Convert_FP32_To_UInt64_Truncate(a[l+31:l]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := Convert_FP32_To_UInt64_Truncate(a[l+31:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed unsigned 64-bit integers with truncation, and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - l := j*32 - IF k[j] - dst[i+63:i] := Convert_FP32_To_UInt64_Truncate(a[l+31:l]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := Convert_FP32_To_UInt64_Truncate(a[l+31:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed unsigned 64-bit integers with truncation, and store the results in "dst". - - FOR j := 0 to 1 - i := j*64 - l := j*32 - dst[i+63:i] := Convert_FP32_To_UInt64_Truncate(a[l+31:l]) - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + l := j*32 + dst[i+63:i] := Convert_FP32_To_UInt64_Truncate(a[l+31:l]) +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed unsigned 64-bit integers with truncation, and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - FOR j := 0 to 1 - i := j*64 - l := j*32 - IF k[j] - dst[i+63:i] := Convert_FP32_To_UInt64_Truncate(a[l+31:l]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := Convert_FP32_To_UInt64_Truncate(a[l+31:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed unsigned 64-bit integers with truncation, and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - l := j*32 - IF k[j] - dst[i+63:i] := Convert_FP32_To_UInt64_Truncate(a[l+31:l]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + l := j*32 + IF k[j] + dst[i+63:i] := Convert_FP32_To_UInt64_Truncate(a[l+31:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Convert
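For completeness, a sketch of the zeromask variant, where masked-off lanes are forced to zero instead of being copied from a source operand (hypothetical helper name; the two-element array models the two participating low lanes):

    // Zeromask form of the truncating f32 -> u64 conversion.
    fn maskz_cvttps_epu64(k: u8, a: [f32; 2]) -> [u64; 2] {
        let mut dst = [0u64; 2]; // ELSE dst[i+63:i] := 0
        for j in 0..2 {
            if (k >> j) & 1 == 1 {
                dst[j] = a[j] as u64;
            }
        }
        dst
    }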
- - - Convert packed unsigned 64-bit integers in "a" to packed double-precision - (64-bit) floating-point elements, and store the results in "dst". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Convert + + + Convert packed unsigned 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed unsigned 64-bit integers in "a" to packed double-precision - (64-bit) floating-point elements, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed unsigned 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed unsigned 64-bit integers in "a" to packed double-precision - (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Convert + + + + Convert packed unsigned 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed unsigned 64-bit integers in "a" to packed double-precision - (64-bit) floating-point elements, and store the results in "dst". - - FOR j := 0 to 1 - i := j*64 - dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i]) - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Convert + + + Convert packed unsigned 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed unsigned 64-bit integers in "a" to packed double-precision - (64-bit) floating-point elements, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed unsigned 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed unsigned 64-bit integers in "a" to packed double-precision - (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Convert + + + + Convert packed unsigned 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Convert
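Note that the pseudocode above reuses `Convert_Int64_To_FP64` even though the description says the source is unsigned; the intended operation, per the description, is an unsigned conversion. Unlike the truncating conversions, this direction rounds: u64 values above 2^53 are, in general, no longer exactly representable in f64. A sketch (hypothetical name):

    // u64 -> f64 conversion, four lanes; `as f64` rounds to nearest,
    // matching the hardware under the default rounding mode.
    fn cvtepu64_pd(a: [u64; 4]) -> [f64; 4] {
        [a[0] as f64, a[1] as f64, a[2] as f64, a[3] as f64]
    }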
- - - Convert packed unsigned 64-bit integers in "a" to packed single-precision - (32-bit) floating-point elements, and store the results in "dst". - - FOR j := 0 to 3 - i := j*64 - l := j*32 - dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i]) - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Convert + + + Convert packed unsigned 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + l := j*32 + dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed unsigned 64-bit integers in "a" to packed single-precision - (32-bit) floating-point elements, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - l := j*32 - IF k[j] - dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i]) - ELSE - dst[l+31:l] := src[l+31:l] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed unsigned 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + l := j*32 + IF k[j] + dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i]) + ELSE + dst[l+31:l] := src[l+31:l] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed unsigned 64-bit integers in "a" to packed single-precision - (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - l := j*32 - IF k[j] - dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i]) - ELSE - dst[l+31:l] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Convert + + + + Convert packed unsigned 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + l := j*32 + IF k[j] + dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i]) + ELSE + dst[l+31:l] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed unsigned 64-bit integers in "a" to packed single-precision - (32-bit) floating-point elements, and store the results in "dst". - - FOR j := 0 to 1 - i := j*64 - l := j*32 - dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i]) - ENDFOR - dst[MAX:64] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Convert + + + Convert packed unsigned 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + l := j*32 + dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i]) +ENDFOR +dst[MAX:64] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed unsigned 64-bit integers in "a" to packed single-precision - (32-bit) floating-point elements, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - l := j*32 - IF k[j] - dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i]) - ELSE - dst[l+31:l] := src[l+31:l] - FI - ENDFOR - dst[MAX:64] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed unsigned 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + l := j*32 + IF k[j] + dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i]) + ELSE + dst[l+31:l] := src[l+31:l] + FI +ENDFOR +dst[MAX:64] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed unsigned 64-bit integers in "a" to packed single-precision - (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - l := j*32 - IF k[j] - dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i]) - ELSE - dst[l+31:l] := 0 - FI - ENDFOR - dst[MAX:64] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Convert + + + + Convert packed unsigned 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + l := j*32 + IF k[j] + dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i]) + ELSE + dst[l+31:l] := 0 + FI +ENDFOR +dst[MAX:64] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Convert
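The same naming quirk (`Convert_Int64_To_FP32` for an unsigned source) appears here. This direction also narrows: two u64 inputs produce two f32 lanes in the low half of a 128-bit result, and `dst[MAX:64] := 0` clears everything above them. A sketch:

    // Narrowing u64 -> f32 conversion (128-bit source; the 64-bit
    // result is zero-extended to 128 bits).
    fn cvtepu64_ps(a: [u64; 2]) -> [f32; 4] {
        [a[0] as f32, a[1] as f32, 0.0, 0.0]
    }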
- - - - - - Multiply the packed 64-bit integers in "a" and "b", producing intermediate - 128-bit integers, and store the low 64 bits of the intermediate integers in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - tmp[127:0] := a[i+63:i] * b[i+63:i] - dst[i+63:i] := tmp[63:0] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply the packed 64-bit integers in "a" and "b", producing intermediate 128-bit integers, and store the low 64 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + tmp[127:0] := a[i+63:i] * b[i+63:i] + dst[i+63:i] := tmp[63:0] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Multiply the packed 64-bit integers in "a" and "b", producing intermediate - 128-bit integers, and store the low 64 bits of the intermediate integers in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - tmp[127:0] := a[i+63:i] * b[i+63:i] - dst[i+63:i] := tmp[63:0] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Arithmetic + + + + + Multiply the packed 64-bit integers in "a" and "b", producing intermediate 128-bit integers, and store the low 64 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + tmp[127:0] := a[i+63:i] * b[i+63:i] + dst[i+63:i] := tmp[63:0] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Arithmetic
- - - - Multiply the packed 64-bit integers in "a" and "b", producing intermediate - 128-bit integers, and store the low 64 bits of the intermediate integers in "dst". - - FOR j := 0 to 3 - i := j*64 - tmp[127:0] := a[i+63:i] * b[i+63:i] - dst[i+63:i] := tmp[63:0] - ENDFOR - dst[MAX:256] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Arithmetic + + + + Multiply the packed 64-bit integers in "a" and "b", producing intermediate 128-bit integers, and store the low 64 bits of the intermediate integers in "dst". + +FOR j := 0 to 3 + i := j*64 + tmp[127:0] := a[i+63:i] * b[i+63:i] + dst[i+63:i] := tmp[63:0] +ENDFOR +dst[MAX:256] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply the packed 64-bit integers in "a" and "b", producing intermediate - 128-bit integers, and store the low 64 bits of the intermediate integers in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - tmp[127:0] := a[i+63:i] * b[i+63:i] - dst[i+63:i] := tmp[63:0] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply the packed 64-bit integers in "a" and "b", producing intermediate 128-bit integers, and store the low 64 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + tmp[127:0] := a[i+63:i] * b[i+63:i] + dst[i+63:i] := tmp[63:0] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Multiply the packed 64-bit integers in "a" and "b", producing intermediate - 128-bit integers, and store the low 64 bits of the intermediate integers in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - tmp[127:0] := a[i+63:i] * b[i+63:i] - dst[i+63:i] := tmp[63:0] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Arithmetic + + + + + Multiply the packed 64-bit integers in "a" and "b", producing intermediate 128-bit integers, and store the low 64 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + tmp[127:0] := a[i+63:i] * b[i+63:i] + dst[i+63:i] := tmp[63:0] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Arithmetic
- - - - Multiply the packed 64-bit integers in "a" and "b", producing intermediate - 128-bit integers, and store the low 64 bits of the intermediate integers in "dst". - - FOR j := 0 to 1 - i := j*64 - tmp[127:0] := a[i+63:i] * b[i+63:i] - dst[i+63:i] := tmp[63:0] - ENDFOR - dst[MAX:128] := 0 - - - AVX512DQ - AVX512VL -
immintrin.h
- Arithmetic -
- - - - - - - Compute the bitwise NOT of packed double-precision (64-bit) floating-point - elements in "a" and then AND with "b", and store the results in "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Logical + + + + Multiply the packed 64-bit integers in "a" and "b", producing intermediate 128-bit integers, and store the low 64 bits of the intermediate integers in "dst". + +FOR j := 0 to 1 + i := j*64 + tmp[127:0] := a[i+63:i] * b[i+63:i] + dst[i+63:i] := tmp[63:0] +ENDFOR +dst[MAX:128] := 0 + + + AVX512DQ + AVX512VL +
immintrin.h
+ Arithmetic +
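The `tmp[127:0] := a * b; dst := tmp[63:0]` sequence above is exactly a wrapping 64-bit multiply: the low 64 bits of the full product do not depend on signedness. In Rust this is `wrapping_mul` (hypothetical helper name for the 128-bit form):

    // Low half of the 64x64 -> 128-bit product, per lane.
    fn mullo_epi64(a: [i64; 2], b: [i64; 2]) -> [i64; 2] {
        [a[0].wrapping_mul(b[0]), a[1].wrapping_mul(b[1])]
    }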
+ + + + + + + Compute the bitwise NOT of packed double-precision (64-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Logical
- - - - - - Compute the bitwise NOT of packed double-precision (64-bit) floating-point - elements in "a" and then AND with "b", and store the results in "dst" using writemask - "k" (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Logical + + + + + + Compute the bitwise NOT of packed double-precision (64-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Logical
- - - - - Compute the bitwise NOT of packed double-precision (64-bit) floating-point - elements in "a" and then AND with "b", and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Logical + + + + + Compute the bitwise NOT of packed double-precision (64-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Logical
- - - - Compute the bitwise NOT of packed single-precision (32-bit) floating-point - elements in "a" and then AND with "b", and store the results in "dst". - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Logical + + + + Compute the bitwise NOT of packed single-precision (32-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Logical
- - - - - - Compute the bitwise NOT of packed single-precision (32-bit) floating-point - elements in "a" and then AND with "b", and store the results in "dst" using writemask - "k" (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Logical + + + + + + Compute the bitwise NOT of packed single-precision (32-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Logical
- - - - - Compute the bitwise NOT of packed single-precision (32-bit) floating-point - elements in "a" and then AND with "b", and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Logical + + + + + Compute the bitwise NOT of packed single-precision (32-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Logical
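The floating-point logical entries operate on raw bit patterns, so a scalar model round-trips through `to_bits`/`from_bits`. The ANDNOT form is commonly used to clear the sign bit (a minimal one-lane sketch; the helper name is hypothetical):

    // (NOT a) AND b over the bit pattern of one f64 lane.
    fn andnot_pd(a: f64, b: f64) -> f64 {
        f64::from_bits(!a.to_bits() & b.to_bits())
    }

    // Example: andnot_pd(-0.0, x) clears the sign bit, computing
    // x.abs(), since -0.0 has only the sign bit set.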
- - - - Compute the bitwise AND of packed double-precision (64-bit) floating-point - elements in "a" and "b", and store the results in "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := (a[i+63:i] AND b[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Logical + + + + Compute the bitwise AND of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := (a[i+63:i] AND b[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Logical
- - - - - - Compute the bitwise AND of packed double-precision (64-bit) floating-point - elements in "a" and "b", and store the results in "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := (a[i+63:i] AND b[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Logical + + + + + + Compute the bitwise AND of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] AND b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Logical
- - - - - Compute the bitwise AND of packed double-precision (64-bit) floating-point - elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := (a[i+63:i] AND b[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Logical + + + + + Compute the bitwise AND of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] AND b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Logical
- - - - Compute the bitwise AND of packed single-precision (32-bit) floating-point - elements in "a" and "b", and store the results in "dst". - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := (a[i+31:i] AND b[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Logical + + + + Compute the bitwise AND of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := (a[i+31:i] AND b[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Logical
- - - - - - Compute the bitwise AND of packed single-precision (32-bit) floating-point - elements in "a" and "b", and store the results in "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := (a[i+31:i] AND b[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Logical + + + + + + Compute the bitwise AND of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] AND b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Logical
- - - - - Compute the bitwise AND of packed single-precision (32-bit) floating-point - elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := (a[i+31:i] AND b[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Logical + + + + + Compute the bitwise AND of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] AND b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Logical
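A sketch of the masked AND over all 16 f32 lanes of a 512-bit vector, combining the bitwise operation with the writemask pattern shown earlier (hypothetical helper name):

    // Masked bitwise AND; lanes with a clear mask bit keep `src`.
    fn mask_and_ps(src: [f32; 16], k: u16, a: [f32; 16], b: [f32; 16]) -> [f32; 16] {
        let mut dst = src;
        for j in 0..16 {
            if (k >> j) & 1 == 1 {
                dst[j] = f32::from_bits(a[j].to_bits() & b[j].to_bits());
            }
        }
        dst
    }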
- - - - - - Compute the bitwise OR of packed double-precision (64-bit) floating-point - elements in "a" and "b", and store the results in "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] OR b[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Logical + + + + + + Compute the bitwise OR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] OR b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Logical
- - - - - Compute the bitwise OR of packed double-precision (64-bit) floating-point - elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] OR b[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Logical + + + + + Compute the bitwise OR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] OR b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Logical
- - - - Compute the bitwise OR of packed double-precision (64-bit) floating-point - elements in "a" and "b", and store the results in "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := a[i+63:i] OR b[i+63:i] - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Logical + + + + Compute the bitwise OR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := a[i+63:i] OR b[i+63:i] +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Logical
- - - - - - Compute the bitwise OR of packed single-precision (32-bit) floating-point - elements in "a" and "b", and store the results in "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] OR b[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Logical + + + + + + Compute the bitwise OR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] OR b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Logical
- - - - - Compute the bitwise OR of packed single-precision (32-bit) floating-point - elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] OR b[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Logical + + + + + Compute the bitwise OR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] OR b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Logical
- - - - Compute the bitwise OR of packed single-precision (32-bit) floating-point - elements in "a" and "b", and store the results in "dst". - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := a[i+31:i] OR b[i+31:i] - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Logical + + + + Compute the bitwise OR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := a[i+31:i] OR b[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Logical
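OR follows the same bit-pattern scheme; ORing with -0.0 forces the sign bit on, which is the usual idiom for a negative absolute value. A one-lane sketch (hypothetical name):

    // Bitwise OR over the bit pattern of one f64 lane.
    fn or_pd(a: f64, b: f64) -> f64 {
        f64::from_bits(a.to_bits() | b.to_bits())
    }

    // Example: or_pd(x.abs(), -0.0) produces -x.abs() bit-for-bit.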
- - - - - - Compute the bitwise XOR of packed double-precision (64-bit) floating-point - elements in "a" and "b", and store the results in "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] XOR b[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Logical + + + + + + Compute the bitwise XOR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] XOR b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Logical
- - - - - Compute the bitwise XOR of packed double-precision (64-bit) floating-point - elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] XOR b[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Logical + + + + + Compute the bitwise XOR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] XOR b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Logical
- - - - Compute the bitwise XOR of packed double-precision (64-bit) floating-point - elements in "a" and "b", and store the results in "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := a[i+63:i] XOR b[i+63:i] - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Logical + + + + Compute the bitwise XOR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := a[i+63:i] XOR b[i+63:i] +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Logical
- - - - - - Compute the bitwise XOR of packed single-precision (32-bit) floating-point - elements in "a" and "b", and store the results in "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] XOR b[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Logical + + + + + + Compute the bitwise XOR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] XOR b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Logical
- - - - - Compute the bitwise XOR of packed single-precision (32-bit) floating-point - elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] XOR b[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Logical + + + + + Compute the bitwise XOR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] XOR b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Logical
- - - - Compute the bitwise XOR of packed single-precision (32-bit) floating-point - elements in "a" and "b", and store the results in "dst". - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := a[i+31:i] XOR b[i+31:i] - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Logical + + + + Compute the bitwise XOR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := a[i+31:i] XOR b[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Logical
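XOR with a sign-bit mask flips the sign of every lane, a classic branch-free negation, and XOR of a value with itself yields all-zero bits. A one-lane sketch (hypothetical name):

    // Bitwise XOR over the bit pattern of one f32 lane.
    fn xor_ps(a: f32, b: f32) -> f32 {
        f32::from_bits(a.to_bits() ^ b.to_bits())
    }

    // Example: xor_ps(x, -0.0) negates x without touching other bits.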
- - - Broadcast the lower 2 packed single-precision (32-bit) floating-point elements - from "a" to all elements of "dst". - - FOR j := 0 to 15 - i := j*32 - n := (j % 2)*32 - dst[i+31:i] := a[n+31:n] - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + Broadcast the lower 2 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst". + +FOR j := 0 to 15 + i := j*32 + n := (j % 2)*32 + dst[i+31:i] := a[n+31:n] +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Miscellaneous
- - - - - Broadcast the lower 2 packed single-precision (32-bit) floating-point elements - from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" - when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - n := (j % 2)*32 - IF k[j] - dst[i+31:i] := a[n+31:n] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + + Broadcast the lower 2 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + n := (j % 2)*32 + IF k[j] + dst[i+31:i] := a[n+31:n] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Miscellaneous
- - - - Broadcast the lower 2 packed single-precision (32-bit) floating-point elements - from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - n := (j % 2)*32 - IF k[j] - dst[i+31:i] := a[n+31:n] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + Broadcast the lower 2 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + n := (j % 2)*32 + IF k[j] + dst[i+31:i] := a[n+31:n] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Miscellaneous
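The `n := (j % 2)*32` indexing above tiles the two low source lanes across all 16 destination lanes. In Rust the modular pattern is captured directly by `core::array::from_fn` (hypothetical helper name; the source array models a 128-bit register whose upper lanes are unused):

    // Broadcast the low two f32 lanes across a 512-bit vector.
    fn broadcast_f32x2(a: [f32; 4]) -> [f32; 16] {
        core::array::from_fn(|j| a[j % 2])
    }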
- - - Broadcast the 8 packed single-precision (32-bit) floating-point elements from - "a" to all elements of "dst". - - FOR j := 0 to 15 - i := j*32 - n := (j % 8)*32 - dst[i+31:i] := a[n+31:n] - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + Broadcast the 8 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst". + +FOR j := 0 to 15 + i := j*32 + n := (j % 8)*32 + dst[i+31:i] := a[n+31:n] +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Miscellaneous
- - - - - Broadcast the 8 packed single-precision (32-bit) floating-point elements from - "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - n := (j % 8)*32 - IF k[j] - dst[i+31:i] := a[n+31:n] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + + Broadcast the 8 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + n := (j % 8)*32 + IF k[j] + dst[i+31:i] := a[n+31:n] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Miscellaneous
- - - - Broadcast the 8 packed single-precision (32-bit) floating-point elements from - "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - n := (j % 8)*32 - IF k[j] - dst[i+31:i] := a[n+31:n] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + Broadcast the 8 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + n := (j % 8)*32 + IF k[j] + dst[i+31:i] := a[n+31:n] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Miscellaneous
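The f32x8 form is the same tiling with period 8: the 256-bit source simply repeats twice. Sketch under the same caveats:

    // Broadcast eight f32 lanes across a 512-bit vector (two copies).
    fn broadcast_f32x8(a: [f32; 8]) -> [f32; 16] {
        core::array::from_fn(|j| a[j % 8])
    }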
Broadcast the 2 packed double-precision (64-bit) floating-point elements from "a" to all elements of "dst".

FOR j := 0 to 7
  i := j*64
  n := (j % 2)*64
  dst[i+63:i] := a[n+63:n]
ENDFOR
dst[MAX:512] := 0

AVX512DQ / immintrin.h / Miscellaneous

Broadcast the 2 packed double-precision (64-bit) floating-point elements from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 7
  i := j*64
  n := (j % 2)*64
  IF k[j]
    dst[i+63:i] := a[n+63:n]
  ELSE
    dst[i+63:i] := src[i+63:i]
  FI
ENDFOR
dst[MAX:512] := 0

AVX512DQ / immintrin.h / Miscellaneous

Broadcast the 2 packed double-precision (64-bit) floating-point elements from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 7
  i := j*64
  n := (j % 2)*64
  IF k[j]
    dst[i+63:i] := a[n+63:n]
  ELSE
    dst[i+63:i] := 0
  FI
ENDFOR
dst[MAX:512] := 0

AVX512DQ / immintrin.h / Miscellaneous
Broadcast the lower 2 packed 32-bit integers from "a" to all elements of "dst".

FOR j := 0 to 15
  i := j*32
  n := (j % 2)*32
  dst[i+31:i] := a[n+31:n]
ENDFOR
dst[MAX:512] := 0

AVX512DQ / immintrin.h / Miscellaneous

Broadcast the lower 2 packed 32-bit integers from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 15
  i := j*32
  n := (j % 2)*32
  IF k[j]
    dst[i+31:i] := a[n+31:n]
  ELSE
    dst[i+31:i] := src[i+31:i]
  FI
ENDFOR
dst[MAX:512] := 0

AVX512DQ / immintrin.h / Miscellaneous

Broadcast the lower 2 packed 32-bit integers from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 15
  i := j*32
  n := (j % 2)*32
  IF k[j]
    dst[i+31:i] := a[n+31:n]
  ELSE
    dst[i+31:i] := 0
  FI
ENDFOR
dst[MAX:512] := 0

AVX512DQ / immintrin.h / Miscellaneous
Broadcast the 8 packed 32-bit integers from "a" to all elements of "dst".

FOR j := 0 to 15
  i := j*32
  n := (j % 8)*32
  dst[i+31:i] := a[n+31:n]
ENDFOR
dst[MAX:512] := 0

AVX512DQ / immintrin.h / Miscellaneous

Broadcast the 8 packed 32-bit integers from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 15
  i := j*32
  n := (j % 8)*32
  IF k[j]
    dst[i+31:i] := a[n+31:n]
  ELSE
    dst[i+31:i] := src[i+31:i]
  FI
ENDFOR
dst[MAX:512] := 0

AVX512DQ / immintrin.h / Miscellaneous

Broadcast the 8 packed 32-bit integers from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 15
  i := j*32
  n := (j % 8)*32
  IF k[j]
    dst[i+31:i] := a[n+31:n]
  ELSE
    dst[i+31:i] := 0
  FI
ENDFOR
dst[MAX:512] := 0

AVX512DQ / immintrin.h / Miscellaneous
Broadcast the 2 packed 64-bit integers from "a" to all elements of "dst".

FOR j := 0 to 7
  i := j*64
  n := (j % 2)*64
  dst[i+63:i] := a[n+63:n]
ENDFOR
dst[MAX:512] := 0

AVX512DQ / immintrin.h / Miscellaneous

Broadcast the 2 packed 64-bit integers from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 7
  i := j*64
  n := (j % 2)*64
  IF k[j]
    dst[i+63:i] := a[n+63:n]
  ELSE
    dst[i+63:i] := src[i+63:i]
  FI
ENDFOR
dst[MAX:512] := 0

AVX512DQ / immintrin.h / Miscellaneous

Broadcast the 2 packed 64-bit integers from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 7
  i := j*64
  n := (j % 2)*64
  IF k[j]
    dst[i+63:i] := a[n+63:n]
  ELSE
    dst[i+63:i] := 0
  FI
ENDFOR
dst[MAX:512] := 0

AVX512DQ / immintrin.h / Miscellaneous
Extract 256 bits (composed of 8 packed single-precision (32-bit) floating-point elements) from "a", selected with "imm8", and store the result in "dst".

CASE imm8[0] OF
0: dst[255:0] := a[255:0]
1: dst[255:0] := a[511:256]
ESAC
dst[MAX:256] := 0

AVX512DQ / immintrin.h / Miscellaneous

Extract 256 bits (composed of 8 packed single-precision (32-bit) floating-point elements) from "a", selected with "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

CASE imm8[0] OF
0: tmp[255:0] := a[255:0]
1: tmp[255:0] := a[511:256]
ESAC
FOR j := 0 to 7
  i := j*32
  IF k[j]
    dst[i+31:i] := tmp[i+31:i]
  ELSE
    dst[i+31:i] := src[i+31:i]
  FI
ENDFOR
dst[MAX:256] := 0

AVX512DQ / immintrin.h / Miscellaneous

Extract 256 bits (composed of 8 packed single-precision (32-bit) floating-point elements) from "a", selected with "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

CASE imm8[0] OF
0: tmp[255:0] := a[255:0]
1: tmp[255:0] := a[511:256]
ESAC
FOR j := 0 to 7
  i := j*32
  IF k[j]
    dst[i+31:i] := tmp[i+31:i]
  ELSE
    dst[i+31:i] := 0
  FI
ENDFOR
dst[MAX:256] := 0

AVX512DQ / immintrin.h / Miscellaneous
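The imm8 selection in the extract entries above reduces to picking a 256-bit half; a rough Rust model (illustrative only, with arrays standing in for __m512/__m256):

// IMM8 bit 0 selects the low (0) or high (1) 256-bit half of `a`.
fn extract_f32x8<const IMM8: i32>(a: [f32; 16]) -> [f32; 8] {
    let base = if IMM8 & 1 == 0 { 0 } else { 8 };
    let mut dst = [0.0f32; 8];
    dst.copy_from_slice(&a[base..base + 8]);
    dst
}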
Extract 128 bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "a", selected with "imm8", and store the result in "dst".

CASE imm8[1:0] OF
0: dst[127:0] := a[127:0]
1: dst[127:0] := a[255:128]
2: dst[127:0] := a[383:256]
3: dst[127:0] := a[511:384]
ESAC
dst[MAX:128] := 0

AVX512DQ / immintrin.h / Miscellaneous

Extract 128 bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "a", selected with "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

CASE imm8[1:0] OF
0: tmp[127:0] := a[127:0]
1: tmp[127:0] := a[255:128]
2: tmp[127:0] := a[383:256]
3: tmp[127:0] := a[511:384]
ESAC
FOR j := 0 to 1
  i := j*64
  IF k[j]
    dst[i+63:i] := tmp[i+63:i]
  ELSE
    dst[i+63:i] := src[i+63:i]
  FI
ENDFOR
dst[MAX:128] := 0

AVX512DQ / immintrin.h / Miscellaneous

Extract 128 bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "a", selected with "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

CASE imm8[1:0] OF
0: tmp[127:0] := a[127:0]
1: tmp[127:0] := a[255:128]
2: tmp[127:0] := a[383:256]
3: tmp[127:0] := a[511:384]
ESAC
FOR j := 0 to 1
  i := j*64
  IF k[j]
    dst[i+63:i] := tmp[i+63:i]
  ELSE
    dst[i+63:i] := 0
  FI
ENDFOR
dst[MAX:128] := 0

AVX512DQ / immintrin.h / Miscellaneous
Extract 256 bits (composed of 8 packed 32-bit integers) from "a", selected with "imm8", and store the result in "dst".

CASE imm8[0] OF
0: dst[255:0] := a[255:0]
1: dst[255:0] := a[511:256]
ESAC
dst[MAX:256] := 0

AVX512DQ / immintrin.h / Miscellaneous

Extract 256 bits (composed of 8 packed 32-bit integers) from "a", selected with "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

CASE imm8[0] OF
0: tmp[255:0] := a[255:0]
1: tmp[255:0] := a[511:256]
ESAC
FOR j := 0 to 7
  i := j*32
  IF k[j]
    dst[i+31:i] := tmp[i+31:i]
  ELSE
    dst[i+31:i] := src[i+31:i]
  FI
ENDFOR
dst[MAX:256] := 0

AVX512DQ / immintrin.h / Miscellaneous

Extract 256 bits (composed of 8 packed 32-bit integers) from "a", selected with "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

CASE imm8[0] OF
0: tmp[255:0] := a[255:0]
1: tmp[255:0] := a[511:256]
ESAC
FOR j := 0 to 7
  i := j*32
  IF k[j]
    dst[i+31:i] := tmp[i+31:i]
  ELSE
    dst[i+31:i] := 0
  FI
ENDFOR
dst[MAX:256] := 0

AVX512DQ / immintrin.h / Miscellaneous
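For the masked forms, the half-selection composes with a per-element blend; a hedged Rust sketch of the writemask variant (names and array types are ours, not the stdarch signatures):

// Select the half with IMM8, then blend with `src` under writemask `k`.
fn mask_extract_i32x8<const IMM8: i32>(src: [i32; 8], k: u8, a: [i32; 16]) -> [i32; 8] {
    let base = if IMM8 & 1 == 0 { 0 } else { 8 };
    let mut dst = [0i32; 8];
    for j in 0..8 {
        dst[j] = if (k >> j) & 1 == 1 { a[base + j] } else { src[j] };
    }
    dst
}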
Extract 128 bits (composed of 2 packed 64-bit integers) from "a", selected with "imm8", and store the result in "dst".

CASE imm8[1:0] OF
0: dst[127:0] := a[127:0]
1: dst[127:0] := a[255:128]
2: dst[127:0] := a[383:256]
3: dst[127:0] := a[511:384]
ESAC
dst[MAX:128] := 0

AVX512DQ / immintrin.h / Miscellaneous

Extract 128 bits (composed of 2 packed 64-bit integers) from "a", selected with "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

CASE imm8[1:0] OF
0: tmp[127:0] := a[127:0]
1: tmp[127:0] := a[255:128]
2: tmp[127:0] := a[383:256]
3: tmp[127:0] := a[511:384]
ESAC
FOR j := 0 to 1
  i := j*64
  IF k[j]
    dst[i+63:i] := tmp[i+63:i]
  ELSE
    dst[i+63:i] := src[i+63:i]
  FI
ENDFOR
dst[MAX:128] := 0

AVX512DQ / immintrin.h / Miscellaneous

Extract 128 bits (composed of 2 packed 64-bit integers) from "a", selected with "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

CASE imm8[1:0] OF
0: tmp[127:0] := a[127:0]
1: tmp[127:0] := a[255:128]
2: tmp[127:0] := a[383:256]
3: tmp[127:0] := a[511:384]
ESAC
FOR j := 0 to 1
  i := j*64
  IF k[j]
    dst[i+63:i] := tmp[i+63:i]
  ELSE
    dst[i+63:i] := 0
  FI
ENDFOR
dst[MAX:128] := 0

AVX512DQ / immintrin.h / Miscellaneous
Test packed double-precision (64-bit) floating-point elements in "a" for special categories specified by "imm8", and store the results in mask vector "k". [fpclass_note]

FOR j := 0 to 7
  i := j*64
  k[j] := CheckFPClass_FP64(a[i+63:i], imm8[7:0])
ENDFOR
k[MAX:8] := 0

AVX512DQ / immintrin.h / Miscellaneous

Test packed double-precision (64-bit) floating-point elements in "a" for special categories specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). [fpclass_note]

FOR j := 0 to 7
  i := j*64
  IF k1[j]
    k[j] := CheckFPClass_FP64(a[i+63:i], imm8[7:0])
  ELSE
    k[j] := 0
  FI
ENDFOR
k[MAX:8] := 0

AVX512DQ / immintrin.h / Miscellaneous
Test packed single-precision (32-bit) floating-point elements in "a" for special categories specified by "imm8", and store the results in mask vector "k". [fpclass_note]

FOR j := 0 to 15
  i := j*32
  k[j] := CheckFPClass_FP32(a[i+31:i], imm8[7:0])
ENDFOR
k[MAX:16] := 0

AVX512DQ / immintrin.h / Miscellaneous

Test packed single-precision (32-bit) floating-point elements in "a" for special categories specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). [fpclass_note]

FOR j := 0 to 15
  i := j*32
  IF k1[j]
    k[j] := CheckFPClass_FP32(a[i+31:i], imm8[7:0])
  ELSE
    k[j] := 0
  FI
ENDFOR
k[MAX:16] := 0

AVX512DQ / immintrin.h / Miscellaneous
Test the lower double-precision (64-bit) floating-point element in "a" for special categories specified by "imm8", and store the result in mask vector "k". [fpclass_note]

k[0] := CheckFPClass_FP64(a[63:0], imm8[7:0])
k[MAX:1] := 0

AVX512DQ / immintrin.h / Miscellaneous

Test the lower double-precision (64-bit) floating-point element in "a" for special categories specified by "imm8", and store the result in mask vector "k" using zeromask "k1" (the element is zeroed out when mask bit 0 is not set). [fpclass_note]

IF k1[0]
  k[0] := CheckFPClass_FP64(a[63:0], imm8[7:0])
ELSE
  k[0] := 0
FI
k[MAX:1] := 0

AVX512DQ / immintrin.h / Miscellaneous
Test the lower single-precision (32-bit) floating-point element in "a" for special categories specified by "imm8", and store the result in mask vector "k". [fpclass_note]

k[0] := CheckFPClass_FP32(a[31:0], imm8[7:0])
k[MAX:1] := 0

AVX512DQ / immintrin.h / Miscellaneous

Test the lower single-precision (32-bit) floating-point element in "a" for special categories specified by "imm8", and store the result in mask vector "k" using zeromask "k1" (the element is zeroed out when mask bit 0 is not set). [fpclass_note]

IF k1[0]
  k[0] := CheckFPClass_FP32(a[31:0], imm8[7:0])
ELSE
  k[0] := 0
FI
k[MAX:1] := 0

AVX512DQ / immintrin.h / Miscellaneous
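The fpclass entries above all share one gating pattern: a per-element category test whose result lands in a mask register, optionally ANDed with an input mask. A Rust sketch with the category test left abstract (the closure stands in for CheckFPClass_FP64; imm8's bit layout is covered by [fpclass_note] and is not re-derived here):

// Masked fpclass pattern: k[j] = k1[j] AND check(a[j]).
fn mask_fpclass_pd(k1: u8, a: [f64; 8], check: impl Fn(f64) -> bool) -> u8 {
    let mut k = 0u8;
    for j in 0..8 {
        if (k1 >> j) & 1 == 1 && check(a[j]) {
            k |= 1 << j; // zeromask: the bit stays 0 otherwise
        }
    }
    k
}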
- - - - - Copy "a" to "dst", then insert 256 bits (composed of 8 packed single-precision - (32-bit) floating-point elements) from "b" into "dst" at the location specified by - "imm8". - - dst[511:0] := a[511:0] - CASE (imm8[0]) OF - 0: dst[255:0] := b[255:0] - 1: dst[511:256] := b[255:0] - ESAC - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + + Copy "a" to "dst", then insert 256 bits (composed of 8 packed single-precision (32-bit) floating-point elements) from "b" into "dst" at the location specified by "imm8". + +dst[511:0] := a[511:0] +CASE (imm8[0]) OF +0: dst[255:0] := b[255:0] +1: dst[511:256] := b[255:0] +ESAC +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Miscellaneous
- - - - - - - Copy "a" to "tmp", then insert 256 bits (composed of 8 packed single-precision - (32-bit) floating-point elements) from "b" into "tmp" at the location specified by - "imm8". Store "tmp" to "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - tmp[511:0] := a[511:0] - CASE (imm8[0]) OF - 0: tmp[255:0] := b[255:0] - 1: tmp[511:256] := b[255:0] - ESAC - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := tmp[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + + + + Copy "a" to "tmp", then insert 256 bits (composed of 8 packed single-precision (32-bit) floating-point elements) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp[511:0] := a[511:0] +CASE (imm8[0]) OF +0: tmp[255:0] := b[255:0] +1: tmp[511:256] := b[255:0] +ESAC +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Miscellaneous
- - - - - - Copy "a" to "tmp", then insert 256 bits (composed of 8 packed single-precision - (32-bit) floating-point elements) from "b" into "tmp" at the location specified by - "imm8". Store "tmp" to "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - tmp[511:0] := a[511:0] - CASE (imm8[0]) OF - 0: tmp[255:0] := b[255:0] - 1: tmp[511:256] := b[255:0] - ESAC - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := tmp[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + + + Copy "a" to "tmp", then insert 256 bits (composed of 8 packed single-precision (32-bit) floating-point elements) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp[511:0] := a[511:0] +CASE (imm8[0]) OF +0: tmp[255:0] := b[255:0] +1: tmp[511:256] := b[255:0] +ESAC +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Miscellaneous
- - - - - Copy "a" to "dst", then insert 128 bits (composed of 2 packed double-precision - (64-bit) floating-point elements) from "b" into "dst" at the location specified by - "imm8". - - dst[511:0] := a[511:0] - CASE imm8[1:0] OF - 0: dst[127:0] := b[127:0] - 1: dst[255:128] := b[127:0] - 2: dst[383:256] := b[127:0] - 3: dst[511:384] := b[127:0] - ESAC - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + + Copy "a" to "dst", then insert 128 bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "b" into "dst" at the location specified by "imm8". + +dst[511:0] := a[511:0] +CASE imm8[1:0] OF +0: dst[127:0] := b[127:0] +1: dst[255:128] := b[127:0] +2: dst[383:256] := b[127:0] +3: dst[511:384] := b[127:0] +ESAC +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Miscellaneous
- - - - - - - Copy "a" to "tmp", then insert 128 bits (composed of 2 packed double-precision - (64-bit) floating-point elements) from "b" into "tmp" at the location specified by - "imm8". Store "tmp" to "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - tmp[511:0] := a[511:0] - CASE (imm8[1:0]) OF - 0: tmp[127:0] := b[127:0] - 1: tmp[255:128] := b[127:0] - 2: tmp[383:256] := b[127:0] - 3: tmp[511:384] := b[127:0] - ESAC - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := tmp[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + + + + Copy "a" to "tmp", then insert 128 bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp[511:0] := a[511:0] +CASE (imm8[1:0]) OF +0: tmp[127:0] := b[127:0] +1: tmp[255:128] := b[127:0] +2: tmp[383:256] := b[127:0] +3: tmp[511:384] := b[127:0] +ESAC +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Miscellaneous
- - - - - - Copy "a" to "tmp", then insert 128 bits (composed of 2 packed double-precision - (64-bit) floating-point elements) from "b" into "tmp" at the location specified by - "imm8". Store "tmp" to "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - tmp[511:0] := a[511:0] - CASE (imm8[1:0]) OF - 0: tmp[127:0] := b[127:0] - 1: tmp[255:128] := b[127:0] - 2: tmp[383:256] := b[127:0] - 3: tmp[511:384] := b[127:0] - ESAC - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := tmp[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + + + Copy "a" to "tmp", then insert 128 bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp[511:0] := a[511:0] +CASE (imm8[1:0]) OF +0: tmp[127:0] := b[127:0] +1: tmp[255:128] := b[127:0] +2: tmp[383:256] := b[127:0] +3: tmp[511:384] := b[127:0] +ESAC +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Miscellaneous
- - - - - Copy "a" to "dst", then insert 256 bits (composed of 8 packed 32-bit integers) - from "b" into "dst" at the location specified by "imm8". - - dst[511:0] := a[511:0] - CASE imm8[0] OF - 0: dst[255:0] := b[255:0] - 1: dst[511:256] := b[255:0] - ESAC - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + + Copy "a" to "dst", then insert 256 bits (composed of 8 packed 32-bit integers) from "b" into "dst" at the location specified by "imm8". + +dst[511:0] := a[511:0] +CASE imm8[0] OF +0: dst[255:0] := b[255:0] +1: dst[511:256] := b[255:0] +ESAC +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Miscellaneous
- - - - - - - Copy "a" to "tmp", then insert 256 bits (composed of 8 packed 32-bit integers) - from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - tmp[511:0] := a[511:0] - CASE (imm8[0]) OF - 0: tmp[255:0] := b[255:0] - 1: tmp[511:256] := b[255:0] - ESAC - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := tmp[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + + + + Copy "a" to "tmp", then insert 256 bits (composed of 8 packed 32-bit integers) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp[511:0] := a[511:0] +CASE (imm8[0]) OF +0: tmp[255:0] := b[255:0] +1: tmp[511:256] := b[255:0] +ESAC +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Miscellaneous
- - - - - - Copy "a" to "tmp", then insert 256 bits (composed of 8 packed 32-bit integers) - from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - tmp[511:0] := a[511:0] - CASE (imm8[0]) OF - 0: tmp[255:0] := b[255:0] - 1: tmp[511:256] := b[255:0] - ESAC - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := tmp[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + + + Copy "a" to "tmp", then insert 256 bits (composed of 8 packed 32-bit integers) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp[511:0] := a[511:0] +CASE (imm8[0]) OF +0: tmp[255:0] := b[255:0] +1: tmp[511:256] := b[255:0] +ESAC +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Miscellaneous
- - - - - Copy "a" to "dst", then insert 128 bits (composed of 2 packed 64-bit integers) - from "b" into "dst" at the location specified by "imm8". - - dst[511:0] := a[511:0] - CASE imm8[1:0] OF - 0: dst[127:0] := b[127:0] - 1: dst[255:128] := b[127:0] - 2: dst[383:256] := b[127:0] - 3: dst[511:384] := b[127:0] - ESAC - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + + Copy "a" to "dst", then insert 128 bits (composed of 2 packed 64-bit integers) from "b" into "dst" at the location specified by "imm8". + +dst[511:0] := a[511:0] +CASE imm8[1:0] OF +0: dst[127:0] := b[127:0] +1: dst[255:128] := b[127:0] +2: dst[383:256] := b[127:0] +3: dst[511:384] := b[127:0] +ESAC +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Miscellaneous
- - - - - - - Copy "a" to "tmp", then insert 128 bits (composed of 2 packed 64-bit integers) - from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - tmp[511:0] := a[511:0] - CASE (imm8[1:0]) OF - 0: tmp[127:0] := b[127:0] - 1: tmp[255:128] := b[127:0] - 2: tmp[383:256] := b[127:0] - 3: tmp[511:384] := b[127:0] - ESAC - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := tmp[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + + + + Copy "a" to "tmp", then insert 128 bits (composed of 2 packed 64-bit integers) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp[511:0] := a[511:0] +CASE (imm8[1:0]) OF +0: tmp[127:0] := b[127:0] +1: tmp[255:128] := b[127:0] +2: tmp[383:256] := b[127:0] +3: tmp[511:384] := b[127:0] +ESAC +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Miscellaneous
- - - - - - Copy "a" to "tmp", then insert 128 bits (composed of 2 packed 64-bit integers) - from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - tmp[511:0] := a[511:0] - CASE (imm8[1:0]) OF - 0: tmp[127:0] := b[127:0] - 1: tmp[255:128] := b[127:0] - 2: tmp[383:256] := b[127:0] - 3: tmp[511:384] := b[127:0] - ESAC - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := tmp[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + + + Copy "a" to "tmp", then insert 128 bits (composed of 2 packed 64-bit integers) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp[511:0] := a[511:0] +CASE (imm8[1:0]) OF +0: tmp[127:0] := b[127:0] +1: tmp[255:128] := b[127:0] +2: tmp[383:256] := b[127:0] +3: tmp[511:384] := b[127:0] +ESAC +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Miscellaneous
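The insert entries are the mirror image of the extracts: copy "a", then overwrite one imm8-selected lane with "b". A rough Rust model (illustrative signature, arrays standing in for the vector types):

// imm8[1:0] selects which 128-bit lane (two i64 elements) to overwrite.
fn insert_i64x2<const IMM8: i32>(a: [i64; 8], b: [i64; 2]) -> [i64; 8] {
    let mut dst = a;
    let lane = (IMM8 & 0b11) as usize;
    dst[lane * 2..lane * 2 + 2].copy_from_slice(&b);
    dst
}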
Set each bit of mask register "k" based on the most significant bit of the corresponding packed 32-bit integer in "a".

FOR j := 0 to 15
  i := j*32
  IF a[i+31]
    k[j] := 1
  ELSE
    k[j] := 0
  FI
ENDFOR
k[MAX:16] := 0

AVX512DQ / immintrin.h / Miscellaneous
Set each packed 32-bit integer in "dst" to all ones or all zeros based on the value of the corresponding bit in "k".

FOR j := 0 to 15
  i := j*32
  IF k[j]
    dst[i+31:i] := 0xFFFFFFFF
  ELSE
    dst[i+31:i] := 0
  FI
ENDFOR
dst[MAX:512] := 0

AVX512DQ / immintrin.h / Miscellaneous
Set each packed 64-bit integer in "dst" to all ones or all zeros based on the value of the corresponding bit in "k".

FOR j := 0 to 7
  i := j*64
  IF k[j]
    dst[i+63:i] := 0xFFFFFFFFFFFFFFFF
  ELSE
    dst[i+63:i] := 0
  FI
ENDFOR
dst[MAX:512] := 0

AVX512DQ / immintrin.h / Miscellaneous
Set each bit of mask register "k" based on the most significant bit of the corresponding packed 64-bit integer in "a".

FOR j := 0 to 7
  i := j*64
  IF a[i+63]
    k[j] := 1
  ELSE
    k[j] := 0
  FI
ENDFOR
k[MAX:8] := 0

AVX512DQ / immintrin.h / Miscellaneous
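The four move entries above form two inverse pairs: sign bits gathered into a mask, and mask bits expanded back to all-ones/all-zeros elements. A Rust sketch of the 32-bit pair (function names are ours, not stdarch's):

// Collect the most significant bit of each element into a mask.
fn movepi32_mask(a: [i32; 16]) -> u16 {
    let mut k = 0u16;
    for j in 0..16 {
        if a[j] < 0 { // a[i+31] set, i.e. the sign bit of element j
            k |= 1 << j;
        }
    }
    k
}

// Expand each mask bit to 0xFFFFFFFF or 0.
fn movm_epi32(k: u16) -> [i32; 16] {
    let mut dst = [0i32; 16];
    for j in 0..16 {
        if (k >> j) & 1 == 1 {
            dst[j] = -1; // all ones
        }
    }
    dst
}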
Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = absolute max.
imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit.

DEFINE RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) {
  CASE opCtl[1:0] OF
  0: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src1[63:0] : src2[63:0]
  1: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src2[63:0] : src1[63:0]
  2: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src1[63:0] : src2[63:0]
  3: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src2[63:0] : src1[63:0]
  ESAC

  CASE signSelCtl[1:0] OF
  0: dst[63:0] := (src1[63] << 63) OR (tmp[62:0])
  1: dst[63:0] := tmp[63:0]
  2: dst[63:0] := (0 << 63) OR (tmp[62:0])
  3: dst[63:0] := (1 << 63) OR (tmp[62:0])
  ESAC

  RETURN dst
}
FOR j := 0 to 7
  i := j*64
  IF k[j]
    dst[i+63:i] := RANGE(a[i+63:i], b[i+63:i], imm8[1:0], imm8[3:2])
  ELSE
    dst[i+63:i] := src[i+63:i]
  FI
ENDFOR
dst[MAX:512] := 0

AVX512DQ / immintrin.h / Miscellaneous
Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = absolute max.
imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. [sae_note]

Using RANGE() as defined above:
FOR j := 0 to 7
  i := j*64
  IF k[j]
    dst[i+63:i] := RANGE(a[i+63:i], b[i+63:i], imm8[1:0], imm8[3:2])
  ELSE
    dst[i+63:i] := src[i+63:i]
  FI
ENDFOR
dst[MAX:512] := 0

AVX512DQ / immintrin.h / Miscellaneous
Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = absolute max.
imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit.

Using RANGE() as defined above:
FOR j := 0 to 7
  i := j*64
  IF k[j]
    dst[i+63:i] := RANGE(a[i+63:i], b[i+63:i], imm8[1:0], imm8[3:2])
  ELSE
    dst[i+63:i] := 0
  FI
ENDFOR
dst[MAX:512] := 0

AVX512DQ / immintrin.h / Miscellaneous
Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = absolute max.
imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. [sae_note]

Using RANGE() as defined above:
FOR j := 0 to 7
  i := j*64
  IF k[j]
    dst[i+63:i] := RANGE(a[i+63:i], b[i+63:i], imm8[1:0], imm8[3:2])
  ELSE
    dst[i+63:i] := 0
  FI
ENDFOR
dst[MAX:512] := 0

AVX512DQ / immintrin.h / Miscellaneous
Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst".
imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = absolute max.
imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit.

Using RANGE() as defined above:
FOR j := 0 to 7
  i := j*64
  dst[i+63:i] := RANGE(a[i+63:i], b[i+63:i], imm8[1:0], imm8[3:2])
ENDFOR
dst[MAX:512] := 0

AVX512DQ / immintrin.h / Miscellaneous
Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst".
imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = absolute max.
imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. [sae_note]

Using RANGE() as defined above:
FOR j := 0 to 7
  i := j*64
  dst[i+63:i] := RANGE(a[i+63:i], b[i+63:i], imm8[1:0], imm8[3:2])
ENDFOR
dst[MAX:512] := 0

AVX512DQ / immintrin.h / Miscellaneous
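As a cross-check of the RANGE() pseudocode, a direct Rust transcription for one f64 lane (illustrative only; it assumes copysign/abs reproduce the bitwise sign operations, which holds for IEEE-754 f64):

// op_ctl = imm8[1:0]: 00 min, 01 max, 10 absolute min, 11 absolute max.
// sign_ctl = imm8[3:2]: 00 sign from src1, 01 sign from the compare
// result, 10 clear sign bit, 11 set sign bit.
fn range_f64(src1: f64, src2: f64, op_ctl: u8, sign_ctl: u8) -> f64 {
    let tmp = match op_ctl & 0b11 {
        0 => if src1 <= src2 { src1 } else { src2 },
        1 => if src1 <= src2 { src2 } else { src1 },
        2 => if src1.abs() <= src2.abs() { src1 } else { src2 },
        _ => if src1.abs() <= src2.abs() { src2 } else { src1 },
    };
    match sign_ctl & 0b11 {
        0 => tmp.copysign(src1), // (src1[63] << 63) OR tmp[62:0]
        1 => tmp,                // sign from the compare result
        2 => tmp.abs(),          // clear sign bit
        _ => -tmp.abs(),         // set sign bit
    }
}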
- - - - - - - Calculate the max, min, absolute max, or absolute min (depending on control in - "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and - store the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = - absolute max. - imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, - 10 = clear sign bit, 11 = set sign bit. - - DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) { - CASE opCtl[1:0] OF - 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] - 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] - 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] - 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] - ESAC - - CASE signSelCtl[1:0] OF - 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) - 1: dst[31:0] := tmp[63:0] - 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) - 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) - ESAC - - RETURN dst - } - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = absolute max. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + +DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) { + CASE opCtl[1:0] OF + 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] + 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] + 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] + 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] + ESAC + + CASE signSelCtl[1:0] OF + 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) + 1: dst[31:0] := tmp[63:0] + 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) + 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) + ESAC + + RETURN dst +} +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Miscellaneous
- - - - - - - - Calculate the max, min, absolute max, or absolute min (depending on control in - "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and - store the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = - absolute max. - imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, - 10 = clear sign bit, 11 = set sign bit. [sae_note] - - DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) { - CASE opCtl[1:0] OF - 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] - 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] - 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] - 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] - ESAC - - CASE signSelCtl[1:0] OF - 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) - 1: dst[31:0] := tmp[63:0] - 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) - 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) - ESAC - - RETURN dst - } - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = absolute max. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. [sae_note] + +DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) { + CASE opCtl[1:0] OF + 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] + 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] + 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] + 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] + ESAC + + CASE signSelCtl[1:0] OF + 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) + 1: dst[31:0] := tmp[31:0] + 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) + 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) + ESAC + + RETURN dst +} +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Miscellaneous
- - - - - - Calculate the max, min, absolute max, or absolute min (depending on control in - "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and - store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = - absolute max. - imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, - 10 = clear sign bit, 11 = set sign bit. - - DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) { - CASE opCtl[1:0] OF - 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] - 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] - 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] - 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] - ESAC - - CASE signSelCtl[1:0] OF - 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) - 1: dst[31:0] := tmp[63:0] - 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) - 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) - ESAC - - RETURN dst - } - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = absolute max. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + +DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) { + CASE opCtl[1:0] OF + 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] + 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] + 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] + 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] + ESAC + + CASE signSelCtl[1:0] OF + 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) + 1: dst[31:0] := tmp[31:0] + 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) + 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) + ESAC + + RETURN dst +} +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Miscellaneous
- - - - - - - Calculate the max, min, absolute max, or absolute min (depending on control in - "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and - store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = - absolute max. - imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, - 10 = clear sign bit, 11 = set sign bit. [sae_note] - - DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) { - CASE opCtl[1:0] OF - 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] - 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] - 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] - 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] - ESAC - - CASE signSelCtl[1:0] OF - 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) - 1: dst[31:0] := tmp[63:0] - 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) - 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) - ESAC - - RETURN dst - } - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = absolute max. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. [sae_note] + +DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) { + CASE opCtl[1:0] OF + 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] + 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] + 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] + 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] + ESAC + + CASE signSelCtl[1:0] OF + 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) + 1: dst[31:0] := tmp[31:0] + 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) + 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) + ESAC + + RETURN dst +} +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Miscellaneous
- - - - - Calculate the max, min, absolute max, or absolute min (depending on control in - "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and - store the results in "dst". - imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = - absolute max. - imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, - 10 = clear sign bit, 11 = set sign bit. - - DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) { - CASE opCtl[1:0] OF - 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] - 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] - 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] - 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] - ESAC - - CASE signSelCtl[1:0] OF - 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) - 1: dst[31:0] := tmp[63:0] - 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) - 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) - ESAC - - RETURN dst - } - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst". + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = absolute max. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + +DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) { + CASE opCtl[1:0] OF + 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] + 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] + 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] + 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] + ESAC + + CASE signSelCtl[1:0] OF + 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) + 1: dst[31:0] := tmp[31:0] + 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) + 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) + ESAC + + RETURN dst +} +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Miscellaneous
- - - - - - Calculate the max, min, absolute max, or absolute min (depending on control in - "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and - store the results in "dst". - imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = - absolute max. - imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, - 10 = clear sign bit, 11 = set sign bit. [sae_note] - - DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) { - CASE opCtl[1:0] OF - 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] - 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] - 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] - 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] - ESAC - - CASE signSelCtl[1:0] OF - 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) - 1: dst[31:0] := tmp[63:0] - 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) - 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) - ESAC - - RETURN dst - } - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst". + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = absolute max. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. [sae_note] + +DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) { + CASE opCtl[1:0] OF + 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] + 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] + 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] + 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] + ESAC + + CASE signSelCtl[1:0] OF + 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) + 1: dst[31:0] := tmp[31:0] + 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) + 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) + ESAC + + RETURN dst +} +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
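The ten packed-single entries above differ only in the outer loop: writemask forms fall back to the matching lane of "src", zeromask forms to zero, and the unmasked forms compute every lane. A sketch of just that masking step, with the scalar helper passed in as a closure (names illustrative, not Intel's API):

    // k is a 16-bit lane mask as in __mmask16; zero_masking selects the
    // maskz_ behaviour (lane zeroed) over the mask_ behaviour (lane
    // copied from src) when a mask bit is clear.
    fn apply_range_mask(
        src: &[f32; 16],
        k: u16,
        a: &[f32; 16],
        b: &[f32; 16],
        zero_masking: bool,
        range: impl Fn(f32, f32) -> f32,
    ) -> [f32; 16] {
        let mut dst = [0.0f32; 16];
        for j in 0..16 {
            dst[j] = if k & (1 << j) != 0 {
                range(a[j], b[j])
            } else if zero_masking {
                0.0
            } else {
                src[j]
            };
        }
        dst
    }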
+ Miscellaneous
- - - - - - - - Calculate the max, min, absolute max, or absolute min (depending on control in - "imm8") for the lower double-precision (64-bit) floating-point element in "a" and "b", - store the result in the lower element of "dst" using writemask "k" (the element is - copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to - the upper element of "dst". - imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = - absolute max. - imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, - 10 = clear sign bit, 11 = set sign bit. [sae_note] - - DEFINE RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) { - CASE opCtl[1:0] OF - 0: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src1[63:0] : src2[63:0] - 1: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src2[63:0] : src1[63:0] - 2: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src1[63:0] : src2[63:0] - 3: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src2[63:0] : src1[63:0] - ESAC - - CASE signSelCtl[1:0] OF - 0: dst[63:0] := (src1[63] << 63) OR (tmp[62:0]) - 1: dst[63:0] := tmp[63:0] - 2: dst[63:0] := (0 << 63) OR (tmp[62:0]) - 3: dst[63:0] := (1 << 63) OR (tmp[62:0]) - ESAC - - RETURN dst - } - IF k[0] - dst[63:0] := RANGE(a[63:0], b[63:0], imm8[1:0], imm8[3:2]) - ELSE - dst[63:0] := src[63:0] - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = absolute max. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. [sae_note] + +DEFINE RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) { + CASE opCtl[1:0] OF + 0: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src1[63:0] : src2[63:0] + 1: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src2[63:0] : src1[63:0] + 2: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src1[63:0] : src2[63:0] + 3: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src2[63:0] : src1[63:0] + ESAC + + CASE signSelCtl[1:0] OF + 0: dst[63:0] := (src1[63] << 63) OR (tmp[62:0]) + 1: dst[63:0] := tmp[63:0] + 2: dst[63:0] := (0 << 63) OR (tmp[62:0]) + 3: dst[63:0] := (1 << 63) OR (tmp[62:0]) + ESAC + + RETURN dst +} +IF k[0] + dst[63:0] := RANGE(a[63:0], b[63:0], imm8[1:0], imm8[3:2]) +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512DQ +
immintrin.h
+ Miscellaneous
- - - - - - - Calculate the max, min, absolute max, or absolute min (depending on control in - "imm8") for the lower double-precision (64-bit) floating-point element in "a" and "b", - store the result in the lower element of "dst" using writemask "k" (the element is - copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to - the upper element of "dst". - imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = - absolute max. - imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, - 10 = clear sign bit, 11 = set sign bit. - - DEFINE RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) { - CASE opCtl[1:0] OF - 0: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src1[63:0] : src2[63:0] - 1: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src2[63:0] : src1[63:0] - 2: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src1[63:0] : src2[63:0] - 3: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src2[63:0] : src1[63:0] - ESAC - - CASE signSelCtl[1:0] OF - 0: dst[63:0] := (src1[63] << 63) OR (tmp[62:0]) - 1: dst[63:0] := tmp[63:0] - 2: dst[63:0] := (0 << 63) OR (tmp[62:0]) - 3: dst[63:0] := (1 << 63) OR (tmp[62:0]) - ESAC - - RETURN dst - } - IF k[0] - dst[63:0] := RANGE(a[63:0], b[63:0], imm8[1:0], imm8[3:2]) - ELSE - dst[63:0] := src[63:0] - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = absolute max. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + +DEFINE RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) { + CASE opCtl[1:0] OF + 0: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src1[63:0] : src2[63:0] + 1: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src2[63:0] : src1[63:0] + 2: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src1[63:0] : src2[63:0] + 3: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src2[63:0] : src1[63:0] + ESAC + + CASE signSelCtl[1:0] OF + 0: dst[63:0] := (src1[63] << 63) OR (tmp[62:0]) + 1: dst[63:0] := tmp[63:0] + 2: dst[63:0] := (0 << 63) OR (tmp[62:0]) + 3: dst[63:0] := (1 << 63) OR (tmp[62:0]) + ESAC + + RETURN dst +} +IF k[0] + dst[63:0] := RANGE(a[63:0], b[63:0], imm8[1:0], imm8[3:2]) +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512DQ +
immintrin.h
+ Miscellaneous
- - - - - - - Calculate the max, min, absolute max, or absolute min (depending on control in - "imm8") for the lower double-precision (64-bit) floating-point element in "a" and "b", - store the result in the lower element of "dst" using zeromask "k" (the element is zeroed - out when mask bit 0 is not set), and copy the upper element from "a" to the upper - element of "dst". - imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = - absolute max. - imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, - 10 = clear sign bit, 11 = set sign bit. [sae_note] - - DEFINE RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) { - CASE opCtl[1:0] OF - 0: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src1[63:0] : src2[63:0] - 1: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src2[63:0] : src1[63:0] - 2: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src1[63:0] : src2[63:0] - 3: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src2[63:0] : src1[63:0] - ESAC - - CASE signSelCtl[1:0] OF - 0: dst[63:0] := (src1[63] << 63) OR (tmp[62:0]) - 1: dst[63:0] := tmp[63:0] - 2: dst[63:0] := (0 << 63) OR (tmp[62:0]) - 3: dst[63:0] := (1 << 63) OR (tmp[62:0]) - ESAC - - RETURN dst - } - IF k[0] - dst[63:0] := RANGE(a[63:0], b[63:0], imm8[1:0], imm8[3:2]) - ELSE - dst[63:0] := 0 - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = absolute max. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. [sae_note] + +DEFINE RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) { + CASE opCtl[1:0] OF + 0: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src1[63:0] : src2[63:0] + 1: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src2[63:0] : src1[63:0] + 2: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src1[63:0] : src2[63:0] + 3: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src2[63:0] : src1[63:0] + ESAC + + CASE signSelCtl[1:0] OF + 0: dst[63:0] := (src1[63] << 63) OR (tmp[62:0]) + 1: dst[63:0] := tmp[63:0] + 2: dst[63:0] := (0 << 63) OR (tmp[62:0]) + 3: dst[63:0] := (1 << 63) OR (tmp[62:0]) + ESAC + + RETURN dst +} +IF k[0] + dst[63:0] := RANGE(a[63:0], b[63:0], imm8[1:0], imm8[3:2]) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512DQ +
immintrin.h
+ Miscellaneous
- - - - - - Calculate the max, min, absolute max, or absolute min (depending on control in - "imm8") for the lower double-precision (64-bit) floating-point element in "a" and "b", - store the result in the lower element of "dst" using zeromask "k" (the element is zeroed - out when mask bit 0 is not set), and copy the upper element from "a" to the upper - element of "dst". - imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = - absolute max. - imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, - 10 = clear sign bit, 11 = set sign bit. - - DEFINE RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) { - CASE opCtl[1:0] OF - 0: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src1[63:0] : src2[63:0] - 1: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src2[63:0] : src1[63:0] - 2: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src1[63:0] : src2[63:0] - 3: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src2[63:0] : src1[63:0] - ESAC - - CASE signSelCtl[1:0] OF - 0: dst[63:0] := (src1[63] << 63) OR (tmp[62:0]) - 1: dst[63:0] := tmp[63:0] - 2: dst[63:0] := (0 << 63) OR (tmp[62:0]) - 3: dst[63:0] := (1 << 63) OR (tmp[62:0]) - ESAC - - RETURN dst - } - IF k[0] - dst[63:0] := RANGE(a[63:0], b[63:0], imm8[1:0], imm8[3:2]) - ELSE - dst[63:0] := 0 - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = absolute max. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + +DEFINE RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) { + CASE opCtl[1:0] OF + 0: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src1[63:0] : src2[63:0] + 1: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src2[63:0] : src1[63:0] + 2: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src1[63:0] : src2[63:0] + 3: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src2[63:0] : src1[63:0] + ESAC + + CASE signSelCtl[1:0] OF + 0: dst[63:0] := (src1[63] << 63) OR (tmp[62:0]) + 1: dst[63:0] := tmp[63:0] + 2: dst[63:0] := (0 << 63) OR (tmp[62:0]) + 3: dst[63:0] := (1 << 63) OR (tmp[62:0]) + ESAC + + RETURN dst +} +IF k[0] + dst[63:0] := RANGE(a[63:0], b[63:0], imm8[1:0], imm8[3:2]) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512DQ +
immintrin.h
+ Miscellaneous
- - - - - - Calculate the max, min, absolute max, or absolute min (depending on control in - "imm8") for the lower double-precision (64-bit) floating-point element in "a" and "b", - store the result in the lower element of "dst", and copy the upper element from "a" to - the upper element of "dst". - imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = - absolute max. - imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, - 10 = clear sign bit, 11 = set sign bit. [sae_note] - - DEFINE RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) { - CASE opCtl[1:0] OF - 0: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src1[63:0] : src2[63:0] - 1: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src2[63:0] : src1[63:0] - 2: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src1[63:0] : src2[63:0] - 3: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src2[63:0] : src1[63:0] - ESAC - - CASE signSelCtl[1:0] OF - 0: dst[63:0] := (src1[63] << 63) OR (tmp[62:0]) - 1: dst[63:0] := tmp[63:0] - 2: dst[63:0] := (0 << 63) OR (tmp[62:0]) - 3: dst[63:0] := (1 << 63) OR (tmp[62:0]) - ESAC - - RETURN dst - } - dst[63:0] := RANGE(a[63:0], b[63:0], imm8[1:0], imm8[3:2]) - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = absolute max. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. [sae_note] + +DEFINE RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) { + CASE opCtl[1:0] OF + 0: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src1[63:0] : src2[63:0] + 1: tmp[63:0] := (src1[63:0] <= src2[63:0]) ? src2[63:0] : src1[63:0] + 2: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src1[63:0] : src2[63:0] + 3: tmp[63:0] := (ABS(src1[63:0]) <= ABS(src2[63:0])) ? src2[63:0] : src1[63:0] + ESAC + + CASE signSelCtl[1:0] OF + 0: dst[63:0] := (src1[63] << 63) OR (tmp[62:0]) + 1: dst[63:0] := tmp[63:0] + 2: dst[63:0] := (0 << 63) OR (tmp[62:0]) + 3: dst[63:0] := (1 << 63) OR (tmp[62:0]) + ESAC + + RETURN dst +} +dst[63:0] := RANGE(a[63:0], b[63:0], imm8[1:0], imm8[3:2]) +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512DQ +
immintrin.h
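The _sd entries above all share the lower-element pattern: only lane 0 is computed (gated on k[0] in the masked forms) and the upper 64 bits of "dst" come straight from "a". The unmasked case, as a sketch with the scalar helper passed in:

    // Lane 0 is RANGE(a[0], b[0]); lane 1 is passed through from a.
    fn range_sd(a: [f64; 2], b: [f64; 2], range: impl Fn(f64, f64) -> f64) -> [f64; 2] {
        [range(a[0], b[0]), a[1]]
    }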
+ Miscellaneous
- - - - - - - - Calculate the max, min, absolute max, or absolute min (depending on control in - "imm8") for the lower single-precision (32-bit) floating-point element in "a" and "b", - store the result in the lower element of "dst" using writemask "k" (the element is - copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from - "a" to the upper elements of "dst". - imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = - absolute max. - imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, - 10 = clear sign bit, 11 = set sign bit. [sae_note] - - DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) { - CASE opCtl[1:0] OF - 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] - 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] - 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] - 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] - ESAC - - CASE signSelCtl[1:0] OF - 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) - 1: dst[31:0] := tmp[31:0] - 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) - 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) - ESAC - - RETURN dst - } - IF k[0] - dst[31:0] := RANGE(a[31:0], b[31:0], imm8[1:0], imm8[3:2]) - ELSE - dst[31:0] := src[31:0] - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = absolute max. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. [sae_note] + +DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) { + CASE opCtl[1:0] OF + 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] + 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] + 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] + 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] + ESAC + + CASE signSelCtl[1:0] OF + 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) + 1: dst[31:0] := tmp[31:0] + 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) + 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) + ESAC + + RETURN dst +} +IF k[0] + dst[31:0] := RANGE(a[31:0], b[31:0], imm8[1:0], imm8[3:2]) +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512DQ +
immintrin.h
+ Miscellaneous
- - - - - - - Calculate the max, min, absolute max, or absolute min (depending on control in - "imm8") for the lower single-precision (32-bit) floating-point element in "a" and "b", - store the result in the lower element of "dst" using writemask "k" (the element is - copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from - "a" to the upper elements of "dst". - imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = - absolute max. - imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, - 10 = clear sign bit, 11 = set sign bit. - - DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) { - CASE opCtl[1:0] OF - 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] - 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] - 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] - 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] - ESAC - - CASE signSelCtl[1:0] OF - 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) - 1: dst[31:0] := tmp[31:0] - 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) - 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) - ESAC - - RETURN dst - } - IF k[0] - dst[31:0] := RANGE(a[31:0], b[31:0], imm8[1:0], imm8[3:2]) - ELSE - dst[31:0] := src[31:0] - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = absolute max. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + +DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) { + CASE opCtl[1:0] OF + 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] + 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] + 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] + 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] + ESAC + + CASE signSelCtl[1:0] OF + 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) + 1: dst[31:0] := tmp[31:0] + 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) + 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) + ESAC + + RETURN dst +} +IF k[0] + dst[31:0] := RANGE(a[31:0], b[31:0], imm8[1:0], imm8[3:2]) +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512DQ +
immintrin.h
+ Miscellaneous
- - - - - - - Calculate the max, min, absolute max, or absolute min (depending on control in - "imm8") for the lower single-precision (32-bit) floating-point element in "a" and "b", - store the result in the lower element of "dst" using zeromask "k" (the element is zeroed - out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the - upper elements of "dst". - imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = - absolute max. - imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, - 10 = clear sign bit, 11 = set sign bit. [sae_note] - - DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) { - CASE opCtl[1:0] OF - 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] - 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] - 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] - 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] - ESAC - - CASE signSelCtl[1:0] OF - 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) - 1: dst[31:0] := tmp[31:0] - 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) - 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) - ESAC - - RETURN dst - } - IF k[0] - dst[31:0] := RANGE(a[31:0], b[31:0], imm8[1:0], imm8[3:2]) - ELSE - dst[31:0] := 0 - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = absolute max. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. [sae_note] + +DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) { + CASE opCtl[1:0] OF + 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] + 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] + 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] + 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] + ESAC + + CASE signSelCtl[1:0] OF + 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) + 1: dst[31:0] := tmp[31:0] + 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) + 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) + ESAC + + RETURN dst +} +IF k[0] + dst[31:0] := RANGE(a[31:0], b[31:0], imm8[1:0], imm8[3:2]) +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512DQ +
immintrin.h
+ Miscellaneous
- - - - - - Calculate the max, min, absolute max, or absolute min (depending on control in - "imm8") for the lower single-precision (32-bit) floating-point element in "a" and "b", - store the result in the lower element of "dst" using zeromask "k" (the element is zeroed - out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the - upper elements of "dst". - imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = - absolute max. - imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, - 10 = clear sign bit, 11 = set sign bit. - - DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) { - CASE opCtl[1:0] OF - 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] - 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] - 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] - 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] - ESAC - - CASE signSelCtl[1:0] OF - 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) - 1: dst[31:0] := tmp[31:0] - 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) - 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) - ESAC - - RETURN dst - } - IF k[0] - dst[31:0] := RANGE(a[31:0], b[31:0], imm8[1:0], imm8[3:2]) - ELSE - dst[31:0] := 0 - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = absolute max. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. + +DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) { + CASE opCtl[1:0] OF + 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] + 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] + 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] + 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] + ESAC + + CASE signSelCtl[1:0] OF + 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) + 1: dst[31:0] := tmp[31:0] + 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) + 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) + ESAC + + RETURN dst +} +IF k[0] + dst[31:0] := RANGE(a[31:0], b[31:0], imm8[1:0], imm8[3:2]) +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512DQ +
immintrin.h
+ Miscellaneous
- - - - - - Calculate the max, min, absolute max, or absolute min (depending on control in - "imm8") for the lower single-precision (32-bit) floating-point element in "a" and "b", - store the result in the lower element of "dst", and copy the upper 3 packed elements - from "a" to the upper elements of "dst". - imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = - absolute max. - imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, - 10 = clear sign bit, 11 = set sign bit. [sae_note] - - DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) { - CASE opCtl[1:0] OF - 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] - 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] - 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] - 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] - ESAC - - CASE signSelCtl[1:0] OF - 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) - 1: dst[31:0] := tmp[31:0] - 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) - 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) - ESAC - - RETURN dst - } - dst[31:0] := RANGE(a[31:0], b[31:0], imm8[1:0], imm8[3:2]) - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + + + Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = absolute max. + imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. [sae_note] + +DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) { + CASE opCtl[1:0] OF + 0: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src1[31:0] : src2[31:0] + 1: tmp[31:0] := (src1[31:0] <= src2[31:0]) ? src2[31:0] : src1[31:0] + 2: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src1[31:0] : src2[31:0] + 3: tmp[31:0] := (ABS(src1[31:0]) <= ABS(src2[31:0])) ? src2[31:0] : src1[31:0] + ESAC + + CASE signSelCtl[1:0] OF + 0: dst[31:0] := (src1[31] << 31) OR (tmp[30:0]) + 1: dst[31:0] := tmp[31:0] + 2: dst[31:0] := (0 << 31) OR (tmp[30:0]) + 3: dst[31:0] := (1 << 31) OR (tmp[30:0]) + ESAC + + RETURN dst +} +dst[31:0] := RANGE(a[31:0], b[31:0], imm8[1:0], imm8[3:2]) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512DQ +
immintrin.h
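For all of the RANGE entries, the control byte is assembled as (signSelCtl << 2) | opCtl. A few example encodings read off the tables above (constant names are illustrative):

    const RANGE_MIN_SIGN_FROM_A: i32 = (0b00 << 2) | 0b00;    // 0x0: min, sign of a
    const RANGE_MAX_SIGN_FROM_CMP: i32 = (0b01 << 2) | 0b01;  // 0x5: max, sign of compare result
    const RANGE_ABS_MAX_SIGN_CLEAR: i32 = (0b10 << 2) | 0b11; // 0xB: max(|a|, |b|) with sign cleared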
+ Miscellaneous
- - - - - - Extract the reduced argument of packed double-precision (64-bit) floating-point - elements in "a" by the number of bits specified by "imm8", and store the results in - "dst" using writemask "k" (elements are copied from "src" when the corresponding mask - bit is not set). [round_imm_note] - - DEFINE ReduceArgumentPD(src1[63:0], imm8[7:0]) { - m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) - tmp[63:0] := src1[63:0] - tmp[63:0] - IF IsInf(tmp[63:0]) - tmp[63:0] := FP64(0.0) - FI - RETURN tmp[63:0] - } - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := ReduceArgumentPD(a[i+63:i], imm8[7:0]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + + + Extract the reduced argument of packed double-precision (64-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note] + +DEFINE ReduceArgumentPD(src1[63:0], imm8[7:0]) { + m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) + tmp[63:0] := src1[63:0] - tmp[63:0] + IF IsInf(tmp[63:0]) + tmp[63:0] := FP64(0.0) + FI + RETURN tmp[63:0] +} +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ReduceArgumentPD(a[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
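ReduceArgumentPD keeps m = imm8[7:4] fraction bits of its input and returns the remainder src1 - ROUND(src1), with imm8[3:0] selecting the rounding mode. A minimal per-lane Rust sketch, fixing the rounding mode to round-to-nearest-even rather than decoding imm8[3:0]:

    fn reduce_argument_f64(x: f64, imm8: u8) -> f64 {
        let m = (imm8 >> 4) as i32; // fraction bits to preserve
        let scale = 2.0f64.powi(m);
        // 2^-m * ROUND(2^m * x), i.e. x rounded to m fraction bits.
        let rounded = (x * scale).round_ties_even() / scale;
        let tmp = x - rounded;
        // Mirror the IsInf(tmp) guard in the pseudocode.
        if tmp.is_infinite() { 0.0 } else { tmp }
    }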
+ Miscellaneous
- - - - - - - Extract the reduced argument of packed double-precision (64-bit) floating-point - elements in "a" by the number of bits specified by "imm8", and store the results in - "dst" using writemask "k" (elements are copied from "src" when the corresponding mask - bit is not set). [round_imm_note][sae_note] - - DEFINE ReduceArgumentPD(src1[63:0], imm8[7:0]) { - m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) - tmp[63:0] := src1[63:0] - tmp[63:0] - IF IsInf(tmp[63:0]) - tmp[63:0] := FP64(0.0) - FI - RETURN tmp[63:0] - } - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := ReduceArgumentPD(a[i+63:i], imm8[7:0]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + + + + Extract the reduced argument of packed double-precision (64-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note][sae_note] + +DEFINE ReduceArgumentPD(src1[63:0], imm8[7:0]) { + m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) + tmp[63:0] := src1[63:0] - tmp[63:0] + IF IsInf(tmp[63:0]) + tmp[63:0] := FP64(0.0) + FI + RETURN tmp[63:0] +} +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ReduceArgumentPD(a[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Miscellaneous
- - - - - Extract the reduced argument of packed double-precision (64-bit) floating-point - elements in "a" by the number of bits specified by "imm8", and store the results in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). [round_imm_note] - - DEFINE ReduceArgumentPD(src1[63:0], imm8[7:0]) { - m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) - tmp[63:0] := src1[63:0] - tmp[63:0] - IF IsInf(tmp[63:0]) - tmp[63:0] := FP64(0.0) - FI - RETURN tmp[63:0] - } - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := ReduceArgumentPD(a[i+63:i], imm8[7:0]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + + Extract the reduced argument of packed double-precision (64-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note] + +DEFINE ReduceArgumentPD(src1[63:0], imm8[7:0]) { + m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) + tmp[63:0] := src1[63:0] - tmp[63:0] + IF IsInf(tmp[63:0]) + tmp[63:0] := FP64(0.0) + FI + RETURN tmp[63:0] +} +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ReduceArgumentPD(a[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Miscellaneous
- - - - - - Extract the reduced argument of packed double-precision (64-bit) floating-point - elements in "a" by the number of bits specified by "imm8", and store the results in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). [round_imm_note][sae_note] - - DEFINE ReduceArgumentPD(src1[63:0], imm8[7:0]) { - m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) - tmp[63:0] := src1[63:0] - tmp[63:0] - IF IsInf(tmp[63:0]) - tmp[63:0] := FP64(0.0) - FI - RETURN tmp[63:0] - } - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := ReduceArgumentPD(a[i+63:i], imm8[7:0]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + + + Extract the reduced argument of packed double-precision (64-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note][sae_note] + +DEFINE ReduceArgumentPD(src1[63:0], imm8[7:0]) { + m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) + tmp[63:0] := src1[63:0] - tmp[63:0] + IF IsInf(tmp[63:0]) + tmp[63:0] := FP64(0.0) + FI + RETURN tmp[63:0] +} +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ReduceArgumentPD(a[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Miscellaneous
- - - - Extract the reduced argument of packed double-precision (64-bit) floating-point - elements in "a" by the number of bits specified by "imm8", and store the results in - "dst". [round_imm_note] - - DEFINE ReduceArgumentPD(src1[63:0], imm8[7:0]) { - m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) - tmp[63:0] := src1[63:0] - tmp[63:0] - IF IsInf(tmp[63:0]) - tmp[63:0] := FP64(0.0) - FI - RETURN tmp[63:0] - } - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := ReduceArgumentPD(a[i+63:i], imm8[7:0]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + Extract the reduced argument of packed double-precision (64-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst". [round_imm_note] + +DEFINE ReduceArgumentPD(src1[63:0], imm8[7:0]) { + m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) + tmp[63:0] := src1[63:0] - tmp[63:0] + IF IsInf(tmp[63:0]) + tmp[63:0] := FP64(0.0) + FI + RETURN tmp[63:0] +} +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := ReduceArgumentPD(a[i+63:i], imm8[7:0]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Miscellaneous
- - - - - Extract the reduced argument of packed double-precision (64-bit) floating-point - elements in "a" by the number of bits specified by "imm8", and store the results in - "dst". [round_imm_note][sae_note] - - DEFINE ReduceArgumentPD(src1[63:0], imm8[7:0]) { - m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) - tmp[63:0] := src1[63:0] - tmp[63:0] - IF IsInf(tmp[63:0]) - tmp[63:0] := FP64(0.0) - FI - RETURN tmp[63:0] - } - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := ReduceArgumentPD(a[i+63:i], imm8[7:0]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + + Extract the reduced argument of packed double-precision (64-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst". [round_imm_note][sae_note] + +DEFINE ReduceArgumentPD(src1[63:0], imm8[7:0]) { + m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) + tmp[63:0] := src1[63:0] - tmp[63:0] + IF IsInf(tmp[63:0]) + tmp[63:0] := FP64(0.0) + FI + RETURN tmp[63:0] +} +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := ReduceArgumentPD(a[i+63:i], imm8[7:0]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Miscellaneous
- - - - - - Extract the reduced argument of packed single-precision (32-bit) floating-point - elements in "a" by the number of bits specified by "imm8", and store the results in - "dst" using writemask "k" (elements are copied from "src" when the corresponding mask - bit is not set). [round_imm_note] - - DEFINE ReduceArgumentPS(src1[31:0], imm8[7:0]) { - m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) - tmp[31:0] := src1[31:0] - tmp[31:0] - IF IsInf(tmp[31:0]) - tmp[31:0] := FP32(0.0) - FI - RETURN tmp[31:0] - } - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := ReduceArgumentPS(a[i+31:i], imm8[7:0]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + + + Extract the reduced argument of packed single-precision (32-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note] + +DEFINE ReduceArgumentPS(src1[31:0], imm8[7:0]) { + m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) + tmp[31:0] := src1[31:0] - tmp[31:0] + IF IsInf(tmp[31:0]) + tmp[31:0] := FP32(0.0) + FI + RETURN tmp[31:0] +} +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ReduceArgumentPS(a[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Miscellaneous
- - - - - - - Extract the reduced argument of packed single-precision (32-bit) floating-point - elements in "a" by the number of bits specified by "imm8", and store the results in - "dst" using writemask "k" (elements are copied from "src" when the corresponding mask - bit is not set). [round_imm_note][sae_note] - - DEFINE ReduceArgumentPS(src1[31:0], imm8[7:0]) { - m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) - tmp[31:0] := src1[31:0] - tmp[31:0] - IF IsInf(tmp[31:0]) - tmp[31:0] := FP32(0.0) - FI - RETURN tmp[31:0] - } - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := ReduceArgumentPS(a[i+31:i], imm8[7:0]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + + + + Extract the reduced argument of packed single-precision (32-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note][sae_note] + +DEFINE ReduceArgumentPS(src1[31:0], imm8[7:0]) { + m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) + tmp[31:0] := src1[31:0] - tmp[31:0] + IF IsInf(tmp[31:0]) + tmp[31:0] := FP32(0.0) + FI + RETURN tmp[31:0] +} +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ReduceArgumentPS(a[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Miscellaneous
- - - - - Extract the reduced argument of packed single-precision (32-bit) floating-point - elements in "a" by the number of bits specified by "imm8", and store the results in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). [round_imm_note] - - DEFINE ReduceArgumentPS(src1[31:0], imm8[7:0]) { - m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) - tmp[31:0] := src1[31:0] - tmp[31:0] - IF IsInf(tmp[31:0]) - tmp[31:0] := FP32(0.0) - FI - RETURN tmp[31:0] - } - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := ReduceArgumentPS(a[i+31:i], imm8[7:0]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + + Extract the reduced argument of packed single-precision (32-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note] + +DEFINE ReduceArgumentPS(src1[31:0], imm8[7:0]) { + m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) + tmp[31:0] := src1[31:0] - tmp[31:0] + IF IsInf(tmp[31:0]) + tmp[31:0] := FP32(0.0) + FI + RETURN tmp[31:0] +} +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ReduceArgumentPS(a[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Miscellaneous
- - - - - - Extract the reduced argument of packed single-precision (32-bit) floating-point - elements in "a" by the number of bits specified by "imm8", and store the results in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). [round_imm_note][sae_note] - - DEFINE ReduceArgumentPS(src1[31:0], imm8[7:0]) { - m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) - tmp[31:0] := src1[31:0] - tmp[31:0] - IF IsInf(tmp[31:0]) - tmp[31:0] := FP32(0.0) - FI - RETURN tmp[31:0] - } - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := ReduceArgumentPS(a[i+31:i], imm8[7:0]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + + + Extract the reduced argument of packed single-precision (32-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note][sae_note] + +DEFINE ReduceArgumentPS(src1[31:0], imm8[7:0]) { + m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) + tmp[31:0] := src1[31:0] - tmp[31:0] + IF IsInf(tmp[31:0]) + tmp[31:0] := FP32(0.0) + FI + RETURN tmp[31:0] +} +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ReduceArgumentPS(a[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Miscellaneous
- - - - Extract the reduced argument of packed single-precision (32-bit) floating-point - elements in "a" by the number of bits specified by "imm8", and store the results in - "dst". [round_imm_note] - - DEFINE ReduceArgumentPS(src1[31:0], imm8[7:0]) { - m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) - tmp[31:0] := src1[31:0] - tmp[31:0] - IF IsInf(tmp[31:0]) - tmp[31:0] := FP32(0.0) - FI - RETURN tmp[31:0] - } - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := ReduceArgumentPS(a[i+31:i], imm8[7:0]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + Extract the reduced argument of packed single-precision (32-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst". [round_imm_note] + +DEFINE ReduceArgumentPS(src1[31:0], imm8[7:0]) { + m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) + tmp[31:0] := src1[31:0] - tmp[31:0] + IF IsInf(tmp[31:0]) + tmp[31:0] := FP32(0.0) + FI + RETURN tmp[31:0] +} +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := ReduceArgumentPS(a[i+31:i], imm8[7:0]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Miscellaneous
- - - - - Extract the reduced argument of packed single-precision (32-bit) floating-point - elements in "a" by the number of bits specified by "imm8", and store the results in - "dst". [round_imm_note][sae_note] - - DEFINE ReduceArgumentPS(src1[31:0], imm8[7:0]) { - m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) - tmp[31:0] := src1[31:0] - tmp[31:0] - IF IsInf(tmp[31:0]) - tmp[31:0] := FP32(0.0) - FI - RETURN tmp[31:0] - } - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := ReduceArgumentPS(a[i+31:i], imm8[7:0]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512DQ -
immintrin.h
- Miscellaneous + + + + + Extract the reduced argument of packed single-precision (32-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst". [round_imm_note][sae_note] + +DEFINE ReduceArgumentPS(src1[31:0], imm8[7:0]) { + m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) + tmp[31:0] := src1[31:0] - tmp[31:0] + IF IsInf(tmp[31:0]) + tmp[31:0] := FP32(0.0) + FI + RETURN tmp[31:0] +} +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := ReduceArgumentPS(a[i+31:i], imm8[7:0]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512DQ +
immintrin.h
+ Miscellaneous
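Concretely, ReduceArgument extracts the residue that a limited-precision rounding throws away. Below is a minimal scalar Rust sketch of the pseudocode above; the function name is hypothetical and the rounding control imm8[3:0] is fixed to round-to-nearest-even, so it models the pseudocode rather than any particular intrinsic.

    // Scalar model of ReduceArgumentPS: `m` plays the role of imm8[7:4]
    // (number of fraction bits to preserve).
    fn reduce_argument_ps(src: f32, m: u32) -> f32 {
        let scale = 2.0f32.powi(m as i32);
        // tmp := 2^-m * ROUND(2^m * src)
        let rounded = (scale * src).round_ties_even() / scale;
        let tmp = src - rounded;
        // If the subtraction overflows, the operation is defined to return +0.0.
        if tmp.is_infinite() { 0.0 } else { tmp }
    }

    fn main() {
        // With m = 4, the nearest multiple of 1/16 to 1.72 is 1.75, so the
        // reduced argument is about 1.72 - 1.75 = -0.03.
        println!("{}", reduce_argument_ps(1.72, 4));
    }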
+Extract the reduced argument of the lower double-precision (64-bit) floating-point element in "b" by the number of bits specified by "imm8", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". [round_imm_note]
+
+DEFINE ReduceArgumentPD(src1[63:0], imm8[7:0]) {
+    m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+    tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
+    tmp[63:0] := src1[63:0] - tmp[63:0]
+    IF IsInf(tmp[63:0])
+        tmp[63:0] := FP64(0.0)
+    FI
+    RETURN tmp[63:0]
+}
+IF k[0]
+    dst[63:0] := ReduceArgumentPD(b[63:0], imm8[7:0])
+ELSE
+    dst[63:0] := src[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+AVX512DQ
+immintrin.h
+Miscellaneous

+Extract the reduced argument of the lower double-precision (64-bit) floating-point element in "b" by the number of bits specified by "imm8", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". [round_imm_note][sae_note]
+
+DEFINE ReduceArgumentPD(src1[63:0], imm8[7:0]) {
+    m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+    tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
+    tmp[63:0] := src1[63:0] - tmp[63:0]
+    IF IsInf(tmp[63:0])
+        tmp[63:0] := FP64(0.0)
+    FI
+    RETURN tmp[63:0]
+}
+IF k[0]
+    dst[63:0] := ReduceArgumentPD(b[63:0], imm8[7:0])
+ELSE
+    dst[63:0] := src[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+AVX512DQ
+immintrin.h
+Miscellaneous

+Extract the reduced argument of the lower double-precision (64-bit) floating-point element in "b" by the number of bits specified by "imm8", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". [round_imm_note]
+
+DEFINE ReduceArgumentPD(src1[63:0], imm8[7:0]) {
+    m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+    tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
+    tmp[63:0] := src1[63:0] - tmp[63:0]
+    IF IsInf(tmp[63:0])
+        tmp[63:0] := FP64(0.0)
+    FI
+    RETURN tmp[63:0]
+}
+IF k[0]
+    dst[63:0] := ReduceArgumentPD(b[63:0], imm8[7:0])
+ELSE
+    dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+AVX512DQ
+immintrin.h
+Miscellaneous

+Extract the reduced argument of the lower double-precision (64-bit) floating-point element in "b" by the number of bits specified by "imm8", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". [round_imm_note][sae_note]
+
+DEFINE ReduceArgumentPD(src1[63:0], imm8[7:0]) {
+    m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+    tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
+    tmp[63:0] := src1[63:0] - tmp[63:0]
+    IF IsInf(tmp[63:0])
+        tmp[63:0] := FP64(0.0)
+    FI
+    RETURN tmp[63:0]
+}
+IF k[0]
+    dst[63:0] := ReduceArgumentPD(b[63:0], imm8[7:0])
+ELSE
+    dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+AVX512DQ
+immintrin.h
+Miscellaneous

+Extract the reduced argument of the lower double-precision (64-bit) floating-point element in "b" by the number of bits specified by "imm8", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". [round_imm_note]
+
+DEFINE ReduceArgumentPD(src1[63:0], imm8[7:0]) {
+    m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+    tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
+    tmp[63:0] := src1[63:0] - tmp[63:0]
+    IF IsInf(tmp[63:0])
+        tmp[63:0] := FP64(0.0)
+    FI
+    RETURN tmp[63:0]
+}
+dst[63:0] := ReduceArgumentPD(b[63:0], imm8[7:0])
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+AVX512DQ
+immintrin.h
+Miscellaneous

+Extract the reduced argument of the lower double-precision (64-bit) floating-point element in "b" by the number of bits specified by "imm8", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". [round_imm_note][sae_note]
+
+DEFINE ReduceArgumentPD(src1[63:0], imm8[7:0]) {
+    m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+    tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
+    tmp[63:0] := src1[63:0] - tmp[63:0]
+    IF IsInf(tmp[63:0])
+        tmp[63:0] := FP64(0.0)
+    FI
+    RETURN tmp[63:0]
+}
+dst[63:0] := ReduceArgumentPD(b[63:0], imm8[7:0])
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+AVX512DQ
+immintrin.h
+Miscellaneous
+Extract the reduced argument of the lower single-precision (32-bit) floating-point element in "b" by the number of bits specified by "imm8", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". [round_imm_note]
+
+DEFINE ReduceArgumentPS(src1[31:0], imm8[7:0]) {
+    m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+    tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
+    tmp[31:0] := src1[31:0] - tmp[31:0]
+    IF IsInf(tmp[31:0])
+        tmp[31:0] := FP32(0.0)
+    FI
+    RETURN tmp[31:0]
+}
+IF k[0]
+    dst[31:0] := ReduceArgumentPS(b[31:0], imm8[7:0])
+ELSE
+    dst[31:0] := src[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+AVX512DQ
+immintrin.h
+Miscellaneous

+Extract the reduced argument of the lower single-precision (32-bit) floating-point element in "b" by the number of bits specified by "imm8", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". [round_imm_note][sae_note]
+
+DEFINE ReduceArgumentPS(src1[31:0], imm8[7:0]) {
+    m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+    tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
+    tmp[31:0] := src1[31:0] - tmp[31:0]
+    IF IsInf(tmp[31:0])
+        tmp[31:0] := FP32(0.0)
+    FI
+    RETURN tmp[31:0]
+}
+IF k[0]
+    dst[31:0] := ReduceArgumentPS(b[31:0], imm8[7:0])
+ELSE
+    dst[31:0] := src[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+AVX512DQ
+immintrin.h
+Miscellaneous

+Extract the reduced argument of the lower single-precision (32-bit) floating-point element in "b" by the number of bits specified by "imm8", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". [round_imm_note]
+
+DEFINE ReduceArgumentPS(src1[31:0], imm8[7:0]) {
+    m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+    tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
+    tmp[31:0] := src1[31:0] - tmp[31:0]
+    IF IsInf(tmp[31:0])
+        tmp[31:0] := FP32(0.0)
+    FI
+    RETURN tmp[31:0]
+}
+IF k[0]
+    dst[31:0] := ReduceArgumentPS(b[31:0], imm8[7:0])
+ELSE
+    dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+AVX512DQ
+immintrin.h
+Miscellaneous

+Extract the reduced argument of the lower single-precision (32-bit) floating-point element in "b" by the number of bits specified by "imm8", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". [round_imm_note][sae_note]
+
+DEFINE ReduceArgumentPS(src1[31:0], imm8[7:0]) {
+    m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+    tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
+    tmp[31:0] := src1[31:0] - tmp[31:0]
+    IF IsInf(tmp[31:0])
+        tmp[31:0] := FP32(0.0)
+    FI
+    RETURN tmp[31:0]
+}
+IF k[0]
+    dst[31:0] := ReduceArgumentPS(b[31:0], imm8[7:0])
+ELSE
+    dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+AVX512DQ
+immintrin.h
+Miscellaneous

+Extract the reduced argument of the lower single-precision (32-bit) floating-point element in "b" by the number of bits specified by "imm8", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". [round_imm_note]
+
+DEFINE ReduceArgumentPS(src1[31:0], imm8[7:0]) {
+    m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+    tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
+    tmp[31:0] := src1[31:0] - tmp[31:0]
+    IF IsInf(tmp[31:0])
+        tmp[31:0] := FP32(0.0)
+    FI
+    RETURN tmp[31:0]
+}
+dst[31:0] := ReduceArgumentPS(b[31:0], imm8[7:0])
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+AVX512DQ
+immintrin.h
+Miscellaneous

+Extract the reduced argument of the lower single-precision (32-bit) floating-point element in "b" by the number of bits specified by "imm8", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". [round_imm_note][sae_note]
+
+DEFINE ReduceArgumentPS(src1[31:0], imm8[7:0]) {
+    m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+    tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
+    tmp[31:0] := src1[31:0] - tmp[31:0]
+    IF IsInf(tmp[31:0])
+        tmp[31:0] := FP32(0.0)
+    FI
+    RETURN tmp[31:0]
+}
+dst[31:0] := ReduceArgumentPS(b[31:0], imm8[7:0])
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+AVX512DQ
+immintrin.h
+Miscellaneous
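The scalar forms differ from the packed forms only in lane routing: lane 0 is computed from "b" (gated by mask bit 0) while the remaining lanes always come from "a". A sketch of that merge for the masked `ss` case, with hypothetical names and the same round-to-nearest simplification as above:

    // Merge semantics of the masked scalar form: only lane 0 is computed;
    // lanes 1..=3 always come from `a`.
    fn reduce_ss_mask(src: [f32; 4], k: u8, a: [f32; 4], b: [f32; 4], m: u32) -> [f32; 4] {
        let scale = 2.0f32.powi(m as i32);
        let reduce = |x: f32| {
            let tmp = x - (scale * x).round_ties_even() / scale;
            if tmp.is_infinite() { 0.0 } else { tmp }
        };
        let mut dst = a; // dst[127:32] := a[127:32]
        dst[0] = if k & 1 != 0 { reduce(b[0]) } else { src[0] };
        dst
    }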
+Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst".
+[round_note]
+
+FOR j := 0 to 7
+    i := j*64
+    dst[i+63:i] := Convert_FP64_To_Int64(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+AVX512DQ
+immintrin.h
+Convert

+Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst".
+
+FOR j := 0 to 7
+    i := j*64
+    dst[i+63:i] := Convert_FP64_To_Int64(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+AVX512DQ
+immintrin.h
+Convert

+Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+[round_note]
+
+FOR j := 0 to 7
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := Convert_FP64_To_Int64(a[i+63:i])
+    ELSE
+        dst[i+63:i] := src[i+63:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+AVX512DQ
+immintrin.h
+Convert

+Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 7
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := Convert_FP64_To_Int64(a[i+63:i])
+    ELSE
+        dst[i+63:i] := src[i+63:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+AVX512DQ
+immintrin.h
+Convert

+Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+[round_note]
+
+FOR j := 0 to 7
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := Convert_FP64_To_Int64(a[i+63:i])
+    ELSE
+        dst[i+63:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+AVX512DQ
+immintrin.h
+Convert

+Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 7
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := Convert_FP64_To_Int64(a[i+63:i])
+    ELSE
+        dst[i+63:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+AVX512DQ
+immintrin.h
+Convert
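The writemask/zeromask pattern in these conversion loops is uniform: each destination lane is either the converted source lane, the old "src" lane (writemask), or zero (zeromask). A sketch of the masked f64 -> i64 loop in plain Rust, with a hypothetical name, assuming round-to-nearest for [round_note] and in-range inputs (Rust's `as` saturates out-of-range values, where the hardware instead produces an integer-indefinite result):

    // Writemask form of the f64 -> i64 conversion loop.
    fn mask_cvtpd_epi64(src: [i64; 8], k: u8, a: [f64; 8]) -> [i64; 8] {
        let mut dst = [0i64; 8];
        for j in 0..8 {
            dst[j] = if k & (1 << j) != 0 {
                a[j].round_ties_even() as i64 // mask bit set: convert
            } else {
                src[j] // mask bit clear: keep the src lane
            };
        }
        dst
    }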
+Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst".
+[round_note]
+
+FOR j := 0 to 7
+    i := j*64
+    dst[i+63:i] := Convert_FP64_To_UInt64(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+AVX512DQ
+immintrin.h
+Convert

+Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst".
+
+FOR j := 0 to 7
+    i := j*64
+    dst[i+63:i] := Convert_FP64_To_UInt64(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+AVX512DQ
+immintrin.h
+Convert

+Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+[round_note]
+
+FOR j := 0 to 7
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := Convert_FP64_To_UInt64(a[i+63:i])
+    ELSE
+        dst[i+63:i] := src[i+63:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+AVX512DQ
+immintrin.h
+Convert

+Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 7
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := Convert_FP64_To_UInt64(a[i+63:i])
+    ELSE
+        dst[i+63:i] := src[i+63:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+AVX512DQ
+immintrin.h
+Convert

+Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+[round_note]
+
+FOR j := 0 to 7
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := Convert_FP64_To_UInt64(a[i+63:i])
+    ELSE
+        dst[i+63:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+AVX512DQ
+immintrin.h
+Convert

+Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 7
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := Convert_FP64_To_UInt64(a[i+63:i])
+    ELSE
+        dst[i+63:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+AVX512DQ
+immintrin.h
+Convert
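The unsigned variants follow the same scheme; in the zeromask form the only behavioural difference is that unselected lanes become 0 rather than coming from "src". A sketch under the same in-range assumption (negative inputs are out of range for an unsigned destination, and Rust's `as` clamps them to 0 where hardware would not):

    // Zeromask form of the f64 -> u64 conversion loop (hypothetical name).
    fn maskz_cvtpd_epu64(k: u8, a: [f64; 8]) -> [u64; 8] {
        let mut dst = [0u64; 8]; // mask bit clear: lane stays 0
        for j in 0..8 {
            if k & (1 << j) != 0 {
                dst[j] = a[j].round_ties_even() as u64;
            }
        }
        dst
    }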
+Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst".
+[round_note]
+
+FOR j := 0 to 7
+    i := j*64
+    l := j*32
+    dst[i+63:i] := Convert_FP32_To_Int64(a[l+31:l])
+ENDFOR
+dst[MAX:512] := 0
+AVX512DQ
+immintrin.h
+Convert

+Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst".
+
+FOR j := 0 to 7
+    i := j*64
+    l := j*32
+    dst[i+63:i] := Convert_FP32_To_Int64(a[l+31:l])
+ENDFOR
+dst[MAX:512] := 0
+AVX512DQ
+immintrin.h
+Convert

+Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+[round_note]
+
+FOR j := 0 to 7
+    i := j*64
+    l := j*32
+    IF k[j]
+        dst[i+63:i] := Convert_FP32_To_Int64(a[l+31:l])
+    ELSE
+        dst[i+63:i] := src[i+63:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+AVX512DQ
+immintrin.h
+Convert

+Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 7
+    i := j*64
+    l := j*32
+    IF k[j]
+        dst[i+63:i] := Convert_FP32_To_Int64(a[l+31:l])
+    ELSE
+        dst[i+63:i] := src[i+63:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+AVX512DQ
+immintrin.h
+Convert

+Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+[round_note]
+
+FOR j := 0 to 7
+    i := j*64
+    l := j*32
+    IF k[j]
+        dst[i+63:i] := Convert_FP32_To_Int64(a[l+31:l])
+    ELSE
+        dst[i+63:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+AVX512DQ
+immintrin.h
+Convert

+Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 7
+    i := j*64
+    l := j*32
+    IF k[j]
+        dst[i+63:i] := Convert_FP32_To_Int64(a[l+31:l])
+    ELSE
+        dst[i+63:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+AVX512DQ
+immintrin.h
+Convert
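The `i := j*64, l := j*32` index pair simply encodes a widening conversion: each source element is half the width of its destination, so eight f32 lanes (a 256-bit source) fill eight i64 lanes (a 512-bit destination). In array form the double indexing disappears; the sketch below (hypothetical name, in-range inputs, round-to-nearest) is the whole loop:

    // Widening f32 -> i64 conversion: 8 x 32-bit sources, 8 x 64-bit results.
    fn cvtps_epi64_model(a: [f32; 8]) -> [i64; 8] {
        let mut dst = [0i64; 8];
        for j in 0..8 {
            dst[j] = a[j].round_ties_even() as i64;
        }
        dst
    }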
+Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst".
+[round_note]
+
+FOR j := 0 to 7
+    i := j*64
+    l := j*32
+    dst[i+63:i] := Convert_FP32_To_UInt64(a[l+31:l])
+ENDFOR
+dst[MAX:512] := 0
+AVX512DQ
+immintrin.h
+Convert

+Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst".
+
+FOR j := 0 to 7
+    i := j*64
+    l := j*32
+    dst[i+63:i] := Convert_FP32_To_UInt64(a[l+31:l])
+ENDFOR
+dst[MAX:512] := 0
+AVX512DQ
+immintrin.h
+Convert

+Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+[round_note]
+
+FOR j := 0 to 7
+    i := j*64
+    l := j*32
+    IF k[j]
+        dst[i+63:i] := Convert_FP32_To_UInt64(a[l+31:l])
+    ELSE
+        dst[i+63:i] := src[i+63:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+AVX512DQ
+immintrin.h
+Convert

+Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 7
+    i := j*64
+    l := j*32
+    IF k[j]
+        dst[i+63:i] := Convert_FP32_To_UInt64(a[l+31:l])
+    ELSE
+        dst[i+63:i] := src[i+63:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+AVX512DQ
+immintrin.h
+Convert

+Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+[round_note]
+
+FOR j := 0 to 7
+    i := j*64
+    l := j*32
+    IF k[j]
+        dst[i+63:i] := Convert_FP32_To_UInt64(a[l+31:l])
+    ELSE
+        dst[i+63:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+AVX512DQ
+immintrin.h
+Convert

+Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 7
+    i := j*64
+    l := j*32
+    IF k[j]
+        dst[i+63:i] := Convert_FP32_To_UInt64(a[l+31:l])
+    ELSE
+        dst[i+63:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+AVX512DQ
+immintrin.h
+Convert
+Convert packed signed 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst".
+[round_note]
+
+FOR j := 0 to 7
+    i := j*64
+    dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+AVX512DQ
+immintrin.h
+Convert

+Convert packed signed 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst".
+
+FOR j := 0 to 7
+    i := j*64
+    dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+AVX512DQ
+immintrin.h
+Convert

+Convert packed signed 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+[round_note]
+
+FOR j := 0 to 7
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i])
+    ELSE
+        dst[i+63:i] := src[i+63:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+AVX512DQ
+immintrin.h
+Convert

+Convert packed signed 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 7
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i])
+    ELSE
+        dst[i+63:i] := src[i+63:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+AVX512DQ
+immintrin.h
+Convert

+Convert packed signed 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+[round_note]
+
+FOR j := 0 to 7
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i])
+    ELSE
+        dst[i+63:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+AVX512DQ
+immintrin.h
+Convert

+Convert packed signed 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 7
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i])
+    ELSE
+        dst[i+63:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+AVX512DQ
+immintrin.h
+Convert
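The [round_note] on the signed-64-bit to double direction exists because not every i64 fits in an f64: above 2^53 the 53-bit significand runs out of bits and the conversion must round. A small self-contained demonstration (Rust's `as` rounds to nearest-even, matching the default rounding mode):

    fn main() {
        // 2^53 + 1 is the first positive integer an f64 cannot represent exactly.
        let x: i64 = (1 << 53) + 1;
        let d = x as f64; // rounds to nearest-even
        println!("{}", d as i64); // prints 9007199254740992, not ...993
    }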
+Convert packed signed 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".
+[round_note]
+
+FOR j := 0 to 7
+    i := j*64
+    l := j*32
+    dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+AVX512DQ
+immintrin.h
+Convert

+Convert packed signed 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".
+
+FOR j := 0 to 7
+    i := j*64
+    l := j*32
+    dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+AVX512DQ
+immintrin.h
+Convert

+Convert packed signed 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+[round_note]
+
+FOR j := 0 to 7
+    i := j*64
+    l := j*32
+    IF k[j]
+        dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i])
+    ELSE
+        dst[l+31:l] := src[l+31:l]
+    FI
+ENDFOR
+dst[MAX:256] := 0
+AVX512DQ
+immintrin.h
+Convert

+Convert packed signed 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 7
+    i := j*64
+    l := j*32
+    IF k[j]
+        dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i])
+    ELSE
+        dst[l+31:l] := src[l+31:l]
+    FI
+ENDFOR
+dst[MAX:256] := 0
+AVX512DQ
+immintrin.h
+Convert

+Convert packed signed 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+[round_note]
+
+FOR j := 0 to 7
+    i := j*64
+    l := j*32
+    IF k[j]
+        dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i])
+    ELSE
+        dst[l+31:l] := 0
+    FI
+ENDFOR
+dst[MAX:256] := 0
+AVX512DQ
+immintrin.h
+Convert

+Convert packed signed 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 7
+    i := j*64
+    l := j*32
+    IF k[j]
+        dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i])
+    ELSE
+        dst[l+31:l] := 0
+    FI
+ENDFOR
+dst[MAX:256] := 0
+AVX512DQ
+immintrin.h
+Convert
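Going the other way to f32, the result register shrinks: eight i64 lanes produce eight f32 lanes, so a 512-bit source yields only a 256-bit result and the pseudocode zeroes everything above bit 255 (`dst[MAX:256] := 0`). A minimal sketch (hypothetical name):

    // Narrowing i64 -> f32 conversion: a 512-bit source yields a 256-bit
    // result; the instruction zeroes the bits above it.
    fn cvtepi64_ps_model(a: [i64; 8]) -> [f32; 8] {
        let mut dst = [0.0f32; 8];
        for j in 0..8 {
            dst[j] = a[j] as f32; // rounds to the nearest representable f32
        }
        dst
    }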
+  Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst". [sae_note]
+      FOR j := 0 to 7
+          i := j*64
+          dst[i+63:i] := Convert_FP64_To_Int64_Truncate(a[i+63:i])
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512DQ | immintrin.h | Convert

+  Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst".
+      FOR j := 0 to 7
+          i := j*64
+          dst[i+63:i] := Convert_FP64_To_Int64_Truncate(a[i+63:i])
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512DQ | immintrin.h | Convert

+  Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [sae_note]
+      FOR j := 0 to 7
+          i := j*64
+          IF k[j]
+              dst[i+63:i] := Convert_FP64_To_Int64_Truncate(a[i+63:i])
+          ELSE
+              dst[i+63:i] := src[i+63:i]
+          FI
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512DQ | immintrin.h | Convert

+  Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+      FOR j := 0 to 7
+          i := j*64
+          IF k[j]
+              dst[i+63:i] := Convert_FP64_To_Int64_Truncate(a[i+63:i])
+          ELSE
+              dst[i+63:i] := src[i+63:i]
+          FI
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512DQ | immintrin.h | Convert

+  Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note]
+      FOR j := 0 to 7
+          i := j*64
+          IF k[j]
+              dst[i+63:i] := Convert_FP64_To_Int64_Truncate(a[i+63:i])
+          ELSE
+              dst[i+63:i] := 0
+          FI
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512DQ | immintrin.h | Convert

+  Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+      FOR j := 0 to 7
+          i := j*64
+          IF k[j]
+              dst[i+63:i] := Convert_FP64_To_Int64_Truncate(a[i+63:i])
+          ELSE
+              dst[i+63:i] := 0
+          FI
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512DQ | immintrin.h | Convert
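The six entries above differ only in masking (none / writemask / zeromask) and in whether the intrinsic takes a suppress-all-exceptions parameter ([sae_note]); the per-lane operation is the same truncating f64 -> i64 conversion throughout. A minimal scalar sketch of the three masking forms, assuming plain arrays stand in for the __m512d/__m512i registers and a u8 for __mmask8; the helper names are illustrative, not core_arch APIs, and Rust's `as` saturates on out-of-range input where the hardware stores an indefinite value:

    // Unmasked: every lane is converted (truncation toward zero).
    fn cvttpd_epi64(a: [f64; 8]) -> [i64; 8] {
        a.map(|x| x as i64)
    }

    // Writemask: unselected lanes are copied from src.
    fn mask_cvttpd_epi64(src: [i64; 8], k: u8, a: [f64; 8]) -> [i64; 8] {
        core::array::from_fn(|j| if (k >> j) & 1 != 0 { a[j] as i64 } else { src[j] })
    }

    // Zeromask: unselected lanes are zeroed.
    fn maskz_cvttpd_epi64(k: u8, a: [f64; 8]) -> [i64; 8] {
        core::array::from_fn(|j| if (k >> j) & 1 != 0 { a[j] as i64 } else { 0 })
    }

    fn main() {
        let a = [1.9, -2.9, 3.5, -0.5, 10.0, -10.0, 0.0, 7.7];
        assert_eq!(cvttpd_epi64(a)[0], 1); // truncated, not rounded
        assert_eq!(mask_cvttpd_epi64([9; 8], 0b0000_0001, a)[1], 9);
        assert_eq!(maskz_cvttpd_epi64(0b0000_0010, a), [0, -2, 0, 0, 0, 0, 0, 0]);
    }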
+  Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst". [sae_note]
+      FOR j := 0 to 7
+          i := j*64
+          dst[i+63:i] := Convert_FP64_To_UInt64_Truncate(a[i+63:i])
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512DQ | immintrin.h | Convert

+  Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst".
+      FOR j := 0 to 7
+          i := j*64
+          dst[i+63:i] := Convert_FP64_To_UInt64_Truncate(a[i+63:i])
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512DQ | immintrin.h | Convert

+  Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [sae_note]
+      FOR j := 0 to 7
+          i := j*64
+          IF k[j]
+              dst[i+63:i] := Convert_FP64_To_UInt64_Truncate(a[i+63:i])
+          ELSE
+              dst[i+63:i] := src[i+63:i]
+          FI
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512DQ | immintrin.h | Convert

+  Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+      FOR j := 0 to 7
+          i := j*64
+          IF k[j]
+              dst[i+63:i] := Convert_FP64_To_UInt64_Truncate(a[i+63:i])
+          ELSE
+              dst[i+63:i] := src[i+63:i]
+          FI
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512DQ | immintrin.h | Convert

+  Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note]
+      FOR j := 0 to 7
+          i := j*64
+          IF k[j]
+              dst[i+63:i] := Convert_FP64_To_UInt64_Truncate(a[i+63:i])
+          ELSE
+              dst[i+63:i] := 0
+          FI
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512DQ | immintrin.h | Convert

+  Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+      FOR j := 0 to 7
+          i := j*64
+          IF k[j]
+              dst[i+63:i] := Convert_FP64_To_UInt64_Truncate(a[i+63:i])
+          ELSE
+              dst[i+63:i] := 0
+          FI
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512DQ | immintrin.h | Convert
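The unsigned variants follow the same pattern with Convert_FP64_To_UInt64_Truncate. One behavioural caveat for a scalar model: negative or too-large inputs are out of range for the unsigned conversion, and to my understanding the hardware stores an "integer indefinite" value there, while Rust's `as` saturates, so the two only agree for in-range inputs. A sketch under that assumption (illustrative name):

    // Rust `as` truncates toward zero and saturates to [0, u64::MAX]; hardware
    // behaviour on out-of-range input differs, so this models in-range lanes only.
    fn cvttpd_epu64(a: [f64; 8]) -> [u64; 8] {
        a.map(|x| x as u64)
    }

    fn main() {
        let r = cvttpd_epu64([0.0, 1.99, 42.5, 1e18, -1.0, 2.5, 3.5, 4.5]);
        assert_eq!(r[1], 1);
        assert_eq!(r[4], 0); // saturated here; not what the instruction stores
    }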
+  Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst". [sae_note]
+      FOR j := 0 to 7
+          i := j*64
+          l := j*32
+          dst[i+63:i] := Convert_FP32_To_Int64_Truncate(a[l+31:l])
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512DQ | immintrin.h | Convert

+  Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst".
+      FOR j := 0 to 7
+          i := j*64
+          l := j*32
+          dst[i+63:i] := Convert_FP32_To_Int64_Truncate(a[l+31:l])
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512DQ | immintrin.h | Convert

+  Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [sae_note]
+      FOR j := 0 to 7
+          i := j*64
+          l := j*32
+          IF k[j]
+              dst[i+63:i] := Convert_FP32_To_Int64_Truncate(a[l+31:l])
+          ELSE
+              dst[i+63:i] := src[i+63:i]
+          FI
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512DQ | immintrin.h | Convert

+  Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+      FOR j := 0 to 7
+          i := j*64
+          l := j*32
+          IF k[j]
+              dst[i+63:i] := Convert_FP32_To_Int64_Truncate(a[l+31:l])
+          ELSE
+              dst[i+63:i] := src[i+63:i]
+          FI
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512DQ | immintrin.h | Convert

+  Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note]
+      FOR j := 0 to 7
+          i := j*64
+          l := j*32
+          IF k[j]
+              dst[i+63:i] := Convert_FP32_To_Int64_Truncate(a[l+31:l])
+          ELSE
+              dst[i+63:i] := 0
+          FI
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512DQ | immintrin.h | Convert

+  Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+      FOR j := 0 to 7
+          i := j*64
+          l := j*32
+          IF k[j]
+              dst[i+63:i] := Convert_FP32_To_Int64_Truncate(a[l+31:l])
+          ELSE
+              dst[i+63:i] := 0
+          FI
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512DQ | immintrin.h | Convert
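These ps -> epi64 conversions widen, which is why the pseudocode walks two indices: the source lane sits at l := j*32 (so only the low 256 bits of "a" are consumed) while the destination lane sits at i := j*64. A scalar model of that index mapping, with arrays standing in for the registers (illustrative name):

    fn cvttps_epi64(a: [f32; 8]) -> [i64; 8] {
        // output lane j comes from f32 lane j of the low 256-bit half
        core::array::from_fn(|j| a[j] as i64)
    }

    fn main() {
        let a = [0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5];
        assert_eq!(cvttps_epi64(a), [0, 1, 2, 3, 4, 5, 6, 7]);
    }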
+  Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst". [sae_note]
+      FOR j := 0 to 7
+          i := j*64
+          l := j*32
+          dst[i+63:i] := Convert_FP32_To_UInt64_Truncate(a[l+31:l])
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512DQ | immintrin.h | Convert

+  Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst".
+      FOR j := 0 to 7
+          i := j*64
+          l := j*32
+          dst[i+63:i] := Convert_FP32_To_UInt64_Truncate(a[l+31:l])
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512DQ | immintrin.h | Convert

+  Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [sae_note]
+      FOR j := 0 to 7
+          i := j*64
+          l := j*32
+          IF k[j]
+              dst[i+63:i] := Convert_FP32_To_UInt64_Truncate(a[l+31:l])
+          ELSE
+              dst[i+63:i] := src[i+63:i]
+          FI
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512DQ | immintrin.h | Convert

+  Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+      FOR j := 0 to 7
+          i := j*64
+          l := j*32
+          IF k[j]
+              dst[i+63:i] := Convert_FP32_To_UInt64_Truncate(a[l+31:l])
+          ELSE
+              dst[i+63:i] := src[i+63:i]
+          FI
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512DQ | immintrin.h | Convert

+  Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note]
+      FOR j := 0 to 7
+          i := j*64
+          l := j*32
+          IF k[j]
+              dst[i+63:i] := Convert_FP32_To_UInt64_Truncate(a[l+31:l])
+          ELSE
+              dst[i+63:i] := 0
+          FI
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512DQ | immintrin.h | Convert

+  Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+      FOR j := 0 to 7
+          i := j*64
+          l := j*32
+          IF k[j]
+              dst[i+63:i] := Convert_FP32_To_UInt64_Truncate(a[l+31:l])
+          ELSE
+              dst[i+63:i] := 0
+          FI
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512DQ | immintrin.h | Convert
+  Convert packed unsigned 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst". [round_note]
+      FOR j := 0 to 7
+          i := j*64
+          dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i])
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512DQ | immintrin.h | Convert

+  Convert packed unsigned 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst".
+      FOR j := 0 to 7
+          i := j*64
+          dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i])
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512DQ | immintrin.h | Convert

+  Convert packed unsigned 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_note]
+      FOR j := 0 to 7
+          i := j*64
+          IF k[j]
+              dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i])
+          ELSE
+              dst[i+63:i] := src[i+63:i]
+          FI
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512DQ | immintrin.h | Convert

+  Convert packed unsigned 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+      FOR j := 0 to 7
+          i := j*64
+          IF k[j]
+              dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i])
+          ELSE
+              dst[i+63:i] := src[i+63:i]
+          FI
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512DQ | immintrin.h | Convert

+  Convert packed unsigned 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_note]
+      FOR j := 0 to 7
+          i := j*64
+          IF k[j]
+              dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i])
+          ELSE
+              dst[i+63:i] := 0
+          FI
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512DQ | immintrin.h | Convert

+  Convert packed unsigned 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+      FOR j := 0 to 7
+          i := j*64
+          IF k[j]
+              dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i])
+          ELSE
+              dst[i+63:i] := 0
+          FI
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512DQ | immintrin.h | Convert
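The [round_note] on these int -> fp entries matters because u64 -> f64 is inexact above 2^53: the selected rounding mode decides which neighbouring double is produced. Rust's `as` rounds to nearest with ties to even, which corresponds to the _MM_FROUND_TO_NEAREST_INT default; a sketch (illustrative name):

    fn cvtepu64_pd(a: [u64; 8]) -> [f64; 8] {
        a.map(|x| x as f64) // round-to-nearest-even
    }

    fn main() {
        let big = (1u64 << 53) + 1; // not representable as f64
        let r = cvtepu64_pd([big, 0, 1, 2, 3, 4, 5, 6]);
        assert_eq!(r[0] as u64, 1u64 << 53); // rounded to the even neighbour
    }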
+  Convert packed unsigned 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". [round_note]
+      FOR j := 0 to 7
+          i := j*64
+          l := j*32
+          dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i])
+      ENDFOR
+      dst[MAX:256] := 0
+  AVX512DQ | immintrin.h | Convert

+  Convert packed unsigned 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".
+      FOR j := 0 to 7
+          i := j*64
+          l := j*32
+          dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i])
+      ENDFOR
+      dst[MAX:256] := 0
+  AVX512DQ | immintrin.h | Convert

+  Convert packed unsigned 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_note]
+      FOR j := 0 to 7
+          i := j*64
+          l := j*32
+          IF k[j]
+              dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i])
+          ELSE
+              dst[l+31:l] := src[l+31:l]
+          FI
+      ENDFOR
+      dst[MAX:256] := 0
+  AVX512DQ | immintrin.h | Convert

+  Convert packed unsigned 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+      FOR j := 0 to 7
+          i := j*64
+          l := j*32
+          IF k[j]
+              dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i])
+          ELSE
+              dst[l+31:l] := src[l+31:l]
+          FI
+      ENDFOR
+      dst[MAX:256] := 0
+  AVX512DQ | immintrin.h | Convert

+  Convert packed unsigned 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_note]
+      FOR j := 0 to 7
+          i := j*64
+          l := j*32
+          IF k[j]
+              dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i])
+          ELSE
+              dst[l+31:l] := 0
+          FI
+      ENDFOR
+      dst[MAX:256] := 0
+  AVX512DQ | immintrin.h | Convert

+  Convert packed unsigned 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+      FOR j := 0 to 7
+          i := j*64
+          l := j*32
+          IF k[j]
+              dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i])
+          ELSE
+              dst[l+31:l] := 0
+          FI
+      ENDFOR
+      dst[MAX:256] := 0
+  AVX512DQ | immintrin.h | Convert
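This direction narrows: eight 64-bit sources produce eight f32 lanes, so the result fills only 256 bits and the pseudocode zeroes dst[MAX:256]. In a scalar model the narrowing is just the element type (illustrative name):

    fn cvtepu64_ps(a: [u64; 8]) -> [f32; 8] {
        // dst lane l := j*32 comes from src lane i := j*64
        core::array::from_fn(|j| a[j] as f32)
    }

    fn main() {
        assert_eq!(cvtepu64_ps([1, 2, 3, 4, 5, 6, 7, 8])[7], 8.0);
    }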
+  Multiply the packed 64-bit integers in "a" and "b", producing intermediate 128-bit integers, and store the low 64 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+      FOR j := 0 to 7
+          i := j*64
+          IF k[j]
+              tmp[127:0] := a[i+63:i] * b[i+63:i]
+              dst[i+63:i] := tmp[63:0]
+          ELSE
+              dst[i+63:i] := src[i+63:i]
+          FI
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512DQ | immintrin.h | Arithmetic

+  Multiply the packed 64-bit integers in "a" and "b", producing intermediate 128-bit integers, and store the low 64 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+      FOR j := 0 to 7
+          i := j*64
+          IF k[j]
+              tmp[127:0] := a[i+63:i] * b[i+63:i]
+              dst[i+63:i] := tmp[63:0]
+          ELSE
+              dst[i+63:i] := 0
+          FI
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512DQ | immintrin.h | Arithmetic

+  Multiply the packed 64-bit integers in "a" and "b", producing intermediate 128-bit integers, and store the low 64 bits of the intermediate integers in "dst".
+      FOR j := 0 to 7
+          i := j*64
+          tmp[127:0] := a[i+63:i] * b[i+63:i]
+          dst[i+63:i] := tmp[63:0]
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512DQ | immintrin.h | Arithmetic
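Forming tmp[127:0] and keeping tmp[63:0] is exactly wrapping (modular) multiplication, so a scalar model does not strictly need the 128-bit intermediate; it is kept below only to mirror the pseudocode (illustrative name):

    fn mullo_epi64(a: [i64; 8], b: [i64; 8]) -> [i64; 8] {
        core::array::from_fn(|j| {
            let tmp = (a[j] as i128) * (b[j] as i128); // tmp[127:0]
            tmp as i64 // tmp[63:0]
        })
    }

    fn main() {
        let r = mullo_epi64([i64::MAX, -3, 0, 0, 0, 0, 0, 0], [2; 8]);
        assert_eq!(r[0], i64::MAX.wrapping_mul(2)); // low 64 bits only
        assert_eq!(r[1], -6);
    }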
+  Add 8-bit masks in "a" and "b", and store the result in "k".
+      k[7:0] := a[7:0] + b[7:0]
+      k[MAX:8] := 0
+  AVX512DQ | immintrin.h | Mask

+  Add 16-bit masks in "a" and "b", and store the result in "k".
+      k[15:0] := a[15:0] + b[15:0]
+      k[MAX:16] := 0
+  AVX512DQ | immintrin.h | Mask
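kadd is a plain modular add on the mask register; k[MAX:8] := 0 only says that the upper bits of the wider k register are cleared. With a u8 standing in for __mmask8 (illustrative name):

    fn kadd_mask8(a: u8, b: u8) -> u8 {
        a.wrapping_add(b)
    }

    fn main() {
        assert_eq!(kadd_mask8(0b1111_1111, 0b0000_0001), 0); // wraps modulo 2^8
    }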
+  Compute the bitwise AND of 8-bit masks "a" and "b", and store the result in "k".
+      k[7:0] := a[7:0] AND b[7:0]
+      k[MAX:8] := 0
+  AVX512DQ | immintrin.h | Mask

+  Compute the bitwise NOT of 8-bit mask "a" and then AND with "b", and store the result in "k".
+      k[7:0] := (NOT a[7:0]) AND b[7:0]
+      k[MAX:8] := 0
+  AVX512DQ | immintrin.h | Mask

+  Compute the bitwise NOT of 8-bit mask "a", and store the result in "k".
+      k[7:0] := NOT a[7:0]
+      k[MAX:8] := 0
+  AVX512DQ | immintrin.h | Mask

+  Compute the bitwise OR of 8-bit masks "a" and "b", and store the result in "k".
+      k[7:0] := a[7:0] OR b[7:0]
+      k[MAX:8] := 0
+  AVX512DQ | immintrin.h | Mask

+  Compute the bitwise XNOR of 8-bit masks "a" and "b", and store the result in "k".
+      k[7:0] := NOT (a[7:0] XOR b[7:0])
+      k[MAX:8] := 0
+  AVX512DQ | immintrin.h | Mask

+  Compute the bitwise XOR of 8-bit masks "a" and "b", and store the result in "k".
+      k[7:0] := a[7:0] XOR b[7:0]
+      k[MAX:8] := 0
+  AVX512DQ | immintrin.h | Mask
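The mask logic family maps one-to-one onto integer bitwise operators, so a u8 model is exact (illustrative names):

    fn kand_mask8(a: u8, b: u8) -> u8 { a & b }
    fn kandn_mask8(a: u8, b: u8) -> u8 { !a & b }
    fn knot_mask8(a: u8) -> u8 { !a }
    fn kor_mask8(a: u8, b: u8) -> u8 { a | b }
    fn kxnor_mask8(a: u8, b: u8) -> u8 { !(a ^ b) }
    fn kxor_mask8(a: u8, b: u8) -> u8 { a ^ b }

    fn main() {
        let (a, b) = (0b1100_1010u8, 0b1010_0101u8);
        assert_eq!(kand_mask8(a, b), 0b1000_0000);
        assert_eq!(kandn_mask8(a, b), 0b0010_0101);
        assert_eq!(kxnor_mask8(a, b), !kxor_mask8(a, b));
        assert_eq!(knot_mask8(kor_mask8(a, b)), !(a | b));
    }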
+  Shift the bits of 8-bit mask "a" left by "count" while shifting in zeros, and store the least significant 8 bits of the result in "k".
+      k[MAX:0] := 0
+      IF count[7:0] <= 7
+          k[7:0] := a[7:0] << count[7:0]
+      FI
+  AVX512DQ | immintrin.h | Mask

+  Shift the bits of 8-bit mask "a" right by "count" while shifting in zeros, and store the least significant 8 bits of the result in "k".
+      k[MAX:0] := 0
+      IF count[7:0] <= 7
+          k[7:0] := a[7:0] >> count[7:0]
+      FI
+  AVX512DQ | immintrin.h | Mask
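Note the guard in the shift pseudocode: k is zeroed first and the shift only happens when count <= 7, so an out-of-range count yields 0 rather than the masked shift a plain << would give. A u8 model has to reproduce that check explicitly (illustrative names):

    fn kshiftli_mask8(a: u8, count: u32) -> u8 {
        if count <= 7 { a << count } else { 0 }
    }

    fn kshiftri_mask8(a: u8, count: u32) -> u8 {
        if count <= 7 { a >> count } else { 0 }
    }

    fn main() {
        assert_eq!(kshiftli_mask8(0b1000_0001, 1), 0b0000_0010); // high bit shifts out
        assert_eq!(kshiftri_mask8(0b1000_0001, 8), 0); // out-of-range count clears k
    }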
+  Compute the bitwise OR of 8-bit masks "a" and "b". If the result is all zeros, store 1 in "dst", otherwise store 0 in "dst". If the result is all ones, store 1 in "all_ones", otherwise store 0 in "all_ones".
+      tmp[7:0] := a[7:0] OR b[7:0]
+      IF tmp[7:0] == 0x0
+          dst := 1
+      ELSE
+          dst := 0
+      FI
+      IF tmp[7:0] == 0xFF
+          MEM[all_ones+7:all_ones] := 1
+      ELSE
+          MEM[all_ones+7:all_ones] := 0
+      FI
+  AVX512DQ | immintrin.h | Mask

+  Compute the bitwise OR of 8-bit masks "a" and "b". If the result is all zeroes, store 1 in "dst", otherwise store 0 in "dst".
+      tmp[7:0] := a[7:0] OR b[7:0]
+      IF tmp[7:0] == 0x0
+          dst := 1
+      ELSE
+          dst := 0
+      FI
+  AVX512DQ | immintrin.h | Mask

+  Compute the bitwise OR of 8-bit masks "a" and "b". If the result is all ones, store 1 in "dst", otherwise store 0 in "dst".
+      tmp[7:0] := a[7:0] OR b[7:0]
+      IF tmp[7:0] == 0xFF
+          dst := 1
+      ELSE
+          dst := 0
+      FI
+  AVX512DQ | immintrin.h | Mask
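kortest computes a OR b once and reports two predicates: "all zeros" (the return value, the ZF analogue) and "all ones" (written through the all_ones pointer, the CF analogue); the z/c variants expose one predicate each. A safe-Rust model can simply return both flags as a tuple instead of using an out-pointer (illustrative name):

    fn kortest_mask8(a: u8, b: u8) -> (u8, u8) {
        let tmp = a | b;
        ((tmp == 0x00) as u8, (tmp == 0xFF) as u8) // (dst, *all_ones)
    }

    fn main() {
        assert_eq!(kortest_mask8(0x00, 0x00), (1, 0));
        assert_eq!(kortest_mask8(0xF0, 0x0F), (0, 1));
        assert_eq!(kortest_mask8(0x01, 0x02), (0, 0));
    }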
+  Compute the bitwise AND of 8-bit masks "a" and "b", and if the result is all zeros, store 1 in "dst", otherwise store 0 in "dst". Compute the bitwise NOT of "a" and then AND with "b", if the result is all zeros, store 1 in "and_not", otherwise store 0 in "and_not".
+      tmp1[7:0] := a[7:0] AND b[7:0]
+      IF tmp1[7:0] == 0x0
+          dst := 1
+      ELSE
+          dst := 0
+      FI
+      tmp2[7:0] := (NOT a[7:0]) AND b[7:0]
+      IF tmp2[7:0] == 0x0
+          MEM[and_not+7:and_not] := 1
+      ELSE
+          MEM[and_not+7:and_not] := 0
+      FI
+  AVX512DQ | immintrin.h | Mask

+  Compute the bitwise AND of 8-bit masks "a" and "b", and if the result is all zeros, store 1 in "dst", otherwise store 0 in "dst".
+      tmp[7:0] := a[7:0] AND b[7:0]
+      IF tmp[7:0] == 0x0
+          dst := 1
+      ELSE
+          dst := 0
+      FI
+  AVX512DQ | immintrin.h | Mask

+  Compute the bitwise NOT of 8-bit mask "a" and then AND with "b", if the result is all zeroes, store 1 in "dst", otherwise store 0 in "dst".
+      tmp[7:0] := (NOT a[7:0]) AND b[7:0]
+      IF tmp[7:0] == 0x0
+          dst := 1
+      ELSE
+          dst := 0
+      FI
+  AVX512DQ | immintrin.h | Mask

+  Compute the bitwise AND of 16-bit masks "a" and "b", and if the result is all zeros, store 1 in "dst", otherwise store 0 in "dst". Compute the bitwise NOT of "a" and then AND with "b", if the result is all zeros, store 1 in "and_not", otherwise store 0 in "and_not".
+      tmp1[15:0] := a[15:0] AND b[15:0]
+      IF tmp1[15:0] == 0x0
+          dst := 1
+      ELSE
+          dst := 0
+      FI
+      tmp2[15:0] := (NOT a[15:0]) AND b[15:0]
+      IF tmp2[15:0] == 0x0
+          MEM[and_not+7:and_not] := 1
+      ELSE
+          MEM[and_not+7:and_not] := 0
+      FI
+  AVX512DQ | immintrin.h | Mask

+  Compute the bitwise AND of 16-bit masks "a" and "b", and if the result is all zeros, store 1 in "dst", otherwise store 0 in "dst".
+      tmp[15:0] := a[15:0] AND b[15:0]
+      IF tmp[15:0] == 0x0
+          dst := 1
+      ELSE
+          dst := 0
+      FI
+  AVX512DQ | immintrin.h | Mask

+  Compute the bitwise NOT of 16-bit mask "a" and then AND with "b", if the result is all zeroes, store 1 in "dst", otherwise store 0 in "dst".
+      tmp[15:0] := (NOT a[15:0]) AND b[15:0]
+      IF tmp[15:0] == 0x0
+          dst := 1
+      ELSE
+          dst := 0
+      FI
+  AVX512DQ | immintrin.h | Mask
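ktest likewise performs two tests at once: whether a AND b is all zeros (dst) and whether (NOT a) AND b is all zeros (the and_not output); the second firing means b's set bits are a subset of a's. Modeled with a tuple in place of the out-pointer (illustrative name):

    fn ktest_mask8(a: u8, b: u8) -> (u8, u8) {
        (((a & b) == 0) as u8, ((!a & b) == 0) as u8) // (dst, *and_not)
    }

    fn main() {
        assert_eq!(ktest_mask8(0b1111_0000, 0b1100_0000), (0, 1)); // b subset of a
        assert_eq!(ktest_mask8(0b1111_0000, 0b0000_1111), (1, 0)); // disjoint masks
    }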
+  Convert 8-bit mask "a" into an integer value, and store the result in "dst".
+      dst := ZeroExtend32(a[7:0])
+  AVX512DQ | immintrin.h | Mask

+  Convert integer value "a" into an 8-bit mask, and store the result in "k".
+      k := a[7:0]
+  AVX512DQ | immintrin.h | Mask
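Mask-to-integer conversion is zero-extension one way and truncation to the low 8 bits the other; nothing else happens (illustrative names):

    fn cvtmask8_u32(a: u8) -> u32 {
        a as u32 // ZeroExtend32(a[7:0])
    }

    fn cvtu32_mask8(a: u32) -> u8 {
        a as u8 // k := a[7:0]
    }

    fn main() {
        assert_eq!(cvtmask8_u32(0xAB), 0x0000_00AB);
        assert_eq!(cvtu32_mask8(0x1234_56AB), 0xAB);
    }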
+  Load 8-bit mask from memory into "k".
+      k[7:0] := MEM[mem_addr+7:mem_addr]
+  AVX512DQ | immintrin.h | Load
+  Store 8-bit mask from "a" into memory.
+      MEM[mem_addr+7:mem_addr] := a[7:0]
+  AVX512DQ | immintrin.h | Store

+  Compute the inverse cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".
+      FOR j := 0 to 7
+          i := j*64
+          dst[i+63:i] := ACOS(a[i+63:i])
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512F | immintrin.h | Trigonometry
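From here on the entries are SVML-style vector math functions; note that no instruction is listed, only the AVX512F CPUID, because these lower to call sequences rather than single instructions. The masked variants follow the usual writemask pattern; a scalar sketch of the masked acos, with arrays standing in for __m512d and a u8 for __mmask8 (illustrative name):

    fn mask_acos_pd(src: [f64; 8], k: u8, a: [f64; 8]) -> [f64; 8] {
        core::array::from_fn(|j| if (k >> j) & 1 != 0 { a[j].acos() } else { src[j] })
    }

    fn main() {
        let r = mask_acos_pd([-1.0; 8], 0b0000_0001, [1.0; 8]);
        assert_eq!(r[0], 0.0);  // acos(1) = 0, computed lane
        assert_eq!(r[1], -1.0); // masked-off lane copied from src
    }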
+  Compute the inverse cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+      FOR j := 0 to 7
+          i := j*64
+          IF k[j]
+              dst[i+63:i] := ACOS(a[i+63:i])
+          ELSE
+              dst[i+63:i] := src[i+63:i]
+          FI
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512F | immintrin.h | Trigonometry

+  Compute the inverse cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".
+      FOR j := 0 to 15
+          i := j*32
+          dst[i+31:i] := ACOS(a[i+31:i])
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512F | immintrin.h | Trigonometry

+  Compute the inverse cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+      FOR j := 0 to 15
+          i := j*32
+          IF k[j]
+              dst[i+31:i] := ACOS(a[i+31:i])
+          ELSE
+              dst[i+31:i] := src[i+31:i]
+          FI
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512F | immintrin.h | Trigonometry
+  Compute the inverse hyperbolic cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".
+      FOR j := 0 to 7
+          i := j*64
+          dst[i+63:i] := ACOSH(a[i+63:i])
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512F | immintrin.h | Trigonometry

+  Compute the inverse hyperbolic cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+      FOR j := 0 to 7
+          i := j*64
+          IF k[j]
+              dst[i+63:i] := ACOSH(a[i+63:i])
+          ELSE
+              dst[i+63:i] := src[i+63:i]
+          FI
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512F | immintrin.h | Trigonometry

+  Compute the inverse hyperbolic cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".
+      FOR j := 0 to 15
+          i := j*32
+          dst[i+31:i] := ACOSH(a[i+31:i])
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512F | immintrin.h | Trigonometry

+  Compute the inverse hyperbolic cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+      FOR j := 0 to 15
+          i := j*32
+          IF k[j]
+              dst[i+31:i] := ACOSH(a[i+31:i])
+          ELSE
+              dst[i+31:i] := src[i+31:i]
+          FI
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512F | immintrin.h | Trigonometry
+  Compute the inverse sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".
+      FOR j := 0 to 7
+          i := j*64
+          dst[i+63:i] := ASIN(a[i+63:i])
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512F | immintrin.h | Trigonometry

+  Compute the inverse sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+      FOR j := 0 to 7
+          i := j*64
+          IF k[j]
+              dst[i+63:i] := ASIN(a[i+63:i])
+          ELSE
+              dst[i+63:i] := src[i+63:i]
+          FI
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512F | immintrin.h | Trigonometry

+  Compute the inverse sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".
+      FOR j := 0 to 15
+          i := j*32
+          dst[i+31:i] := ASIN(a[i+31:i])
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512F | immintrin.h | Trigonometry

+  Compute the inverse sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+      FOR j := 0 to 15
+          i := j*32
+          IF k[j]
+              dst[i+31:i] := ASIN(a[i+31:i])
+          ELSE
+              dst[i+31:i] := src[i+31:i]
+          FI
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512F | immintrin.h | Trigonometry
+  Compute the inverse hyperbolic sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".
+      FOR j := 0 to 7
+          i := j*64
+          dst[i+63:i] := ASINH(a[i+63:i])
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512F | immintrin.h | Trigonometry

+  Compute the inverse hyperbolic sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+      FOR j := 0 to 7
+          i := j*64
+          IF k[j]
+              dst[i+63:i] := ASINH(a[i+63:i])
+          ELSE
+              dst[i+63:i] := src[i+63:i]
+          FI
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512F | immintrin.h | Trigonometry

+  Compute the inverse hyperbolic sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".
+      FOR j := 0 to 15
+          i := j*32
+          dst[i+31:i] := ASINH(a[i+31:i])
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512F | immintrin.h | Trigonometry

+  Compute the inverse hyperbolic sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+      FOR j := 0 to 15
+          i := j*32
+          IF k[j]
+              dst[i+31:i] := ASINH(a[i+31:i])
+          ELSE
+              dst[i+31:i] := src[i+31:i]
+          FI
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512F | immintrin.h | Trigonometry
+  Compute the inverse tangent of packed double-precision (64-bit) floating-point elements in "a" divided by packed elements in "b", and store the results in "dst" expressed in radians.
+      FOR j := 0 to 7
+          i := j*64
+          dst[i+63:i] := ATAN2(a[i+63:i], b[i+63:i])
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512F | immintrin.h | Trigonometry

+  Compute the inverse tangent of packed double-precision (64-bit) floating-point elements in "a" divided by packed elements in "b", and store the results in "dst" expressed in radians using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+      FOR j := 0 to 7
+          i := j*64
+          IF k[j]
+              dst[i+63:i] := ATAN2(a[i+63:i], b[i+63:i])
+          ELSE
+              dst[i+63:i] := src[i+63:i]
+          FI
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512F | immintrin.h | Trigonometry

+  Compute the inverse tangent of packed single-precision (32-bit) floating-point elements in "a" divided by packed elements in "b", and store the results in "dst" expressed in radians.
+      FOR j := 0 to 15
+          i := j*32
+          dst[i+31:i] := ATAN2(a[i+31:i], b[i+31:i])
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512F | immintrin.h | Trigonometry

+  Compute the inverse tangent of packed single-precision (32-bit) floating-point elements in "a" divided by packed elements in "b", and store the results in "dst" expressed in radians using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+      FOR j := 0 to 15
+          i := j*32
+          IF k[j]
+              dst[i+31:i] := ATAN2(a[i+31:i], b[i+31:i])
+          ELSE
+              dst[i+31:i] := src[i+31:i]
+          FI
+      ENDFOR
+      dst[MAX:512] := 0
+  AVX512F | immintrin.h | Trigonometry
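atan2 is the only two-input function in this block. The pseudocode's ATAN2(a, b) is the inverse tangent of a/b resolved by quadrant, which matches Rust's a.atan2(b), where self is the y (numerator) operand; a scalar sketch (illustrative name):

    fn atan2_pd(a: [f64; 8], b: [f64; 8]) -> [f64; 8] {
        core::array::from_fn(|j| a[j].atan2(b[j]))
    }

    fn main() {
        let r = atan2_pd([1.0; 8], [1.0; 8]);
        assert!((r[0] - std::f64::consts::FRAC_PI_4).abs() < 1e-12); // atan(1/1) = pi/4
    }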
- - - Compute the inverse tangent of packed double-precision (64-bit) floating-point - elements in "a" and store the results in "dst" expressed in radians. - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := ATAN(a[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + Compute the inverse tangent of packed double-precision (64-bit) floating-point elements in "a" and store the results in "dst" expressed in radians. + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := ATAN(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
- - - - - Compute the inverse tangent of packed double-precision (64-bit) floating-point - elements in "a", and store the results in "dst" expressed in radians using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := ATAN(a[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + + + Compute the inverse tangent of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" expressed in radians using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ATAN(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
- - - Compute the inverse tangent of packed single-precision (32-bit) floating-point - elements in "a", and store the results in "dst" expressed in radians. - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := ATAN(a[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + Compute the inverse tangent of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" expressed in radians. + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := ATAN(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
- - - - - Compute the inverse tangent of packed single-precision (32-bit) floating-point - elements in "a" expressed in radians, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := ATAN(a[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + + + Compute the inverse tangent of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" expressed in radians using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). +
+FOR j := 0 to 15
+    i := j*32
+    IF k[j]
+        dst[i+31:i] := ATAN(a[i+31:i])
+    ELSE
+        dst[i+31:i] := src[i+31:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+
+ AVX512F
immintrin.h
+ Trigonometry
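[Editorial note] The remaining single-input entries in this section (ATANH, COS, COSH, SIN, SINH, TAN, TANH and their degree variants) differ only in the OP applied per lane, so one hedged sketch covers the shared template (again, the helper name and signature are invented here, not part of the patch):

    // Shared merge-masking shape of the unary entries in this section. The
    // closure stands in for the OP in the pseudocode (ATAN here, later COS,
    // SINH, TANH, ...); masked-off lanes keep their value from `src`.
    fn unary_ps_mask(src: [f32; 16], k: u16, a: [f32; 16], op: impl Fn(f32) -> f32) -> [f32; 16] {
        let mut dst = src;
        for j in 0..16 {
            if (k >> j) & 1 == 1 {
                dst[j] = op(a[j]);
            }
        }
        dst
    }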
- - - Compute the inverse hyperbolic tangent of packed double-precision (64-bit) - floating-point elements in "a" and store the results in "dst" expressed in radians. - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := ATANH(a[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + Compute the inverse hyperbolic tangent of packed double-precision (64-bit) floating-point elements in "a" and store the results in "dst" expressed in radians. + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := ATANH(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
- - - - - Compute the inverse hyperbolic tangent of packed double-precision (64-bit) - floating-point elements in "a", and store the results in "dst" expressed in radians - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := ATANH(a[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + + + Compute the inverse hyperbolic tangent of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" expressed in radians using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ATANH(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
- - - Compute the inverse hyperblic tangent of packed single-precision (32-bit) - floating-point elements in "a", and store the results in "dst" expressed in radians. - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := ATANH(a[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + Compute the inverse hyperbolic tangent of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" expressed in radians. +
+FOR j := 0 to 15
+    i := j*32
+    dst[i+31:i] := ATANH(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+
+ AVX512F
immintrin.h
+ Trigonometry
- - - - - Compute the inverse hyperbolic tangent of packed single-precision (32-bit) - floating-point elements in "a" expressed in radians, and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := ATANH(a[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + + + Compute the inverse hyperbolic tangent of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" expressed in radians using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). +
+FOR j := 0 to 15
+    i := j*32
+    IF k[j]
+        dst[i+31:i] := ATANH(a[i+31:i])
+    ELSE
+        dst[i+31:i] := src[i+31:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+
+ AVX512F
immintrin.h
+ Trigonometry
- - - Compute the cosine of packed double-precision (64-bit) floating-point elements - in "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := COS(a[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + Compute the cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := COS(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
- - - - - Compute the cosine of packed double-precision (64-bit) floating-point elements - in "a" expressed in radians, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := COS(a[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + + + Compute the cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := COS(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
- - - Compute the cosine of packed single-precision (32-bit) floating-point elements - in "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := COS(a[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + Compute the cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := COS(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
- - - - - Compute the cosine of packed single-precision (32-bit) floating-point elements - in "a" expressed in radians, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := COS(a[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + + + Compute the cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := COS(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
- - - Compute the cosine of packed double-precision (64-bit) floating-point elements - in "a" expressed in degrees, and store the results in "dst". - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := COSD(a[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + Compute the cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst". + FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := COSD(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
- - - - - Compute the cosine of packed double-precision (64-bit) floating-point elements - in "a" expressed in degrees, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := COSD(a[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + + + Compute the cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := COSD(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
- - - Compute the cosine of packed single-precision (32-bit) floating-point elements - in "a" expressed in degrees, and store the results in "dst". - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := COSD(a[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + Compute the cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst". + FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := COSD(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
- - - - - Compute the cosine of packed single-precision (32-bit) floating-point elements - in "a" expressed in degrees, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := COSD(a[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + + + Compute the cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := COSD(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
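[Editorial note] The COSD entries above, like the SIND and TAND entries later in this section, interpret their input in degrees rather than radians. A plausible scalar reading (an assumption for exposition: COSD as the radian cosine composed with a degree-to-radian conversion):

    // The *D entries take their argument in degrees; one lane of COSD
    // can be modeled as a unit conversion in front of the radian cosine.
    fn cosd(x: f64) -> f64 {
        x.to_radians().cos() // to_radians() scales by pi/180
    }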
- - - Compute the hyperbolic cosine of packed double-precision (64-bit) - floating-point elements in "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := COSH(a[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + Compute the hyperbolic cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := COSH(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
- - - - - Compute the hyperbolic cosine of packed double-precision (64-bit) - floating-point elements in "a" expressed in radians, and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := COSH(a[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + + + Compute the hyperbolic cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := COSH(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
- - - Compute the hyperbolic cosine of packed single-precision (32-bit) - floating-point elements in "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := COSH(a[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + Compute the hyperbolic cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := COSH(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
- - - - - Compute the hyperbolic cosine of packed single-precision (32-bit) - floating-point elements in "a" expressed in radians, and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := COSH(a[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + + + Compute the hyperbolic cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := COSH(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
- - - Compute the sine of packed double-precision (64-bit) floating-point elements in - "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := SIN(a[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + Compute the sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := SIN(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
- - - - - Compute the sine of packed double-precision (64-bit) floating-point elements in - "a" expressed in radians, and store the results in "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := SIN(a[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + + + Compute the sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := SIN(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
- - - Compute the sine of packed single-precision (32-bit) floating-point elements in - "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := SIN(a[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + Compute the sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := SIN(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
- - - - - Compute the sine of packed single-precision (32-bit) floating-point elements in - "a" expressed in radians, and store the results in "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := SIN(a[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + + + Compute the sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := SIN(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
- - - Compute the hyperbolic sine of packed double-precision (64-bit) floating-point - elements in "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := SINH(a[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + Compute the hyperbolic sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := SINH(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
- - - - - Compute the hyperbolic sine of packed double-precision (64-bit) floating-point - elements in "a" expressed in radians, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := SINH(a[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + + + Compute the hyperbolic sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := SINH(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
- - - Compute the hyperbolic sine of packed single-precision (32-bit) floating-point - elements in "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := SINH(a[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + Compute the hyperbolic sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := SINH(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
- - - - - Compute the hyperbolic sine of packed single-precision (32-bit) floating-point - elements in "a" expressed in radians, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := SINH(a[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + + + Compute the hyperbolic sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := SINH(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
- - - Compute the sine of packed double-precision (64-bit) floating-point elements in - "a" expressed in degrees, and store the results in "dst". - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := SIND(a[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + Compute the sine of packed double-precision (64-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst". + FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := SIND(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
- - - - - Compute the sine of packed double-precision (64-bit) floating-point elements in - "a" expressed in degrees, and store the results in "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := SIND(a[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + + + Compute the sine of packed double-precision (64-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := SIND(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
- - - Compute the sine of packed single-precision (32-bit) floating-point elements in - "a" expressed in degrees, and store the results in "dst". - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := SIND(a[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + Compute the sine of packed single-precision (32-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst". + FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := SIND(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
- - - - - Compute the sine of packed single-precision (32-bit) floating-point elements in - "a" expressed in degrees, and store the results in "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := SIND(a[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + + + Compute the sine of packed single-precision (32-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := SIND(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
- - - Compute the tangent of packed double-precision (64-bit) floating-point elements - in "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := TAN(a[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + Compute the tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := TAN(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
- - - - - Compute the tangent of packed double-precision (64-bit) floating-point elements - in "a" expressed in radians, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := TAN(a[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + + + Compute the tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := TAN(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
- - - Compute the tangent of packed single-precision (32-bit) floating-point elements - in "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := TAN(a[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + Compute the tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := TAN(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
- - - - - Compute the tangent of packed single-precision (32-bit) floating-point elements - in "a" expressed in radians, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := TAN(a[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + + + Compute the tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := TAN(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
- - - Compute the tangent of packed double-precision (64-bit) floating-point elements - in "a" expressed in degrees, and store the results in "dst". - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := TAND(a[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + Compute the tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst". + FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := TAND(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
- - - - - Compute the tangent of packed double-precision (64-bit) floating-point elements - in "a" expressed in degrees, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := TAND(a[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + + + Compute the tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := TAND(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
- - - Compute the tangent of packed single-precision (32-bit) floating-point elements - in "a" expressed in degrees, and store the results in "dst". - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := TAND(a[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + Compute the tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst". + FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := TAND(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
- - - - - Compute the tangent of packed single-precision (32-bit) floating-point elements - in "a" expressed in degrees, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := TAND(a[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + + + Compute the tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := TAND(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
- - - Compute the hyperbolic tangent of packed double-precision (64-bit) - floating-point elements in "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := TANH(a[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + Compute the hyperbolic tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := TANH(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
- - - - - Compute the hyperbolic tangent of packed double-precision (64-bit) - floating-point elements in "a" expressed in radians, and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := TANH(a[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + + + Compute the hyperbolic tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := TANH(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
- - - Compute the hyperbolic tangent of packed single-precision (32-bit) - floating-point elements in "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := TANH(a[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + Compute the hyperbolic tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := TANH(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
- - - - - Compute the hyperbolic tangent of packed single-precision (32-bit) - floating-point elements in "a" expressed in radians, and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := TANH(a[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + + + Compute the hyperbolic tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := TANH(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
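[Editorial note] Each OP used by the trigonometric entries above has a direct scalar counterpart in Rust's standard library (sin, cos, tan, sinh, cosh, tanh), so the `unary_ps_mask` sketch from earlier can model any of them by swapping the closure; for example, for the masked TANH entry:

    // Modeling the masked TANH entry above with the template sketched earlier:
    // the low eight lanes are computed, the high eight keep the `src` values.
    fn tanh_ps_mask_demo() {
        let a = [0.5_f32; 16];
        let src = [0.0_f32; 16];
        let dst = unary_ps_mask(src, 0x00ff, a, f32::tanh);
        assert_eq!(dst[0], 0.5_f32.tanh());
        assert_eq!(dst[15], 0.0);
    }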
- - - - Compute the sine and cosine of packed double-precision (64-bit) floating-point - elements in "a" expressed in radians, store the sine in "dst", and store the cosine into - memory at "mem_addr". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := SIN(a[i+63:i]) - MEM[mem_addr+i+63:mem_addr+i] := COS(a[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - cos_res[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + + Compute the sine and cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, store the sine in "dst", and store the cosine into memory at "mem_addr". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := SIN(a[i+63:i]) + MEM[mem_addr+i+63:mem_addr+i] := COS(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 +cos_res[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
- - - - - - - Compute the sine and cosine of packed double-precision (64-bit) floating-point - elements in "a" expressed in radians, store the sine in "dst", store the cosine into - memory at "mem_addr". Elements are written to their respective locations using writemask - "k" (elements are copied from "sin_src" or "cos_src" when the corresponding mask bit is - not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := SIN(a[i+63:i]) - MEM[mem_addr+i+63:mem_addr+i] := COS(a[i+63:i]) - ELSE - dst[i+63:i] := sin_src[i+63:i] - MEM[mem_addr+i+63:mem_addr+i] := cos_src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - cos_res[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + + + + + Compute the sine and cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, store the sine in "dst", store the cosine into memory at "mem_addr". Elements are written to their respective locations using writemask "k" (elements are copied from "sin_src" or "cos_src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := SIN(a[i+63:i]) + MEM[mem_addr+i+63:mem_addr+i] := COS(a[i+63:i]) + ELSE + dst[i+63:i] := sin_src[i+63:i] + MEM[mem_addr+i+63:mem_addr+i] := cos_src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 +cos_res[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
- - - - Compute the sine and cosine of packed single-precision (32-bit) floating-point - elements in "a" expressed in radians, store the sine in "dst", and store the cosine into - memory at "mem_addr". - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := SIN(a[i+31:i]) - MEM[mem_addr+i+31:mem_addr+i] := COS(a[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - cos_res[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + + Compute the sine and cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, store the sine in "dst", and store the cosine into memory at "mem_addr". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := SIN(a[i+31:i]) + MEM[mem_addr+i+31:mem_addr+i] := COS(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 +cos_res[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
- - - - - - - Compute the sine and cosine of packed single-precision (32-bit) floating-point - elements in "a" expressed in radians, store the sine in "dst", store the cosine into - memory at "mem_addr". Elements are written to their respective locations using writemask - "k" (elements are copied from "sin_src" or "cos_src" when the corresponding mask bit is - not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := SIN(a[i+31:i]) - MEM[mem_addr+i+31:mem_addr+i] := COS(a[i+31:i]) - ELSE - dst[i+31:i] := sin_src[i+31:i] - MEM[mem_addr+i+31:mem_addr+i] := cos_src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - cos_res[MAX:512] := 0 - - AVX512F -
immintrin.h
- Trigonometry + + + + + + + Compute the sine and cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, store the sine in "dst", store the cosine into memory at "mem_addr". Elements are written to their respective locations using writemask "k" (elements are copied from "sin_src" or "cos_src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := SIN(a[i+31:i]) + MEM[mem_addr+i+31:mem_addr+i] := COS(a[i+31:i]) + ELSE + dst[i+31:i] := sin_src[i+31:i] + MEM[mem_addr+i+31:mem_addr+i] := cos_src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 +cos_res[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Trigonometry
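[Editorial note] In the SINCOS pseudocode above, the otherwise-undeclared `cos_res` appears to name the cosine result vector written through "mem_addr". A scalar Rust model of the unmasked form (the function name is invented for exposition; `f64::sin_cos` computes both values in one call):

    // Scalar model of the unmasked SINCOS entries above: the per-lane sine is
    // returned and the per-lane cosine is written through the memory operand.
    fn sincos_pd(mem_addr: &mut [f64; 8], a: [f64; 8]) -> [f64; 8] {
        let mut dst = [0.0_f64; 8];
        for j in 0..8 {
            let (s, c) = a[j].sin_cos();
            dst[j] = s;
            mem_addr[j] = c;
        }
        dst
    }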
- - - Compute the cube root of packed double-precision (64-bit) floating-point - elements in "a", and store the results in "dst". - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := CubeRoot(a[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + Compute the cube root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := CubeRoot(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the cube root of packed double-precision (64-bit) floating-point - elements in "a", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := CubeRoot(a[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + Compute the cube root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := CubeRoot(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - Compute the cube root of packed single-precision (32-bit) floating-point - elements in "a", and store the results in "dst". - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := CubeRoot(a[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + Compute the cube root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := CubeRoot(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the cube root of packed single-precision (32-bit) floating-point - elements in "a", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := CubeRoot(a[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + Compute the cube root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := CubeRoot(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
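[Editorial note] CubeRoot in the entries above has a direct per-lane counterpart in Rust (a minimal sketch, not part of the patch):

    // Per-lane counterpart of CubeRoot. f64::cbrt (and f32::cbrt) is
    // defined for negative inputs, unlike powf(1.0 / 3.0).
    fn cube_root_lane(x: f64) -> f64 {
        x.cbrt()
    }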
- - - Compute the exponential value of 10 raised to the power of packed - double-precision (64-bit) floating-point elements in "a", and store the results in - "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := POW(10.0, a[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + Compute the exponential value of 10 raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := POW(10.0, a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the exponential value of 10 raised to the power of packed - double-precision (64-bit) floating-point elements in "a", and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := POW(10.0, a[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + Compute the exponential value of 10 raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := POW(10.0, a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - Compute the exponential value of 10 raised to the power of packed - single-precision (32-bit) floating-point elements in "a", and store the results in - "dst". - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := POW(FP32(10.0), a[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + Compute the exponential value of 10 raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := POW(FP32(10.0), a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the exponential value of 10 raised to the power of packed - single-precision (32-bit) floating-point elements in "a", and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := POW(FP32(10.0), a[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + Compute the exponential value of 10 raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := POW(FP32(10.0), a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - Compute the exponential value of 2 raised to the power of packed - double-precision (64-bit) floating-point elements in "a", and store the results in - "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := POW(2.0, a[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + Compute the exponential value of 2 raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := POW(2.0, a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the exponential value of 2 raised to the power of packed - double-precision (64-bit) floating-point elements in "a", and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := POW(2.0, a[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + Compute the exponential value of 2 raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := POW(2.0, a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - Compute the exponential value of 2 raised to the power of packed - single-precision (32-bit) floating-point elements in "a", and store the results in - "dst". - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := POW(FP32(2.0), a[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + Compute the exponential value of 2 raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := POW(FP32(2.0), a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the exponential value of 2 raised to the power of packed - single-precision (32-bit) floating-point elements in "a", and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := POW(FP32(2.0), a[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + Compute the exponential value of 2 raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := POW(FP32(2.0), a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - Compute the exponential value of "e" raised to the power of packed - double-precision (64-bit) floating-point elements in "a", and store the results in - "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := POW(e, a[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + Compute the exponential value of "e" raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := POW(e, a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the exponential value of "e" raised to the power of packed - double-precision (64-bit) floating-point elements in "a", and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := POW(e, a[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + Compute the exponential value of "e" raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := POW(e, a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - Compute the exponential value of "e" raised to the power of packed - single-precision (32-bit) floating-point elements in "a", and store the results in - "dst". - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := POW(FP32(e), a[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + Compute the exponential value of "e" raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := POW(FP32(e), a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the exponential value of "e" raised to the power of packed - single-precision (32-bit) floating-point elements in "a", and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := POW(FP32(e), a[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + Compute the exponential value of "e" raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := POW(FP32(e), a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - Compute the exponential value of "e" raised to the power of packed - double-precision (64-bit) floating-point elements in "a", subtract one from each - element, and store the results in "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := POW(e, a[i+63:i]) - 1.0 - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + Compute the exponential value of "e" raised to the power of packed double-precision (64-bit) floating-point elements in "a", subtract one from each element, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := POW(e, a[i+63:i]) - 1.0 +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the exponential value of "e" raised to the power of packed - double-precision (64-bit) floating-point elements in "a", subtract one from each - element, and store the results in "dst" using writemask "k" (elements are copied from - "src" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := POW(e, a[i+63:i]) - 1.0 - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + Compute the exponential value of "e" raised to the power of packed double-precision (64-bit) floating-point elements in "a", subtract one from each element, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := POW(e, a[i+63:i]) - 1.0 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - Compute the exponential value of "e" raised to the power of packed - single-precision (32-bit) floating-point elements in "a", subtract one from each - element, and store the results in "dst". - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := POW(FP32(e), a[i+31:i]) - 1.0 - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + Compute the exponential value of "e" raised to the power of packed single-precision (32-bit) floating-point elements in "a", subtract one from each element, and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := POW(FP32(e), a[i+31:i]) - 1.0 +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the exponential value of "e" raised to the power of packed - single-precision (32-bit) floating-point elements in "a", subtract one from each - element, and store the results in "dst" using writemask "k" (elements are copied from - "src" when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := POW(FP32(e), a[i+31:i]) - 1.0 - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + Compute the exponential value of "e" raised to the power of packed single-precision (32-bit) floating-point elements in "a", subtract one from each element, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := POW(FP32(e), a[i+31:i]) - 1.0 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
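[Editorial note] The four exponential families above (base 10, base 2, base e, and e^x - 1) reduce to per-lane scalar operations with direct counterparts in Rust's standard library, including the cancellation-avoiding EXPM1 form (a sketch; the tuple-returning helper is for exposition only):

    // Per-lane scalar counterparts for the exponential entries above.
    fn exp_lanes(x: f64) -> (f64, f64, f64, f64) {
        let exp10 = 10f64.powf(x); // POW(10.0, a)
        let exp2 = x.exp2();       // POW(2.0, a)
        let exp = x.exp();         // POW(e, a)
        let expm1 = x.exp_m1();    // POW(e, a) - 1.0, accurate near x = 0
        (exp10, exp2, exp, expm1)
    }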
- - - - Compute the length of the hypotenous of a right triangle, with the lengths of - the other two sides of the triangle stored as packed double-precision (64-bit) - floating-point elements in "a" and "b", and store the results in "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := SQRT(POW(a[i+63:i], 2.0) + POW(b[i+63:i], 2.0)) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + Compute the length of the hypotenuse of a right triangle, with the lengths of the other two sides of the triangle stored as packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst". +
+FOR j := 0 to 7
+    i := j*64
+    dst[i+63:i] := SQRT(POW(a[i+63:i], 2.0) + POW(b[i+63:i], 2.0))
+ENDFOR
+dst[MAX:512] := 0
+
+ AVX512F
immintrin.h
+ Elementary Math Functions
- - - - - - Compute the length of the hypotenous of a right triangle, with the lengths of - the other two sides of the triangle stored as packed double-precision (64-bit) - floating-point elements in "a" and "b", and store the results in "dst" using writemask - "k" (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := SQRT(POW(a[i+63:i], 2.0) + POW(b[i+63:i], 2.0)) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + + Compute the length of the hypotenuse of a right triangle, with the lengths of the other two sides of the triangle stored as packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). +
+FOR j := 0 to 7
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := SQRT(POW(a[i+63:i], 2.0) + POW(b[i+63:i], 2.0))
+    ELSE
+        dst[i+63:i] := src[i+63:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+
+ AVX512F
immintrin.h
+ Elementary Math Functions
- - - - Compute the length of the hypotenous of a right triangle, with the lengths of - the other two sides of the triangle stored as packed single-precision (32-bit) - floating-point elements in "a" and "b", and store the results in "dst". - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := SQRT(POW(a[i+31:i], 2.0) + POW(b[i+31:i], 2.0)) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + Compute the length of the hypotenuse of a right triangle, with the lengths of the other two sides of the triangle stored as packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst". +
+FOR j := 0 to 15
+    i := j*32
+    dst[i+31:i] := SQRT(POW(a[i+31:i], 2.0) + POW(b[i+31:i], 2.0))
+ENDFOR
+dst[MAX:512] := 0
+
+ AVX512F
immintrin.h
+ Elementary Math Functions
- - - - - - Compute the length of the hypotenous of a right triangle, with the lengths of - the other two sides of the triangle stored as packed single-precision (32-bit) - floating-point elements in "a" and "b", and store the results in "dst" using writemask - "k" (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := SQRT(POW(a[i+31:i], 2.0) + POW(b[i+31:i], 2.0)) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + + Compute the length of the hypotenuse of a right triangle, with the lengths of the other two sides of the triangle stored as packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). +
+FOR j := 0 to 15
+    i := j*32
+    IF k[j]
+        dst[i+31:i] := SQRT(POW(a[i+31:i], 2.0) + POW(b[i+31:i], 2.0))
+    ELSE
+        dst[i+31:i] := src[i+31:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+
+ AVX512F
immintrin.h
+ Elementary Math Functions
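[Editorial note] The HYPOT pseudocode above is the textbook SQRT(POW(a,2) + POW(b,2)) formula; a practical per-lane implementation would normally prefer an overflow-safe form such as f64::hypot, which computes the same value. A hedged sketch of one lane:

    // One lane of the HYPOT entries above. The literal pseudocode squares
    // and square-roots, which can overflow for large inputs; f64::hypot
    // yields the same result while avoiding the intermediate overflow.
    fn hypot_lane(a: f64, b: f64) -> f64 {
        let _naive = (a * a + b * b).sqrt(); // literal SQRT(POW(a,2) + POW(b,2))
        a.hypot(b)
    }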
- - - Compute the inverse square root of packed double-precision (64-bit) - floating-point elements in "a", and store the results in "dst". - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := InvSQRT(a[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + Compute the inverse square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := InvSQRT(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the inverse square root of packed double-precision (64-bit) - floating-point elements in "a", and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := InvSQRT(a[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + Compute the inverse square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := InvSQRT(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - Compute the inverse square root of packed single-precision (32-bit) - floating-point elements in "a", and store the results in "dst". - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := InvSQRT(a[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + Compute the inverse square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := InvSQRT(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the inverse square root of packed single-precision (32-bit) - floating-point elements in "a", and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := InvSQRT(a[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + Compute the inverse square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := InvSQRT(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
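The InvSQRT entries above specify the full-precision 1/sqrt(x), as opposed to the AVX512F _mm512_rsqrt14_* instructions, which only approximate it to 2^-14 relative error. A scalar sketch:

fn invsqrt_pd(a: [f64; 8]) -> [f64; 8] {
    a.map(|x| 1.0 / x.sqrt()) // InvSQRT(x); NaN for x < 0, +inf at +0.0
}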
- - - Compute the base-10 logarithm of packed double-precision (64-bit) - floating-point elements in "a", and store the results in "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := LOG(a[i+63:i]) / LOG(10.0) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + Compute the base-10 logarithm of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := LOG(a[i+63:i]) / LOG(10.0) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the base-10 logarithm of packed double-precision (64-bit) - floating-point elements in "a", and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := LOG(a[i+63:i]) / LOG(10.0) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + Compute the base-10 logarithm of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := LOG(a[i+63:i]) / LOG(10.0) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - Compute the base-10 logarithm of packed single-precision (32-bit) - floating-point elements in "a", and store the results in "dst". - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := LOG(a[i+31:i]) / LOG(10.0) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + Compute the base-10 logarithm of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := LOG(a[i+31:i]) / LOG(10.0) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the base-10 logarithm of packed single-precision (32-bit) - floating-point elements in "a", and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := LOG(a[i+31:i]) / LOG(10.0) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + Compute the base-10 logarithm of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := LOG(a[i+31:i]) / LOG(10.0) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
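The pseudocode defines the base-10 logarithm by change of base, LOG(x)/LOG(10.0), where LOG is the natural logarithm. A quick scalar check (a sketch, not tied to any intrinsic) that the quotient form and a direct log10 agree to rounding error:

fn main() {
    for x in [0.5_f64, 1.0, 42.0, 1e300] {
        let quotient_form = x.ln() / 10_f64.ln(); // LOG(x) / LOG(10.0)
        assert!((quotient_form - x.log10()).abs() < 1e-12);
    }
}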
- - - Compute the natural logarithm of one plus packed double-precision (64-bit) - floating-point elements in "a", and store the results in "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := LOG(1.0 + a[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + Compute the natural logarithm of one plus packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := LOG(1.0 + a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the natural logarithm of one plus packed double-precision (64-bit) - floating-point elements in "a", and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := LOG(1.0 + a[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + Compute the natural logarithm of one plus packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := LOG(1.0 + a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - Compute the natural logarithm of one plus packed single-precision (32-bit) - floating-point elements in "a", and store the results in "dst". - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := LOG(1.0 + a[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + Compute the natural logarithm of one plus packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := LOG(1.0 + a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the natural logarithm of one plus packed single-precision (32-bit) - floating-point elements in "a", and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := LOG(1.0 + a[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + Compute the natural logarithm of one plus packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := LOG(1.0 + a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
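LOG(1.0 + a), evaluated literally, loses every digit once "a" drops below the f64 epsilon, which is why real implementations use a dedicated log1p. A scalar illustration using std's f64::ln_1p:

fn main() {
    let a = 1e-17_f64;
    assert_eq!((1.0 + a).ln(), 0.0); // 1.0 + 1e-17 rounds to 1.0: the result is lost
    assert!((a.ln_1p() - a).abs() < 1e-32); // ln_1p keeps it: ln(1 + a) ≈ a for tiny a
}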
- - - Compute the base-2 logarithm of packed double-precision (64-bit) floating-point - elements in "a", and store the results in "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := LOG(a[i+63:i]) / LOG(2.0) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + Compute the base-2 logarithm of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := LOG(a[i+63:i]) / LOG(2.0) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the base-2 logarithm of packed double-precision (64-bit) floating-point - elements in "a", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := LOG(a[i+63:i]) / LOG(2.0) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + Compute the base-2 logarithm of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := LOG(a[i+63:i]) / LOG(2.0) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - Compute the natural logarithm of packed double-precision (64-bit) - floating-point elements in "a", and store the results in "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := LOG(a[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + Compute the natural logarithm of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := LOG(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the natural logarithm of packed double-precision (64-bit) - floating-point elements in "a", and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := LOG(a[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + Compute the natural logarithm of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := LOG(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - Compute the natural logarithm of packed single-precision (32-bit) - floating-point elements in "a", and store the results in "dst". - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := LOG(a[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + Compute the natural logarithm of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := LOG(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the natural logarithm of packed single-precision (32-bit) - floating-point elements in "a", and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := LOG(a[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + Compute the natural logarithm of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := LOG(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
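As with the base-10 entries, LOG in this guide is always the natural logarithm, and the base-2 variant above derives from it by change of base. A scalar sketch:

fn log2_pd(a: [f64; 8]) -> [f64; 8] {
    a.map(|x| x.ln() / std::f64::consts::LN_2) // LOG(a) / LOG(2.0)
}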
- - - Convert the exponent of each packed double-precision (64-bit) floating-point - element in "a" to a double-precision floating-point number representing the integer - exponent, and store the results in "dst". This intrinsic essentially calculates - "floor(log2(x))" for each element. - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := ConvertExpFP64(a[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element. + FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := ConvertExpFP64(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - Convert the exponent of each packed double-precision (64-bit) floating-point - element in "a" to a double-precision floating-point number representing the integer - exponent, and store the results in "dst" using writemask "k" (elements are copied from - "src" when the corresponding mask bit is not set). This intrinsic essentially calculates - "floor(log2(x))" for each element. - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := ConvertExpFP64(a[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision floating-point number representing the integer exponent, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element. + FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ConvertExpFP64(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - Convert the exponent of each packed single-precision (32-bit) floating-point - element in "a" to a single-precision floating-point number representing the integer - exponent, and store the results in "dst". This intrinsic essentially calculates - "floor(log2(x))" for each element. - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := ConvertExpFP32(a[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element. + FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := ConvertExpFP32(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - Convert the exponent of each packed single-precision (32-bit) floating-point - element in "a" to a single-precision floating-point number representing the integer - exponent, and store the results in "dst" using writemask "k" (elements are copied from - "src" when the corresponding mask bit is not set). This intrinsic essentially calculates - "floor(log2(x))" for each element. - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := ConvertExpFP32(a[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision floating-point number representing the integer exponent, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element. + FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ConvertExpFP32(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
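ConvertExpFP64/ConvertExpFP32, i.e. "floor(log2(x))", is just the unbiased IEEE-754 exponent returned as a float. A scalar model for normal, positive inputs; zeros, denormals, NaN and infinity need the special-case handling the real VGETEXPPD/VGETEXPPS instructions provide:

fn getexp_pd(x: f64) -> f64 {
    let biased = ((x.to_bits() >> 52) & 0x7ff) as i64; // 11-bit exponent field
    (biased - 1023) as f64 // remove the bias: floor(log2(|x|))
}

fn main() {
    assert_eq!(getexp_pd(8.0), 3.0);
    assert_eq!(getexp_pd(0.75), -1.0); // 0.75 = 1.5 * 2^-1
}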
- - - - Compute the exponential value of packed double-precision (64-bit) - floating-point elements in "a" raised by packed elements in "b", and store the results - in "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := POW(a[i+63:i], b[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + Compute the exponential value of packed double-precision (64-bit) floating-point elements in "a" raised by packed elements in "b", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := POW(a[i+63:i], b[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - - Compute the exponential value of packed double-precision (64-bit) - floating-point elements in "a" raised by packed elements in "b", and store the results - in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask - bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := POW(a[i+63:i], b[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + + Compute the exponential value of packed double-precision (64-bit) floating-point elements in "a" raised by packed elements in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := POW(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - Compute the exponential value of packed single-precision (32-bit) - floating-point elements in "a" raised by packed elements in "b", and store the results - in "dst". - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := POW(a[i+31:i], b[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + Compute the exponential value of packed single-precision (32-bit) floating-point elements in "a" raised by packed elements in "b", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := POW(a[i+31:i], b[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - - Compute the exponential value of packed single-precision (32-bit) - floating-point elements in "a" raised by packed elements in "b", and store the results - in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask - bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := POW(a[i+31:i], b[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + + Compute the exponential value of packed single-precision (32-bit) floating-point elements in "a" raised by packed elements in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := POW(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
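"Raised by packed elements in b" means an element-wise a^b, lane by lane. A scalar sketch:

fn pow_pd(a: [f64; 8], b: [f64; 8]) -> [f64; 8] {
    std::array::from_fn(|j| a[j].powf(b[j])) // POW(a, b) per lane
}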
- - - Computes the reciprocal of packed double-precision (64-bit) floating-point - elements in "a", storing the results in "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := (1.0 / a[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + Computes the reciprocal of packed double-precision (64-bit) floating-point elements in "a", storing the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := (1.0 / a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - Computes the reciprocal of packed double-precision (64-bit) floating-point - elements in "a", storing the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := (1.0 / a[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + Computes the reciprocal of packed double-precision (64-bit) floating-point elements in "a", storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := (1.0 / a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - Computes the reciprocal of packed single-precision (32-bit) floating-point - elements in "a", storing the results in "dst". - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := (1.0 / a[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + Computes the reciprocal of packed single-precision (32-bit) floating-point elements in "a", storing the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := (1.0 / a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - Computes the reciprocal of packed single-precision (32-bit) floating-point - elements in "a", storing the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := (1.0 / a[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + Computes the reciprocal of packed single-precision (32-bit) floating-point elements in "a", storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := (1.0 / a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Elementary Math Functions
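As with InvSQRT, these entries specify the exact 1.0/x, not the 2^-14 approximation computed by the AVX512F _mm512_rcp14_* instructions. A scalar sketch:

fn recip_ps(a: [f32; 16]) -> [f32; 16] {
    a.map(|x| 1.0 / x) // full-precision reciprocal; ±inf at signed zero
}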
- - - Compute the cumulative distribution function of packed double-precision - (64-bit) floating-point elements in "a" using the normal distribution, and store the - results in "dst". - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := CDFNormal(a[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Probability/Statistics + + + Compute the cumulative distribution function of packed double-precision (64-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst". + FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := CDFNormal(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Probability/Statistics
- - - - - Compute the cumulative distribution function of packed double-precision - (64-bit) floating-point elements in "a" using the normal distribution, and store the - results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := CDFNormal(a[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Probability/Statistics + + + + + Compute the cumulative distribution function of packed double-precision (64-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := CDFNormal(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Probability/Statistics
- - - Compute the cumulative distribution function of packed single-precision - (32-bit) floating-point elements in "a" using the normal distribution, and store the - results in "dst". - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := CDFNormal(a[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Probability/Statistics + + + Compute the cumulative distribution function of packed single-precision (32-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst". + FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := CDFNormal(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Probability/Statistics
- - - - - Compute the cumulative distribution function of packed single-precision - (32-bit) floating-point elements in "a" using the normal distribution, and store the - results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := CDFNormal(a[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Probability/Statistics + + + + + Compute the cumulative distribution function of packed single-precision (32-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := CDFNormal(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Probability/Statistics
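CDFNormal is the standard normal distribution function Φ; one way to model it is through the identity Φ(x) = (1 + erf(x/√2))/2. A sketch assuming the libm crate (not a dependency of this patch) for a scalar erf:

fn cdfnorm(x: f64) -> f64 {
    0.5 * (1.0 + libm::erf(x / std::f64::consts::SQRT_2))
}

fn main() {
    assert_eq!(cdfnorm(0.0), 0.5); // half the mass lies below the mean
    assert!(cdfnorm(1.96) > 0.97 && cdfnorm(1.96) < 0.98); // ≈ 0.975
}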
- - - Compute the inverse cumulative distribution function of packed double-precision - (64-bit) floating-point elements in "a" using the normal distribution, and store the - results in "dst". - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := InverseCDFNormal(a[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Probability/Statistics + + + Compute the inverse cumulative distribution function of packed double-precision (64-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst". + FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := InverseCDFNormal(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Probability/Statistics
- - - - - Compute the inverse cumulative distribution function of packed double-precision - (64-bit) floating-point elements in "a" using the normal distribution, and store the - results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := InverseCDFNormal(a[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Probability/Statistics + + + + + Compute the inverse cumulative distribution function of packed double-precision (64-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := InverseCDFNormal(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Probability/Statistics
- - - Compute the inverse cumulative distribution function of packed single-precision - (32-bit) floating-point elements in "a" using the normal distribution, and store the - results in "dst". - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := InverseCDFNormal(a[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Probability/Statistics + + + Compute the inverse cumulative distribution function of packed single-precision (32-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst". + FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := InverseCDFNormal(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Probability/Statistics
- - - - - Compute the inverse cumulative distribution function of packed single-precision - (32-bit) floating-point elements in "a" using the normal distribution, and store the - results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := InverseCDFNormal(a[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Probability/Statistics + + + + + Compute the inverse cumulative distribution function of packed single-precision (32-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := InverseCDFNormal(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Probability/Statistics
- - - Compute the error function of packed double-precision (64-bit) floating-point - elements in "a", and store the results in "dst". - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := ERF(a[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Probability/Statistics + + + Compute the error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := ERF(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Probability/Statistics
- - - - - Compute the error function of packed double-precision (64-bit) floating-point - elements in "a", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := ERF(a[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Probability/Statistics + + + + + Compute the error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ERF(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Probability/Statistics
- - - Compute the complementary error function of packed double-precision (64-bit) - floating-point elements in "a", and store the results in "dst". - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := 1.0 - ERF(a[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Probability/Statistics + + + Compute the complementary error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := 1.0 - ERF(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Probability/Statistics
- - - - - Compute the complementary error function of packed double-precision (64-bit) - floating-point elements in "a", and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := 1.0 - ERF(a[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Probability/Statistics + + + + + Compute the complementary error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := 1.0 - ERF(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Probability/Statistics
- - - Compute the error function of packed single-precision (32-bit) floating-point - elements in "a", and store the results in "dst". - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := ERF(a[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Probability/Statistics + + + Compute the error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := ERF(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Probability/Statistics
- - - - - Compute the error function of packed single-precision (32-bit) floating-point - elements in "a", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := ERF(a[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Probability/Statistics + + + + + Compute the error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ERF(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Probability/Statistics
- - - Compute the complementary error function of packed single-precision (32-bit) - floating-point elements in "a", and store the results in "dst". - FOR j := 0 to 15 - i := j*32 - dst[i+63:i] := 1.0 - ERF(a[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Probability/Statistics + + + Compute the complementary error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := 1.0 - ERF(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Probability/Statistics
- - - - - Compute the complementary error function of packed single-precision (32-bit) - floating-point elements in "a", and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+63:i] := 1.0 - ERF(a[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Probability/Statistics + + + + + Compute the complementary error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := 1.0 - ERF(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Probability/Statistics
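The complementary error function is defined above as 1.0 - ERF(a), but computing it that way underflows to zero long before erfc itself does; dedicated erfc implementations keep the tail. A scalar illustration, again assuming the libm crate:

fn main() {
    let x = 10.0_f64;
    assert_eq!(1.0 - libm::erf(x), 0.0); // erf(10) rounds to exactly 1.0
    assert!(libm::erfc(x) > 0.0); // the true tail ≈ 2.1e-45 survives
}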
- - - Compute the inverse error function of packed double-precision (64-bit) - floating-point elements in "a", and store the results in "dst". - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := 1.0 / ERF(a[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Probability/Statistics + + + Compute the inverse error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := 1.0 / ERF(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Probability/Statistics
- - - - - Compute the inverse error function of packed double-precision (64-bit) - floating-point elements in "a", and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := 1.0 / ERF(a[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Probability/Statistics + + + + + Compute the inverse error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := 1.0 / ERF(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Probability/Statistics
- - - Compute the inverse error function of packed single-precision (32-bit) - floating-point elements in "a", and store the results in "dst". - FOR j := 0 to 15 - i := j*32 - dst[i+63:i] := 1.0 / ERF(a[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Probability/Statistics + + + Compute the inverse error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := 1.0 / ERF(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Probability/Statistics
- - - - - Compute the inverse error function of packed single-precision (32-bit) - floating-point elements in "a", and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+63:i] := 1.0 / ERF(a[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Probability/Statistics + + + + + Compute the inverse error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := 1.0 / ERF(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Probability/Statistics
- - - Compute the inverse complementary error function of packed double-precision - (64-bit) floating-point elements in "a", and store the results in "dst". - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := 1.0 / (1.0 - ERF(a[i+63:i])) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Probability/Statistics + + + Compute the inverse complementary error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := 1.0 / (1.0 - ERF(a[i+63:i])) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Probability/Statistics
- - - - - Compute the inverse complementary error function of packed double-precision - (64-bit) floating-point elements in "a", and store the results in "dst" using writemask - "k" (elements are copied from "src" when the corresponding mask bit is not set). - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := 1.0 / (1.0 - ERF(a[i+63:i])) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Probability/Statistics + + + + + Compute the inverse complementary error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := 1.0 / (1.0 - ERF(a[i+63:i])) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Probability/Statistics
- - - Compute the inverse complementary error function of packed single-precision - (32-bit) floating-point elements in "a", and store the results in "dst". - FOR j := 0 to 15 - i := j*32 - dst[i+63:i] := 1.0 / (1.0 - ERF(a[i+31:i])) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Probability/Statistics + + + Compute the inverse complementary error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := 1.0 / (1.0 - ERF(a[i+31:i])) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Probability/Statistics
- - - - - Compute the inverse complementary error function of packed single-precision - (32-bit) floating-point elements in "a", and store the results in "dst" using writemask - "k" (elements are copied from "src" when the corresponding mask bit is not set). - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+63:i] := 1.0 / (1.0 - ERF(a[i+31:i])) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Probability/Statistics + + + + + Compute the inverse complementary error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := 1.0 / (1.0 - ERF(a[i+31:i])) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Probability/Statistics
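In these entries 1.0 / ERF(a) and 1.0 / (1.0 - ERF(a)) denote the functional inverses erf⁻¹ and erfc⁻¹, per the descriptions, not reciprocals. Neither is available in std or libm; one way to realize erf⁻¹ for moderate arguments is Newton's method on erf(y) = x, using erf'(y) = (2/√π)·e^(−y²). A sketch assuming the libm crate and |x| well inside (−1, 1):

fn erfinv(x: f64) -> f64 {
    let scale = 2.0 / std::f64::consts::PI.sqrt(); // erf'(0) = 2/√π
    let mut y = 0.0_f64; // erf is odd and smooth, so 0 is a safe start here
    for _ in 0..50 {
        // Newton step: y -= (erf(y) - x) / erf'(y)
        y -= (libm::erf(y) - x) / (scale * (-y * y).exp());
    }
    y
}

fn main() {
    let y = erfinv(0.5);
    assert!((libm::erf(y) - 0.5).abs() < 1e-15);
}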
- - - Round the packed double-precision (64-bit) floating-point elements in "a" up to - an integer value, and store the results as packed double-precision floating-point - elements in "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := CEIL(a[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Special Math Functions + + + Round the packed double-precision (64-bit) floating-point elements in "a" up to an integer value, and store the results as packed double-precision floating-point elements in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := CEIL(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - Round the packed double-precision (64-bit) floating-point elements in "a" up to - an integer value, and store the results as packed double-precision floating-point - elements in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := CEIL(a[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Special Math Functions + + + + + Round the packed double-precision (64-bit) floating-point elements in "a" up to an integer value, and store the results as packed double-precision floating-point elements in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := CEIL(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Special Math Functions
- - - Round the packed single-precision (32-bit) floating-point elements in "a" up to - an integer value, and store the results as packed single-precision floating-point - elements in "dst". - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := CEIL(a[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Special Math Functions + + + Round the packed single-precision (32-bit) floating-point elements in "a" up to an integer value, and store the results as packed single-precision floating-point elements in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := CEIL(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - Round the packed single-precision (32-bit) floating-point elements in "a" up to - an integer value, and store the results as packed single-precision floating-point - elements in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := CEIL(a[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Special Math Functions + + + + + Round the packed single-precision (32-bit) floating-point elements in "a" up to an integer value, and store the results as packed single-precision floating-point elements in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := CEIL(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Special Math Functions
- - - Round the packed double-precision (64-bit) floating-point elements in "a" down - to an integer value, and store the results as packed double-precision floating-point - elements in "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := FLOOR(a[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Special Math Functions + + + Round the packed double-precision (64-bit) floating-point elements in "a" down to an integer value, and store the results as packed double-precision floating-point elements in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := FLOOR(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - Round the packed double-precision (64-bit) floating-point elements in "a" down - to an integer value, and store the results as packed double-precision floating-point - elements in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := FLOOR(a[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Special Math Functions + + + + + Round the packed double-precision (64-bit) floating-point elements in "a" down to an integer value, and store the results as packed double-precision floating-point elements in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := FLOOR(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Special Math Functions
- - - Round the packed single-precision (32-bit) floating-point elements in "a" down - to an integer value, and store the results as packed single-precision floating-point - elements in "dst". - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := FLOOR(a[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Special Math Functions + + + Round the packed single-precision (32-bit) floating-point elements in "a" down to an integer value, and store the results as packed single-precision floating-point elements in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := FLOOR(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - Round the packed single-precision (32-bit) floating-point elements in "a" down - to an integer value, and store the results as packed single-precision floating-point - elements in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := FLOOR(a[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Special Math Functions + + + + + Round the packed single-precision (32-bit) floating-point elements in "a" down to an integer value, and store the results as packed single-precision floating-point elements in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := FLOOR(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Special Math Functions
- - - Rounds each packed double-precision (64-bit) floating-point element in "a" to - the nearest integer value and stores the results as packed double-precision - floating-point elements in "dst". - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := NearbyInt(a[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Special Math Functions + + + Rounds each packed double-precision (64-bit) floating-point element in "a" to the nearest integer value and stores the results as packed double-precision floating-point elements in "dst". + FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := NearbyInt(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - Rounds each packed double-precision (64-bit) floating-point element in "a" to - the nearest integer value and stores the results as packed double-precision - floating-point elements in "dst" using writemask "k" (elements are copied from "src" - when the corresponding mask bit is not set). - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := NearbyInt(a[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Special Math Functions + + + + + Rounds each packed double-precision (64-bit) floating-point element in "a" to the nearest integer value and stores the results as packed double-precision floating-point elements in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := NearbyInt(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Special Math Functions
- - - Rounds each packed single-precision (32-bit) floating-point element in "a" to - the nearest integer value and stores the results as packed single-precision - floating-point elements in "dst". - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := NearbyInt(a[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Special Math Functions + + + Rounds each packed single-precision (32-bit) floating-point element in "a" to the nearest integer value and stores the results as packed single-precision floating-point elements in "dst". + FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := NearbyInt(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - Rounds each packed single-precision (32-bit) floating-point element in "a" to - the nearest integer value and stores the results as packed single-precision - floating-point elements in "dst" using writemask "k" (elements are copied from "src" - when the corresponding mask bit is not set). - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := NearbyInt(a[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Special Math Functions + + + + + Rounds each packed single-precision (32-bit) floating-point element in "a" to the nearest integer value and stores the results as packed single-precision floating-point elements in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := NearbyInt(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Special Math Functions
- - - Rounds the packed double-precision (64-bit) floating-point elements in "a" to - the nearest even integer value and stores the results in "dst". - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := RoundToNearestEven(a[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Special Math Functions + + + Rounds the packed double-precision (64-bit) floating-point elements in "a" to the nearest even integer value and stores the results in "dst". + FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := RoundToNearestEven(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - Rounds the packed double-precision (64-bit) floating-point elements in "a" to - the nearest even integer value and stores the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := RoundToNearestEven(a[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Special Math Functions + + + + + Rounds the packed double-precision (64-bit) floating-point elements in "a" to the nearest even integer value and stores the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := RoundToNearestEven(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Special Math Functions
- - - Rounds the packed single-precision (32-bit) floating-point elements in "a" to - the nearest even integer value and stores the results in "dst". - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := RoundToNearestEven(a[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Special Math Functions + + + Rounds the packed single-precision (32-bit) floating-point elements in "a" to the nearest even integer value and stores the results in "dst". + FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := RoundToNearestEven(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - Rounds the packed single-precision (32-bit) floating-point elements in "a" to - the nearest even integer value and stores the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := RoundToNearestEven(a[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Special Math Functions + + + + + Rounds the packed single-precision (32-bit) floating-point elements in "a" to the nearest even integer value and stores the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := RoundToNearestEven(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Special Math Functions
- - - Round the packed double-precision (64-bit) floating-point elements in "a" to - the nearest integer value, and store the results as packed double-precision - floating-point elements in "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := ROUND(a[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Special Math Functions + + + Round the packed double-precision (64-bit) floating-point elements in "a" to the nearest integer value, and store the results as packed double-precision floating-point elements in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := ROUND(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - Round the packed double-precision (64-bit) floating-point elements in "a" to - the nearest integer value, and store the results as packed double-precision - floating-point elements in "dst" using writemask "k" (elements are copied from "src" - when the corresponding mask bit is not set). - [round_note] - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := ROUND(a[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Special Math Functions + + + + + Round the packed double-precision (64-bit) floating-point elements in "a" to the nearest integer value, and store the results as packed double-precision floating-point elements in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ROUND(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Special Math Functions
- - - Truncate the packed double-precision (64-bit) floating-point elements in "a", - and store the results as packed double-precision floating-point elements in "dst". - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := TRUNCATE(a[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Special Math Functions + + + Truncate the packed double-precision (64-bit) floating-point elements in "a", and store the results as packed double-precision floating-point elements in "dst". + FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := TRUNCATE(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - Truncate the packed double-precision (64-bit) floating-point elements in "a", - and store the results as packed double-precision floating-point elements in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := TRUNCATE(a[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Special Math Functions + + + + + Truncate the packed double-precision (64-bit) floating-point elements in "a", and store the results as packed double-precision floating-point elements in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := TRUNCATE(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Special Math Functions
- - - Truncate the packed single-precision (32-bit) floating-point elements in "a", - and store the results as packed single-precision floating-point elements in "dst". - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := TRUNCATE(a[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Special Math Functions + + + Truncate the packed single-precision (32-bit) floating-point elements in "a", and store the results as packed single-precision floating-point elements in "dst". + FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := TRUNCATE(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - Truncate the packed single-precision (32-bit) floating-point elements in "a", - and store the results as packed single-precision floating-point elements in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := TRUNCATE(a[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Special Math Functions + + + + + Truncate the packed single-precision (32-bit) floating-point elements in "a", and store the results as packed single-precision floating-point elements in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := TRUNCATE(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Special Math Functions
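The six rounding families above (CEIL, FLOOR, NearbyInt, RoundToNearestEven, ROUND, TRUNCATE) differ only in which direction they move and how they break ties. std's f64 methods cover most of them directly (round_ties_even needs Rust 1.77+), while NearbyInt follows the current MXCSR rounding mode, which defaults to round-to-nearest-even. A scalar comparison on a value that distinguishes them:

fn main() {
    let x = 2.5_f64;
    assert_eq!(x.ceil(), 3.0);            // CEIL: toward +infinity
    assert_eq!(x.floor(), 2.0);           // FLOOR: toward -infinity
    assert_eq!(x.trunc(), 2.0);           // TRUNCATE: toward zero
    assert_eq!(x.round(), 3.0);           // ROUND: nearest, ties away from zero
    assert_eq!(x.round_ties_even(), 2.0); // RoundToNearestEven (and NearbyInt by default)
}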
- - - - Divide packed signed 32-bit integers in "a" by packed elements in "b", and - store the truncated results in "dst". - - FOR j := 0 to 15 - i := 32*j - IF b[i+31:i] == 0 - #DE - FI - dst[i+31:i] := Truncate32(a[i+31:i] / b[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Arithmetic + + + + Divide packed signed 32-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 15 + i := 32*j + IF b[i+31:i] == 0 + #DE + FI + dst[i+31:i] := Truncate32(a[i+31:i] / b[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Divide packed signed 32-bit integers in "a" by packed elements in "b", and - store the truncated results in "dst" using writemask "k" (elements are copied from "src" - when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := 32*j - IF k[j] - IF b[i+31:i] == 0 + + + + + + Divide packed signed 32-bit integers in "a" by packed elements in "b", and store the truncated results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + IF k[j] + IF b[i+31:i] == 0 #DE - FI - dst[i+31:i] := Truncate32(a[i+31:i] / b[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Arithmetic + FI + dst[i+31:i] := Truncate32(a[i+31:i] / b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 +
+ AVX512F +
immintrin.h
+ Arithmetic
- - - - Divide packed signed 8-bit integers in "a" by packed elements in "b", and store - the truncated results in "dst". - - FOR j := 0 to 63 - i := 8*j - IF b[i+7:i] == 0 - #DE - FI - dst[i+7:i] := Truncate8(a[i+7:i] / b[i+7:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Arithmetic + + + + Divide packed signed 8-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 63 + i := 8*j + IF b[i+7:i] == 0 + #DE + FI + dst[i+7:i] := Truncate8(a[i+7:i] / b[i+7:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Arithmetic
- - - - Divide packed signed 16-bit integers in "a" by packed elements in "b", and - store the truncated results in "dst". - - FOR j := 0 to 31 - i := 16*j - IF b[i+15:i] == 0 - #DE - FI - dst[i+15:i] := Truncate16(a[i+15:i] / b[i+15:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Arithmetic + + + + Divide packed signed 16-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 31 + i := 16*j + IF b[i+15:i] == 0 + #DE + FI + dst[i+15:i] := Truncate16(a[i+15:i] / b[i+15:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Arithmetic
- - - - Divide packed signed 64-bit integers in "a" by packed elements in "b", and - store the truncated results in "dst". - - FOR j := 0 to 7 - i := 64*j - IF b[i+63:i] == 0 - #DE - FI - dst[i+63:i] := Truncate64(a[i+63:i] / b[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Arithmetic + + + + Divide packed signed 64-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 7 + i := 64*j + IF b[i+63:i] == 0 + #DE + FI + dst[i+63:i] := Truncate64(a[i+63:i] / b[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Arithmetic
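The `div_epi*` entries above are library-sequence helpers rather than single instructions, and `#DE` marks the divide-error fault a zero divisor lane would raise. A scalar model under that reading (helper name illustrative; the fault is modeled as a panic, and `wrapping_div` is an assumption for the i32::MIN / -1 corner the pseudocode leaves unstated):

    fn div_epi32_lanes(a: [i32; 16], b: [i32; 16]) -> [i32; 16] {
        core::array::from_fn(|j| {
            // A zero divisor lane is #DE in the pseudocode; panic here.
            assert!(b[j] != 0, "#DE: divide by zero in lane {j}");
            // Integer division truncates toward zero, matching Truncate32;
            // wrapping_div also sidesteps the i32::MIN / -1 overflow panic.
            a[j].wrapping_div(b[j])
        })
    }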
- - - - Divide packed 32-bit integers in "a" by packed elements in "b", and store the - remainders as packed 32-bit integers in "dst". - FOR j := 0 to 15 - i := 32*j - dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Arithmetic + + + + Divide packed 32-bit integers in "a" by packed elements in "b", and store the remainders as packed 32-bit integers in "dst". + FOR j := 0 to 15 + i := 32*j + dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Divide packed 32-bit integers in "a" by packed elements in "b", and store the - remainders as packed 32-bit integers in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - FOR j := 0 to 15 - i := 32*j - IF k[j] - dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Divide packed 32-bit integers in "a" by packed elements in "b", and store the remainders as packed 32-bit integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Arithmetic
- - - - Divide packed 8-bit integers in "a" by packed elements in "b", and store the - remainders as packed 32-bit integers in "dst". - FOR j := 0 to 63 - i := 8*j - dst[i+7:i] := REMAINDER(a[i+7:i] / b[i+7:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Arithmetic + + + + Divide packed 8-bit integers in "a" by packed elements in "b", and store the remainders as packed 32-bit integers in "dst". + FOR j := 0 to 63 + i := 8*j + dst[i+7:i] := REMAINDER(a[i+7:i] / b[i+7:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Arithmetic
- - - - Divide packed 16-bit integers in "a" by packed elements in "b", and store the - remainders as packed 32-bit integers in "dst". - FOR j := 0 to 31 - i := 16*j - dst[i+15:i] := REMAINDER(a[i+15:i] / b[i+15:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Arithmetic + + + + Divide packed 16-bit integers in "a" by packed elements in "b", and store the remainders as packed 32-bit integers in "dst". + FOR j := 0 to 31 + i := 16*j + dst[i+15:i] := REMAINDER(a[i+15:i] / b[i+15:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Arithmetic
- - - - Divide packed 64-bit integers in "a" by packed elements in "b", and store the - remainders as packed 32-bit integers in "dst". - FOR j := 0 to 7 - i := 64*j - dst[i+63:i] := REMAINDER(a[i+63:i] / b[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Arithmetic + + + + Divide packed 64-bit integers in "a" by packed elements in "b", and store the remainders as packed 32-bit integers in "dst". + FOR j := 0 to 7 + i := 64*j + dst[i+63:i] := REMAINDER(a[i+63:i] / b[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Arithmetic
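REMAINDER pairs with the truncating division above, so it behaves like Rust's `%` on integers: the result takes the sign of the dividend. The pseudocode for these entries leaves the zero-divisor case unstated, so the model below simply asserts against it (helper name illustrative):

    fn rem_epi32_lanes(a: [i32; 16], b: [i32; 16]) -> [i32; 16] {
        core::array::from_fn(|j| {
            assert!(b[j] != 0, "zero divisor in lane {j}");
            // Same sign as the dividend, like Rust's `%`; wrapping_rem
            // also covers the i32::MIN % -1 corner (result 0).
            a[j].wrapping_rem(b[j])
        })
    }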
- - - - Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and - store the truncated results in "dst". - - FOR j := 0 to 15 - i := 32*j - IF b[i+31:i] == 0 - #DE - FI - dst[i+31:i] := Truncate32(a[i+31:i] / b[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Arithmetic + + + + Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 15 + i := 32*j + IF b[i+31:i] == 0 + #DE + FI + dst[i+31:i] := Truncate32(a[i+31:i] / b[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and - store the truncated results in "dst" using writemask "k" (elements are copied from "src" - when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := 32*j - IF k[j] - IF b[i+31:i] == 0 + + + + + + Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and store the truncated results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + IF k[j] + IF b[i+31:i] == 0 #DE - FI - dst[i+31:i] := Truncate32(a[i+31:i] / b[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Arithmetic + FI + dst[i+31:i] := Truncate32(a[i+31:i] / b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 +
+ AVX512F +
immintrin.h
+ Arithmetic
- - - - Divide packed unsigned 8-bit integers in "a" by packed elements in "b", and - store the truncated results in "dst". - - FOR j := 0 to 63 - i := 8*j - IF b[i+7:i] == 0 - #DE - FI - dst[i+7:i] := Truncate8(a[i+7:i] / b[i+7:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Arithmetic + + + + Divide packed unsigned 8-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 63 + i := 8*j + IF b[i+7:i] == 0 + #DE + FI + dst[i+7:i] := Truncate8(a[i+7:i] / b[i+7:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Arithmetic
- - - - Divide packed unsigned 16-bit integers in "a" by packed elements in "b", and - store the truncated results in "dst". - - FOR j := 0 to 31 - i := 16*j - IF b[i+15:i] == 0 - #DE - FI - dst[i+15:i] := Truncate16(a[i+15:i] / b[i+15:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Arithmetic + + + + Divide packed unsigned 16-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 31 + i := 16*j + IF b[i+15:i] == 0 + #DE + FI + dst[i+15:i] := Truncate16(a[i+15:i] / b[i+15:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Arithmetic
- - - - Divide packed unsigned 64-bit integers in "a" by packed elements in "b", and - store the truncated results in "dst". - - FOR j := 0 to 7 - i := 64*j - IF b[i+63:i] == 0 - #DE - FI - dst[i+63:i] := Truncate64(a[i+63:i] / b[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Arithmetic + + + + Divide packed unsigned 64-bit integers in "a" by packed elements in "b", and store the truncated results in "dst". + +FOR j := 0 to 7 + i := 64*j + IF b[i+63:i] == 0 + #DE + FI + dst[i+63:i] := Truncate64(a[i+63:i] / b[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Arithmetic
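The unsigned `div_epu*` family has the same shape, but with unsigned lanes there is no MIN / -1 corner and plain division already truncates; only the `#DE` zero-divisor check needs modeling (helper name illustrative):

    fn div_epu32_lanes(a: [u32; 16], b: [u32; 16]) -> [u32; 16] {
        core::array::from_fn(|j| {
            assert!(b[j] != 0, "#DE: divide by zero in lane {j}");
            a[j] / b[j]
        })
    }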
- - - - Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and - store the remainders as packed unsigned 32-bit integers in "dst". - FOR j := 0 to 15 - i := 32*j - dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Arithmetic + + + + Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 32-bit integers in "dst". + FOR j := 0 to 15 + i := 32*j + dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and - store the remainders as packed unsigned 32-bit integers in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - FOR j := 0 to 15 - i := 32*j - IF k[j] - dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 32-bit integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Arithmetic
- - - - Divide packed unsigned 8-bit integers in "a" by packed elements in "b", and - store the remainders as packed unsigned 32-bit integers in "dst". - FOR j := 0 to 63 - i := 8*j - dst[i+7:i] := REMAINDER(a[i+7:i] / b[i+7:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Arithmetic + + + + Divide packed unsigned 8-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 32-bit integers in "dst". + FOR j := 0 to 63 + i := 8*j + dst[i+7:i] := REMAINDER(a[i+7:i] / b[i+7:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Arithmetic
- - - - Divide packed unsigned 16-bit integers in "a" by packed elements in "b", and - store the remainders as packed unsigned 32-bit integers in "dst". - FOR j := 0 to 31 - i := 16*j - dst[i+15:i] := REMAINDER(a[i+15:i] / b[i+15:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Arithmetic + + + + Divide packed unsigned 16-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 32-bit integers in "dst". + FOR j := 0 to 31 + i := 16*j + dst[i+15:i] := REMAINDER(a[i+15:i] / b[i+15:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Arithmetic
- - - - Divide packed unsigned 64-bit integers in "a" by packed elements in "b", and - store the remainders as packed unsigned 32-bit integers in "dst". - FOR j := 0 to 7 - i := 64*j - dst[i+63:i] := REMAINDER(a[i+63:i] / b[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Arithmetic + + + + Divide packed unsigned 64-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 32-bit integers in "dst". + FOR j := 0 to 7 + i := 64*j + dst[i+63:i] := REMAINDER(a[i+63:i] / b[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Arithmetic
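The unsigned remainder entries are the direct counterpart of the unsigned division family, and per lane the usual identity ties the two together: a = b * (a / b) + (a % b) whenever b is nonzero. A one-line check of that reading:

    // Per-lane div/rem identity for the epu32 entries above (b != 0).
    fn divrem_identity_holds(a: u32, b: u32) -> bool {
        b != 0 && a == b.wrapping_mul(a / b).wrapping_add(a % b)
    }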
- - - Compute the base-2 logarithm of packed single-precision (32-bit) floating-point - elements in "a", and store the results in "dst". - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := LOG(a[i+31:i]) / LOG(2.0) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + Compute the base-2 logarithm of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := LOG(a[i+31:i]) / LOG(2.0) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the base-2 logarithm of packed single-precision (32-bit) floating-point - elements in "a", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := LOG(a[i+31:i]) / LOG(2.0) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions -
- - - - - - - - - Add packed double-precision (64-bit) floating-point elements in "a" and "b", - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] + b[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Arithmetic + + + + + Compute the base-2 logarithm of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := LOG(a[i+31:i]) / LOG(2.0) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions +
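The base-2 log entries compute LOG(a)/LOG(2) per lane, which Rust's `f32::log2` models in one call; the masked variant copies unselected lanes from `src`. Sketch (helper name illustrative):

    fn mask_log2_ps_lanes(src: [f32; 16], k: u16, a: [f32; 16]) -> [f32; 16] {
        core::array::from_fn(|j| {
            if (k >> j) & 1 == 1 { a[j].log2() } else { src[j] }
        })
    }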
+ + + + + + + + + Add packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Add packed double-precision (64-bit) floating-point elements in "a" and "b", - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] + b[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Arithmetic + + + + + Add packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Add packed double-precision (64-bit) floating-point elements in "a" and "b", - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] + b[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Add packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Add packed double-precision (64-bit) floating-point elements in "a" and "b", - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] + b[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Arithmetic + + + + + Add packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Add packed single-precision (32-bit) floating-point elements in "a" and "b", - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] + b[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Add packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Add packed single-precision (32-bit) floating-point elements in "a" and "b", - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] + b[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Arithmetic + + + + + Add packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Add packed single-precision (32-bit) floating-point elements in "a" and "b", - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] + b[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Add packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Add packed single-precision (32-bit) floating-point elements in "a" and "b", - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] + b[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Arithmetic + + + + + Add packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Arithmetic
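From here the entries cover the AVX512VL 256-/128-bit masked arithmetic, always in two flavors: the writemask form copies unselected lanes from `src`, while the zeromask form writes 0.0 (the `mask_`/`maskz_` naming). A scalar model contrasting the two for the 4 x f64 add entries (helper names illustrative):

    fn mask_add_pd_lanes(src: [f64; 4], k: u8, a: [f64; 4], b: [f64; 4]) -> [f64; 4] {
        core::array::from_fn(|j| if (k >> j) & 1 == 1 { a[j] + b[j] } else { src[j] })
    }

    fn maskz_add_pd_lanes(k: u8, a: [f64; 4], b: [f64; 4]) -> [f64; 4] {
        // Zeromask: unselected lanes become 0.0 rather than src lanes.
        core::array::from_fn(|j| if (k >> j) & 1 == 1 { a[j] + b[j] } else { 0.0 })
    }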
- - - - - - Divide packed double-precision (64-bit) floating-point elements in "a" by - packed elements in "b", and store the results in "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := 64*j - IF k[j] - dst[i+63:i] := a[i+63:i] / b[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Divide packed double-precision (64-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + IF k[j] + dst[i+63:i] := a[i+63:i] / b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Divide packed double-precision (64-bit) floating-point elements in "a" by - packed elements in "b", and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := 64*j - IF k[j] - dst[i+63:i] := a[i+63:i] / b[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Arithmetic + + + + + Divide packed double-precision (64-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + IF k[j] + dst[i+63:i] := a[i+63:i] / b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Divide packed double-precision (64-bit) floating-point elements in "a" by - packed elements in "b", and store the results in "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := 64*j - IF k[j] - dst[i+63:i] := a[i+63:i] / b[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Divide packed double-precision (64-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + IF k[j] + dst[i+63:i] := a[i+63:i] / b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Divide packed double-precision (64-bit) floating-point elements in "a" by - packed elements in "b", and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := 64*j - IF k[j] - dst[i+63:i] := a[i+63:i] / b[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Arithmetic + + + + + Divide packed double-precision (64-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + IF k[j] + dst[i+63:i] := a[i+63:i] / b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Divide packed single-precision (32-bit) floating-point elements in "a" by - packed elements in "b", and store the results in "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := 32*j - IF k[j] - dst[i+31:i] := a[i+31:i] / b[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Divide packed single-precision (32-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + IF k[j] + dst[i+31:i] := a[i+31:i] / b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Divide packed single-precision (32-bit) floating-point elements in "a" by - packed elements in "b", and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := 32*j - IF k[j] - dst[i+31:i] := a[i+31:i] / b[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Arithmetic + + + + + Divide packed single-precision (32-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + IF k[j] + dst[i+31:i] := a[i+31:i] / b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Divide packed single-precision (32-bit) floating-point elements in "a" by - packed elements in "b", and store the results in "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := 32*j - IF k[j] - dst[i+31:i] := a[i+31:i] / b[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Divide packed single-precision (32-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + IF k[j] + dst[i+31:i] := a[i+31:i] / b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Divide packed single-precision (32-bit) floating-point elements in "a" by - packed elements in "b", and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := 32*j - IF k[j] - dst[i+31:i] := a[i+31:i] / b[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Arithmetic + + + + + Divide packed single-precision (32-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + IF k[j] + dst[i+31:i] := a[i+31:i] / b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Arithmetic
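FP division follows the same masking pattern, and unlike the integer `div_epi*` family there is no #DE: a zero divisor yields an IEEE inf or NaN instead. One practical use of the writemask, sketched under that reading, is to keep zero-divisor lanes out of the computation entirely:

    // Build a writemask selecting only nonzero-divisor lanes, so a
    // masked divide leaves the other lanes untouched (or zeroed).
    fn nonzero_divisor_mask(b: &[f64; 4]) -> u8 {
        let mut k = 0u8;
        for j in 0..4 {
            if b[j] != 0.0 {
                k |= 1 << j;
            }
        }
        k
    }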
- - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", add the intermediate result to packed elements in "c", and store the results in - "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit - is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] - ELSE - dst[i+63:i] := c[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - - - AVX512F - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := c[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + + + AVX512F + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", add the intermediate result to packed elements in "c", and store the results in - "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit - is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] - ELSE - dst[i+63:i] := a[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - - - AVX512F - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + + + AVX512F + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", add the intermediate result to packed elements in "c", and store the results in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - - - AVX512F - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + + + AVX512F + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", add the intermediate result to packed elements in "c", and store the results in - "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit - is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] - ELSE - dst[i+63:i] := c[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - - - AVX512F - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := c[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + + + AVX512F + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", add the intermediate result to packed elements in "c", and store the results in - "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit - is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] - ELSE - dst[i+63:i] := a[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - - - AVX512F - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + + + AVX512F + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", add the intermediate result to packed elements in "c", and store the results in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - - - AVX512F - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + + + AVX512F + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", add the intermediate result to packed elements in "c", and store the results in - "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit - is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] - ELSE - dst[i+31:i] := c[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - - - AVX512F - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := c[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + + + AVX512F + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", add the intermediate result to packed elements in "c", and store the results in - "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit - is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] - ELSE - dst[i+31:i] := a[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - - - AVX512F - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + + + AVX512F + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", add the intermediate result to packed elements in "c", and store the results in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - - - AVX512F - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + + + AVX512F + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", add the intermediate result to packed elements in "c", and store the results in - "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit - is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] - ELSE - dst[i+31:i] := c[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - - - AVX512F - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := c[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + + + AVX512F + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", add the intermediate result to packed elements in "c", and store the results in - "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit - is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] - ELSE - dst[i+31:i] := a[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - - - AVX512F - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + + + AVX512F + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", add the intermediate result to packed elements in "c", and store the results in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - - - AVX512F - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + + + AVX512F + AVX512VL +
immintrin.h
+ Arithmetic
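The FMA entries above come in triples that differ only in the fallback lane: copied from "c", copied from "a", or zeroed (the `mask3_`/`mask_`/`maskz_` naming). One model covers all three; the enum is illustrative, not a stdarch type, and `mul_add` reflects the single rounding of the underlying vfmadd instructions even though the pseudocode writes (a*b)+c as two steps:

    #[derive(Clone, Copy)]
    enum Fallback { FromA, FromC, Zero }

    fn fmadd_ps_lanes(a: [f32; 4], b: [f32; 4], c: [f32; 4], k: u8, f: Fallback) -> [f32; 4] {
        core::array::from_fn(|j| {
            if (k >> j) & 1 == 1 {
                a[j].mul_add(b[j], c[j]) // fused multiply-add, one rounding
            } else {
                match f {
                    Fallback::FromA => a[j],
                    Fallback::FromC => c[j],
                    Fallback::Zero => 0.0,
                }
            }
        })
    }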
- - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", alternatively add and subtract packed elements in "c" to/from the intermediate - result, and store the results in "dst" using writemask "k" (elements are copied from "c" - when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - IF ((j & 1) == 0) + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF ((j & 1) == 0) dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] - ELSE + ELSE dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] - FI - ELSE - dst[i+63:i] := c[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - - - AVX512F - AVX512VL -
immintrin.h
- Arithmetic + FI + ELSE + dst[i+63:i] := c[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 +
+ + + + AVX512F + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", alternatively add and subtract packed elements in "c" to/from the intermediate - result, and store the results in "dst" using writemask "k" (elements are copied from "a" - when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - IF ((j & 1) == 0) + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF ((j & 1) == 0) dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] - ELSE + ELSE dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] - FI - ELSE - dst[i+63:i] := a[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - - - AVX512F - AVX512VL -
immintrin.h
- Arithmetic + FI + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 +
+ + + + AVX512F + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", alternatively add and subtract packed elements in "c" to/from the intermediate - result, and store the results in "dst" using zeromask "k" (elements are zeroed out when - the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - IF ((j & 1) == 0) + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF ((j & 1) == 0) dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] - ELSE + ELSE dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] - FI - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - - - AVX512F - AVX512VL -
immintrin.h
- Arithmetic + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 +
+ + + + AVX512F + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", alternatively add and subtract packed elements in "c" to/from the intermediate - result, and store the results in "dst" using writemask "k" (elements are copied from "c" - when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - IF ((j & 1) == 0) + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF ((j & 1) == 0) dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] - ELSE + ELSE dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] - FI - ELSE - dst[i+63:i] := c[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - - - AVX512F - AVX512VL -
immintrin.h
- Arithmetic + FI + ELSE + dst[i+63:i] := c[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 +
+ + + + AVX512F + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", alternatively add and subtract packed elements in "c" to/from the intermediate - result, and store the results in "dst" using writemask "k" (elements are copied from "a" - when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - IF ((j & 1) == 0) + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF ((j & 1) == 0) dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] - ELSE + ELSE dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] - FI - ELSE - dst[i+63:i] := a[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - - - AVX512F - AVX512VL -
immintrin.h
- Arithmetic + FI + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 +
+ + + + AVX512F + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", alternatively add and subtract packed elements in "c" to/from the intermediate - result, and store the results in "dst" using zeromask "k" (elements are zeroed out when - the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - IF ((j & 1) == 0) + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF ((j & 1) == 0) dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] - ELSE + ELSE dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] - FI - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - - - AVX512F - AVX512VL -
immintrin.h
- Arithmetic + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 +
+ + + + AVX512F + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", alternatively add and subtract packed elements in "c" to/from the intermediate - result, and store the results in "dst" using writemask "k" (elements are copied from "c" - when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - IF ((j & 1) == 0) + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF ((j & 1) == 0) dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] - ELSE + ELSE dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] - FI - ELSE - dst[i+31:i] := c[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - - - AVX512F - AVX512VL -
immintrin.h
- Arithmetic + FI + ELSE + dst[i+31:i] := c[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 +
+ + + + AVX512F + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", alternatively add and subtract packed elements in "c" to/from the intermediate - result, and store the results in "dst" using writemask "k" (elements are copied from "a" - when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - IF ((j & 1) == 0) + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF ((j & 1) == 0) dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] - ELSE + ELSE dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] - FI - ELSE - dst[i+31:i] := a[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - - - AVX512F - AVX512VL -
immintrin.h
- Arithmetic + FI + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 +
+ + + + AVX512F + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", alternatively add and subtract packed elements in "c" to/from the intermediate - result, and store the results in "dst" using zeromask "k" (elements are zeroed out when - the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - IF ((j & 1) == 0) + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF ((j & 1) == 0) dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] - ELSE + ELSE dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] - FI - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - - - AVX512F - AVX512VL -
immintrin.h
- Arithmetic + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 +
+ + + + AVX512F + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", alternatively add and subtract packed elements in "c" to/from the intermediate - result, and store the results in "dst" using writemask "k" (elements are copied from "c" - when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - IF ((j & 1) == 0) + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF ((j & 1) == 0) dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] - ELSE + ELSE dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] - FI - ELSE - dst[i+31:i] := c[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - - - AVX512F - AVX512VL -
immintrin.h
- Arithmetic + FI + ELSE + dst[i+31:i] := c[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 +
+ + + + AVX512F + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", alternatively add and subtract packed elements in "c" to/from the intermediate - result, and store the results in "dst" using writemask "k" (elements are copied from "a" - when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - IF ((j & 1) == 0) + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF ((j & 1) == 0) dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] - ELSE + ELSE dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] - FI - ELSE - dst[i+31:i] := a[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - - - AVX512F - AVX512VL -
immintrin.h
- Arithmetic + FI + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 +
+ + + + AVX512F + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", alternatively add and subtract packed elements in "c" to/from the intermediate - result, and store the results in "dst" using zeromask "k" (elements are zeroed out when - the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - IF ((j & 1) == 0) + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF ((j & 1) == 0) dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] - ELSE + ELSE dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] - FI - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - - - AVX512F - AVX512VL -
immintrin.h
- Arithmetic + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 +
+ + + + AVX512F + AVX512VL +
immintrin.h
+ Arithmetic
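FMADDSUB alternates on the lane index, as the `(j & 1)` test in the pseudocode shows: even lanes compute a*b - c, odd lanes a*b + c. A scalar model (helper name illustrative; negating c inside `mul_add` is the usual fused way to express the subtract):

    fn fmaddsub_ps_lanes(a: [f32; 8], b: [f32; 8], c: [f32; 8]) -> [f32; 8] {
        core::array::from_fn(|j| {
            if j & 1 == 0 {
                a[j].mul_add(b[j], -c[j]) // even lane: a*b - c
            } else {
                a[j].mul_add(b[j], c[j]) // odd lane: a*b + c
            }
        })
    }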
- - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", subtract packed elements in "c" from the intermediate result, and store the results - in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask - bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] - ELSE - dst[i+63:i] := c[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - - - AVX512F - AVX512VL -
immintrin.h
+ Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).
+ FOR j := 0 to 3
+   i := j*64
+   IF k[j]
+     dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+   ELSE
+     dst[i+63:i] := c[i+63:i]
+   FI
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+ FOR j := 0 to 3
+   i := j*64
+   IF k[j]
+     dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+   ELSE
+     dst[i+63:i] := a[i+63:i]
+   FI
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ FOR j := 0 to 3
+   i := j*64
+   IF k[j]
+     dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+   ELSE
+     dst[i+63:i] := 0
+   FI
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).
+ FOR j := 0 to 1
+   i := j*64
+   IF k[j]
+     dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+   ELSE
+     dst[i+63:i] := c[i+63:i]
+   FI
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+ FOR j := 0 to 1
+   i := j*64
+   IF k[j]
+     dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+   ELSE
+     dst[i+63:i] := a[i+63:i]
+   FI
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ FOR j := 0 to 1
+   i := j*64
+   IF k[j]
+     dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+   ELSE
+     dst[i+63:i] := 0
+   FI
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic
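The six double-precision FMSUB entries above differ only in vector width and in what the ELSE branch writes back. As a rough scalar model of the 256-bit forms, derived only from the pseudocode (the function and enum names are illustrative, not stdarch API):

#[derive(Clone, Copy)]
enum Merge {
    FromA, // writemask form: unselected lanes copy from "a"
    FromC, // mask3 form: unselected lanes copy from "c"
    Zero,  // zeromask form: unselected lanes become 0.0
}

// Scalar sketch of the masked FMSUB pseudocode: one mask bit per 64-bit lane.
fn fmsub_pd_256_model(a: [f64; 4], b: [f64; 4], c: [f64; 4], k: u8, merge: Merge) -> [f64; 4] {
    core::array::from_fn(|j| {
        if (k >> j) & 1 == 1 {
            a[j].mul_add(b[j], -c[j]) // fused (a * b) - c, single rounding
        } else {
            match merge {
                Merge::FromA => a[j],
                Merge::FromC => c[j],
                Merge::Zero => 0.0,
            }
        }
    })
}

In Intel's C API these entries appear to be the _mm256_mask_fmsub_pd, _mm256_mask3_fmsub_pd and _mm256_maskz_fmsub_pd forms (plus the _mm_ variants for 128 bits); the intrinsic names themselves are not preserved in this hunk.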
+ Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).
+ FOR j := 0 to 7
+   i := j*32
+   IF k[j]
+     dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+   ELSE
+     dst[i+31:i] := c[i+31:i]
+   FI
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+ FOR j := 0 to 7
+   i := j*32
+   IF k[j]
+     dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+   ELSE
+     dst[i+31:i] := a[i+31:i]
+   FI
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ FOR j := 0 to 7
+   i := j*32
+   IF k[j]
+     dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+   ELSE
+     dst[i+31:i] := 0
+   FI
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).
+ FOR j := 0 to 3
+   i := j*32
+   IF k[j]
+     dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+   ELSE
+     dst[i+31:i] := c[i+31:i]
+   FI
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+ FOR j := 0 to 3
+   i := j*32
+   IF k[j]
+     dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+   ELSE
+     dst[i+31:i] := a[i+31:i]
+   FI
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ FOR j := 0 to 3
+   i := j*32
+   IF k[j]
+     dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+   ELSE
+     dst[i+31:i] := 0
+   FI
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic
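The single-precision entries repeat the same pattern with 32-bit lanes; only the lane count, the mask width and the element size change. A zeromask sketch generalized over lane count (illustrative name, assuming nothing beyond the pseudocode):

// LANES is 8 for the 256-bit f32 form above and 4 for the 128-bit f32 form.
fn fmsub_ps_maskz_model<const LANES: usize>(
    a: [f32; LANES],
    b: [f32; LANES],
    c: [f32; LANES],
    k: u16,
) -> [f32; LANES] {
    core::array::from_fn(|j| {
        if (k >> j) & 1 == 1 {
            a[j].mul_add(b[j], -c[j]) // (a * b) - c in selected lanes
        } else {
            0.0 // zeromask: unselected lanes are zeroed
        }
    })
}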
+ Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).
+ FOR j := 0 to 3
+   i := j*64
+   IF k[j]
+     IF ((j & 1) == 0)
+       dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+     ELSE
+       dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+     FI
+   ELSE
+     dst[i+63:i] := c[i+63:i]
+   FI
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+ FOR j := 0 to 3
+   i := j*64
+   IF k[j]
+     IF ((j & 1) == 0)
+       dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+     ELSE
+       dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+     FI
+   ELSE
+     dst[i+63:i] := a[i+63:i]
+   FI
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ FOR j := 0 to 3
+   i := j*64
+   IF k[j]
+     IF ((j & 1) == 0)
+       dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+     ELSE
+       dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+     FI
+   ELSE
+     dst[i+63:i] := 0
+   FI
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).
+ FOR j := 0 to 1
+   i := j*64
+   IF k[j]
+     IF ((j & 1) == 0)
+       dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+     ELSE
+       dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+     FI
+   ELSE
+     dst[i+63:i] := c[i+63:i]
+   FI
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+ FOR j := 0 to 1
+   i := j*64
+   IF k[j]
+     IF ((j & 1) == 0)
+       dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+     ELSE
+       dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+     FI
+   ELSE
+     dst[i+63:i] := a[i+63:i]
+   FI
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ FOR j := 0 to 1
+   i := j*64
+   IF k[j]
+     IF ((j & 1) == 0)
+       dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+     ELSE
+       dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+     FI
+   ELSE
+     dst[i+63:i] := 0
+   FI
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic
+ Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).
+ FOR j := 0 to 7
+   i := j*32
+   IF k[j]
+     IF ((j & 1) == 0)
+       dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+     ELSE
+       dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+     FI
+   ELSE
+     dst[i+31:i] := c[i+31:i]
+   FI
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+ FOR j := 0 to 7
+   i := j*32
+   IF k[j]
+     IF ((j & 1) == 0)
+       dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+     ELSE
+       dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+     FI
+   ELSE
+     dst[i+31:i] := a[i+31:i]
+   FI
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ FOR j := 0 to 7
+   i := j*32
+   IF k[j]
+     IF ((j & 1) == 0)
+       dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+     ELSE
+       dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+     FI
+   ELSE
+     dst[i+31:i] := 0
+   FI
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).
+ FOR j := 0 to 3
+   i := j*32
+   IF k[j]
+     IF ((j & 1) == 0)
+       dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+     ELSE
+       dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+     FI
+   ELSE
+     dst[i+31:i] := c[i+31:i]
+   FI
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+ FOR j := 0 to 3
+   i := j*32
+   IF k[j]
+     IF ((j & 1) == 0)
+       dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+     ELSE
+       dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+     FI
+   ELSE
+     dst[i+31:i] := a[i+31:i]
+   FI
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ FOR j := 0 to 3
+   i := j*32
+   IF k[j]
+     IF ((j & 1) == 0)
+       dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+     ELSE
+       dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+     FI
+   ELSE
+     dst[i+31:i] := 0
+   FI
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic
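The FMSUBADD entries apply the mask merge after an even/odd split: even lanes compute (a * b) + c, odd lanes (a * b) - c. A scalar sketch of the 256-bit double-precision "c"-merging form (illustrative name, modeled only on the pseudocode):

fn fmsubadd_pd_256_model(a: [f64; 4], b: [f64; 4], c: [f64; 4], k: u8) -> [f64; 4] {
    core::array::from_fn(|j| {
        if (k >> j) & 1 == 1 {
            if j & 1 == 0 {
                a[j].mul_add(b[j], c[j]) // even lanes: (a * b) + c
            } else {
                a[j].mul_add(b[j], -c[j]) // odd lanes: (a * b) - c
            }
        } else {
            c[j] // the "a"-merging and zeroing variants differ only here
        }
    })
}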
+ Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).
+ FOR j := 0 to 3
+   i := j*64
+   IF k[j]
+     dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i]
+   ELSE
+     dst[i+63:i] := c[i+63:i]
+   FI
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+ FOR j := 0 to 3
+   i := j*64
+   IF k[j]
+     dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i]
+   ELSE
+     dst[i+63:i] := a[i+63:i]
+   FI
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ FOR j := 0 to 3
+   i := j*64
+   IF k[j]
+     dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i]
+   ELSE
+     dst[i+63:i] := 0
+   FI
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).
+ FOR j := 0 to 1
+   i := j*64
+   IF k[j]
+     dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i]
+   ELSE
+     dst[i+63:i] := c[i+63:i]
+   FI
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+ FOR j := 0 to 1
+   i := j*64
+   IF k[j]
+     dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i]
+   ELSE
+     dst[i+63:i] := a[i+63:i]
+   FI
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ FOR j := 0 to 1
+   i := j*64
+   IF k[j]
+     dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i]
+   ELSE
+     dst[i+63:i] := 0
+   FI
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic
+ Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).
+ FOR j := 0 to 7
+   i := j*32
+   IF k[j]
+     dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i]
+   ELSE
+     dst[i+31:i] := c[i+31:i]
+   FI
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+ FOR j := 0 to 7
+   i := j*32
+   IF k[j]
+     dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i]
+   ELSE
+     dst[i+31:i] := a[i+31:i]
+   FI
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ FOR j := 0 to 7
+   i := j*32
+   IF k[j]
+     dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i]
+   ELSE
+     dst[i+31:i] := 0
+   FI
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).
+ FOR j := 0 to 3
+   i := j*32
+   IF k[j]
+     dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i]
+   ELSE
+     dst[i+31:i] := c[i+31:i]
+   FI
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+ FOR j := 0 to 3
+   i := j*32
+   IF k[j]
+     dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i]
+   ELSE
+     dst[i+31:i] := a[i+31:i]
+   FI
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ FOR j := 0 to 3
+   i := j*32
+   IF k[j]
+     dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i]
+   ELSE
+     dst[i+31:i] := 0
+   FI
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic
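FNMADD negates the product before the addition, so the selected lanes compute -(a * b) + c. In a scalar sketch this is a fused multiply-add with the first operand negated (illustrative name, modeled only on the pseudocode):

fn fnmadd_pd_256_model(a: [f64; 4], b: [f64; 4], c: [f64; 4], k: u8) -> [f64; 4] {
    core::array::from_fn(|j| {
        if (k >> j) & 1 == 1 {
            (-a[j]).mul_add(b[j], c[j]) // -(a * b) + c with a single rounding
        } else {
            c[j] // "c"-merging form shown; "a"-merging and zeroing are analogous
        }
    })
}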
+ Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).
+ FOR j := 0 to 3
+   i := j*64
+   IF k[j]
+     dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i]
+   ELSE
+     dst[i+63:i] := c[i+63:i]
+   FI
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+ FOR j := 0 to 3
+   i := j*64
+   IF k[j]
+     dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i]
+   ELSE
+     dst[i+63:i] := a[i+63:i]
+   FI
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ FOR j := 0 to 3
+   i := j*64
+   IF k[j]
+     dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i]
+   ELSE
+     dst[i+63:i] := 0
+   FI
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).
+ FOR j := 0 to 1
+   i := j*64
+   IF k[j]
+     dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i]
+   ELSE
+     dst[i+63:i] := c[i+63:i]
+   FI
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+ FOR j := 0 to 1
+   i := j*64
+   IF k[j]
+     dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i]
+   ELSE
+     dst[i+63:i] := a[i+63:i]
+   FI
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ FOR j := 0 to 1
+   i := j*64
+   IF k[j]
+     dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i]
+   ELSE
+     dst[i+63:i] := 0
+   FI
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic
+ Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).
+ FOR j := 0 to 7
+   i := j*32
+   IF k[j]
+     dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i]
+   ELSE
+     dst[i+31:i] := c[i+31:i]
+   FI
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+ FOR j := 0 to 7
+   i := j*32
+   IF k[j]
+     dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i]
+   ELSE
+     dst[i+31:i] := a[i+31:i]
+   FI
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ FOR j := 0 to 7
+   i := j*32
+   IF k[j]
+     dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i]
+   ELSE
+     dst[i+31:i] := 0
+   FI
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).
+ FOR j := 0 to 3
+   i := j*32
+   IF k[j]
+     dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i]
+   ELSE
+     dst[i+31:i] := c[i+31:i]
+   FI
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+ FOR j := 0 to 3
+   i := j*32
+   IF k[j]
+     dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i]
+   ELSE
+     dst[i+31:i] := a[i+31:i]
+   FI
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ FOR j := 0 to 3
+   i := j*32
+   IF k[j]
+     dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i]
+   ELSE
+     dst[i+31:i] := 0
+   FI
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic
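FNMSUB combines both negations: selected lanes compute -(a * b) - c. The same sketch with the addend negated as well (illustrative name, modeled only on the pseudocode):

fn fnmsub_pd_256_model(a: [f64; 4], b: [f64; 4], c: [f64; 4], k: u8) -> [f64; 4] {
    core::array::from_fn(|j| {
        if (k >> j) & 1 == 1 {
            (-a[j]).mul_add(b[j], -c[j]) // -(a * b) - c with a single rounding
        } else {
            c[j] // "c"-merging form shown; "a"-merging and zeroing are analogous
        }
    })
}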
+ Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [max_float_note]
+ FOR j := 0 to 3
+   i := j*64
+   IF k[j]
+     dst[i+63:i] := MAX(a[i+63:i], b[i+63:i])
+   ELSE
+     dst[i+63:i] := src[i+63:i]
+   FI
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [max_float_note]
+ FOR j := 0 to 3
+   i := j*64
+   IF k[j]
+     dst[i+63:i] := MAX(a[i+63:i], b[i+63:i])
+   ELSE
+     dst[i+63:i] := 0
+   FI
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [max_float_note]
+ FOR j := 0 to 1
+   i := j*64
+   IF k[j]
+     dst[i+63:i] := MAX(a[i+63:i], b[i+63:i])
+   ELSE
+     dst[i+63:i] := src[i+63:i]
+   FI
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [max_float_note]
+ FOR j := 0 to 1
+   i := j*64
+   IF k[j]
+     dst[i+63:i] := MAX(a[i+63:i], b[i+63:i])
+   ELSE
+     dst[i+63:i] := 0
+   FI
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [max_float_note]
+ FOR j := 0 to 7
+   i := j*32
+   IF k[j]
+     dst[i+31:i] := MAX(a[i+31:i], b[i+31:i])
+   ELSE
+     dst[i+31:i] := src[i+31:i]
+   FI
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [max_float_note]
+ FOR j := 0 to 7
+   i := j*32
+   IF k[j]
+     dst[i+31:i] := MAX(a[i+31:i], b[i+31:i])
+   ELSE
+     dst[i+31:i] := 0
+   FI
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [max_float_note]
+ FOR j := 0 to 3
+   i := j*32
+   IF k[j]
+     dst[i+31:i] := MAX(a[i+31:i], b[i+31:i])
+   ELSE
+     dst[i+31:i] := src[i+31:i]
+   FI
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic

+ Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [max_float_note]
+ FOR j := 0 to 3
+   i := j*32
+   IF k[j]
+     dst[i+31:i] := MAX(a[i+31:i], b[i+31:i])
+   ELSE
+     dst[i+31:i] := 0
+   FI
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512F, AVX512VL
+ immintrin.h
+ Arithmetic
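The [max_float_note] caveat matters here: MAX(a, b) is defined as "a > b ? a : b", so when either input is NaN, or when the inputs are +0.0 and -0.0, the second operand "b" is returned instead of the IEEE 754 maxNum result. A scalar sketch of the writemask form that reproduces exactly that behaviour (illustrative name):

fn max_pd_256_mask_model(src: [f64; 4], k: u8, a: [f64; 4], b: [f64; 4]) -> [f64; 4] {
    core::array::from_fn(|j| {
        if (k >> j) & 1 == 1 {
            // Ordered comparison: false for NaN, so "b" flows through.
            if a[j] > b[j] { a[j] } else { b[j] }
        } else {
            src[j] // the zeromask variant writes 0.0 here instead
        }
    })
}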
- - - - - - Compare packed double-precision (64-bit) floating-point elements in "a" and - "b", and store packed minimum values in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). [min_float_note] - - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := MIN(a[i+63:i], b[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [min_float_note] + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := MIN(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Compare packed double-precision (64-bit) floating-point elements in "a" and - "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). [min_float_note] - - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := MIN(a[i+63:i], b[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
	Arithmetic (AVX512F + AVX512VL)

	Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [min_float_note]
	Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [min_float_note]
	Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [min_float_note]
	Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [min_float_note]

	Each form is listed at 256-bit and 128-bit width. Operation for the 256-bit double-precision writemask form (the zeromask forms write 0 in place of src[i+63:i]):

	FOR j := 0 to 3
		i := j*64
		IF k[j]
			dst[i+63:i] := MIN(a[i+63:i], b[i+63:i])
		ELSE
			dst[i+63:i] := src[i+63:i]
		FI
	ENDFOR
	dst[MAX:256] := 0

	The 128-bit double-precision forms loop j := 0 to 1 and zero dst[MAX:128]; the single-precision forms use 32-bit lanes (i := j*32) with j := 0 to 7 (256-bit) or j := 0 to 3 (128-bit).
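The writemask/zeromask split above is the pattern every masked entry in this file follows. A minimal scalar model in Rust (helper names ours, for illustration; the comparison is written out rather than using f64::min because x86 MINPD returns the second operand whenever the comparison is false, including the NaN cases, which is what [min_float_note] covers):

// Scalar model of the 256-bit masked MIN on f64 lanes (4 lanes; only the
// low 4 bits of the 8-bit mask participate).
fn mask_min_pd_model(src: [f64; 4], k: u8, a: [f64; 4], b: [f64; 4]) -> [f64; 4] {
    let mut dst = src; // writemask: unselected lanes keep src
    for j in 0..4 {
        if k & (1 << j) != 0 {
            // MINPD semantics: b is returned when a < b is false,
            // including when either input is NaN.
            dst[j] = if a[j] < b[j] { a[j] } else { b[j] };
        }
    }
    dst
}

fn maskz_min_pd_model(k: u8, a: [f64; 4], b: [f64; 4]) -> [f64; 4] {
    // zeromask: same selection, but unselected lanes become 0.0
    mask_min_pd_model([0.0; 4], k, a, b)
}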
immintrin.h
	Arithmetic (AVX512F + AVX512VL)

	Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
	Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
	Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
	Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

	Each form is listed at 256-bit and 128-bit width. Operation for the 256-bit double-precision writemask form (the zeromask forms write 0 in place of src[i+63:i]):

	FOR j := 0 to 3
		i := j*64
		IF k[j]
			dst[i+63:i] := a[i+63:i] * b[i+63:i]
		ELSE
			dst[i+63:i] := src[i+63:i]
		FI
	ENDFOR
	dst[MAX:256] := 0

	The 128-bit double-precision forms loop j := 0 to 1 and zero dst[MAX:128]; the single-precision forms use 32-bit lanes (i := j*32) with j := 0 to 7 (256-bit) or j := 0 to 3 (128-bit).
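For the multiply entries, the corresponding Rust intrinsics already exist in this crate; a usage sketch (nightly-only at the time of this series, behind the stdarch_x86_avx512 feature, and the caller is responsible for checking CPU support before invoking it):

#![feature(stdarch_x86_avx512)]
use std::arch::x86_64::*;

// _mm256_mask_mul_pd keeps src in lanes whose mask bit is clear;
// _mm256_maskz_mul_pd zeroes them instead (the two variants above).
#[target_feature(enable = "avx512f,avx512vl")]
unsafe fn masked_mul_demo(
    src: __m256d,
    k: __mmask8,
    a: __m256d,
    b: __m256d,
) -> (__m256d, __m256d) {
    (_mm256_mask_mul_pd(src, k, a, b), _mm256_maskz_mul_pd(k, a, b))
}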
immintrin.h
	Arithmetic (AVX512F + AVX512VL)

	Compute the absolute value of packed signed 32-bit integers in "a", and store the unsigned results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
	Compute the absolute value of packed signed 32-bit integers in "a", and store the unsigned results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

	Operation for the 256-bit writemask form (the zeromask form writes 0 in place of src[i+31:i]; the 128-bit forms loop j := 0 to 3 and zero dst[MAX:128]):

	FOR j := 0 to 7
		i := j*32
		IF k[j]
			dst[i+31:i] := ABS(a[i+31:i])
		ELSE
			dst[i+31:i] := src[i+31:i]
		FI
	ENDFOR
	dst[MAX:256] := 0
immintrin.h
	Arithmetic (AVX512F + AVX512VL)

	Compute the absolute value of packed signed 64-bit integers in "a", and store the unsigned results in "dst".
	Compute the absolute value of packed signed 64-bit integers in "a", and store the unsigned results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
	Compute the absolute value of packed signed 64-bit integers in "a", and store the unsigned results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

	Operation for the 256-bit writemask form (the unmasked form assigns every lane unconditionally, the zeromask form writes 0 in place of src[i+63:i]; the 128-bit forms loop j := 0 to 1 and zero dst[MAX:128]):

	FOR j := 0 to 3
		i := j*64
		IF k[j]
			dst[i+63:i] := ABS(a[i+63:i])
		ELSE
			dst[i+63:i] := src[i+63:i]
		FI
	ENDFOR
	dst[MAX:256] := 0
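One wrinkle in the ABS entries: the pseudocode stores "unsigned results" because the most negative input has no signed counterpart, and the instruction leaves its bit pattern unchanged. In a scalar Rust model (our helper name) that is exactly wrapping_abs:

// Masked 64-bit absolute value, 4 lanes. i64::MIN maps to itself, matching
// the unsigned-result wording of the pseudocode's ABS.
fn mask_abs_epi64_model(src: [i64; 4], k: u8, a: [i64; 4]) -> [i64; 4] {
    let mut dst = src;
    for j in 0..4 {
        if k & (1 << j) != 0 {
            dst[j] = a[j].wrapping_abs();
        }
    }
    dst
}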
immintrin.h
	Arithmetic (AVX512F + AVX512VL)

	Add packed 32-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
	Add packed 32-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
	Add packed 64-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
	Add packed 64-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

	Each form is listed at 256-bit and 128-bit width. Operation for the 256-bit 32-bit-integer writemask form (the zeromask forms write 0 in place of src[i+31:i]):

	FOR j := 0 to 7
		i := j*32
		IF k[j]
			dst[i+31:i] := a[i+31:i] + b[i+31:i]
		ELSE
			dst[i+31:i] := src[i+31:i]
		FI
	ENDFOR
	dst[MAX:256] := 0

	The 128-bit 32-bit-integer forms loop j := 0 to 3; the 64-bit-integer forms use i := j*64 with j := 0 to 3 (256-bit) or j := 0 to 1 (128-bit), and the 128-bit forms zero dst[MAX:128].
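The packed adds are modular (lane overflow wraps), so a faithful scalar model uses wrapping_add rather than `+`, which would panic on overflow in debug builds:

// Zero-masked 32-bit add, 8 lanes (the 256-bit form above).
fn maskz_add_epi32_model(k: u8, a: [i32; 8], b: [i32; 8]) -> [i32; 8] {
    let mut dst = [0i32; 8]; // zeromask: unselected lanes are already 0
    for j in 0..8 {
        if k & (1 << j) != 0 {
            dst[j] = a[j].wrapping_add(b[j]);
        }
    }
    dst
}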
immintrin.h
	Arithmetic (AVX512F + AVX512VL)

	Compare packed signed 32-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
	Compare packed signed 32-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
	Compare packed signed 64-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
	Compare packed signed 64-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
	Compare packed signed 64-bit integers in "a" and "b", and store packed maximum values in "dst".

	Each masked form is listed at 256-bit and 128-bit width; the unmasked entries exist only for the 64-bit element size. Operation for the 256-bit signed 32-bit writemask form (the zeromask forms write 0 in place of src[i+31:i], the unmasked forms assign every lane unconditionally):

	FOR j := 0 to 7
		i := j*32
		IF k[j]
			dst[i+31:i] := MAX(a[i+31:i], b[i+31:i])
		ELSE
			dst[i+31:i] := src[i+31:i]
		FI
	ENDFOR
	dst[MAX:256] := 0

	The 128-bit 32-bit forms loop j := 0 to 3; the 64-bit forms use i := j*64 with j := 0 to 3 (256-bit) or j := 0 to 1 (128-bit), and the 128-bit forms zero dst[MAX:128].
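Note the unmasked 64-bit MAX entries: packed 64-bit integer min/max first appear with AVX512F+AVX512VL (presumably the _mm256_max_epi64/_mm_max_epi64 family, though the intrinsic names are not preserved in this hunk), so even the plain forms carry these CPUID flags. Per lane the operation is ordinary signed comparison, as in this sketch:

// Masked signed 64-bit MAX, 4 lanes; per lane it is plain Ord::max.
fn mask_max_epi64_model(src: [i64; 4], k: u8, a: [i64; 4], b: [i64; 4]) -> [i64; 4] {
    let mut dst = src;
    for j in 0..4 {
        if k & (1 << j) != 0 {
            dst[j] = a[j].max(b[j]);
        }
    }
    dst
}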
immintrin.h
	Arithmetic (AVX512F + AVX512VL)

	Compare packed unsigned 32-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
	Compare packed unsigned 32-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
	Compare packed unsigned 64-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
	Compare packed unsigned 64-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
	Compare packed unsigned 64-bit integers in "a" and "b", and store packed maximum values in "dst".

	These entries mirror the signed MAX entries above lane for lane (same widths, same loop bounds, same mask behavior); MAX compares the lanes as unsigned integers.
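Signedness is the entire difference between the signed and unsigned entries: the same bit pattern can order differently. A two-line demonstration:

fn main() {
    let a = 0x8000_0000u32; // reinterprets as i32::MIN
    let b = 1u32;
    assert_eq!(a.max(b), a); // unsigned MAX keeps 0x8000_0000
    assert_eq!((a as i32).max(b as i32), 1); // signed MAX picks 1
}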
immintrin.h
	Arithmetic (AVX512F + AVX512VL)

	Compare packed signed 32-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
	Compare packed signed 32-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
	Compare packed signed 64-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
	Compare packed signed 64-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
	Compare packed signed 64-bit integers in "a" and "b", and store packed minimum values in "dst".

	Same widths, loop bounds, and mask behavior as the signed MAX entries above, with MIN in place of MAX as the per-lane operation.
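Every masked entry in this hunk is the same two-step recipe: compute all lanes, then select per lane between the result and a fallback (src for writemask, zero for zeromask). A generic sketch of that shared shape (an illustrative factoring, not how the crate organizes it):

// Per-lane mask select: lane j takes res[j] when mask bit j is set,
// otherwise the fallback lane. Assumes N <= 16 so the shift cannot overflow.
fn mask_select<T: Copy, const N: usize>(k: u16, res: [T; N], fallback: [T; N]) -> [T; N] {
    let mut dst = fallback;
    for j in 0..N {
        if (k >> j) & 1 != 0 {
            dst[j] = res[j];
        }
    }
    dst
}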
immintrin.h
	Arithmetic (AVX512F + AVX512VL)

	Compare packed unsigned 32-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
	Compare packed unsigned 32-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
	Compare packed unsigned 64-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
	Compare packed unsigned 64-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
	Compare packed unsigned 64-bit integers in "a" and "b", and store packed minimum values in "dst".

	Same widths, loop bounds, and mask behavior as the entries above; MIN compares the lanes as unsigned integers.
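One detail the pseudocode leaves implicit: the narrow forms consume only the low mask bits (j stops at 1 for the 128-bit 64-bit-lane forms), and the upper bits of "k" are simply ignored. Scalar model of the 128-bit zero-masked unsigned MIN:

// Zero-masked unsigned 64-bit MIN, 2 lanes; bits 2..8 of k are ignored.
fn maskz_min_epu64_model(k: u8, a: [u64; 2], b: [u64; 2]) -> [u64; 2] {
    let mut dst = [0u64; 2];
    for j in 0..2 {
        if k & (1 << j) != 0 {
            dst[j] = a[j].min(b[j]);
        }
    }
    dst
}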
immintrin.h
	Arithmetic (AVX512F + AVX512VL)

	Multiply the low signed 32-bit integers from each packed 64-bit element in "a" and "b", and store the signed 64-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
	Multiply the low signed 32-bit integers from each packed 64-bit element in "a" and "b", and store the signed 64-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

	Operation for the 256-bit writemask form (the zeromask form writes 0 in place of src[i+63:i]; the 128-bit forms loop j := 0 to 1 and zero dst[MAX:128]):

	FOR j := 0 to 3
		i := j*64
		IF k[j]
			dst[i+63:i] := SignExtend64(a[i+31:i]) * SignExtend64(b[i+31:i])
		ELSE
			dst[i+63:i] := src[i+63:i]
		FI
	ENDFOR
	dst[MAX:256] := 0
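SignExtend64 here means the multiply consumes only the low signed 32 bits of each 64-bit lane and produces the full 64-bit product (the VPMULDQ operation). In scalar Rust the truncate-then-widen is explicit:

// One lane of the widening signed multiply above.
fn mul_epi32_lane(a: i64, b: i64) -> i64 {
    let lo_a = a as i32 as i64; // SignExtend64(a[i+31:i])
    let lo_b = b as i32 as i64; // SignExtend64(b[i+31:i])
    lo_a * lo_b // cannot overflow: |product| <= 2^62
}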
immintrin.h
	Arithmetic (AVX512F + AVX512VL)

	Multiply the packed 32-bit integers in "a" and "b", producing intermediate 64-bit integers, and store the low 32 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
	Multiply the packed 32-bit integers in "a" and "b", producing intermediate 64-bit integers, and store the low 32 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

	Operation for the 256-bit writemask form (the zeromask form writes 0 in place of src[i+31:i]; the 128-bit forms loop j := 0 to 3 and zero dst[MAX:128]):

	FOR j := 0 to 7
		i := j*32
		IF k[j]
			tmp[63:0] := a[i+31:i] * b[i+31:i]
			dst[i+31:i] := tmp[31:0]
		ELSE
			dst[i+31:i] := src[i+31:i]
		FI
	ENDFOR
	dst[MAX:256] := 0
+ Arithmetic
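Unlike the widening multiply above, mullo forms the full 64-bit product and keeps only its low half, so signed and unsigned inputs give the same result bits. A scalar sketch of the 128-bit writemask form (illustrative name, not a stdarch binding):

    fn mask_mullo_epi32_model(src: [i32; 4], k: u8, a: [i32; 4], b: [i32; 4]) -> [i32; 4] {
        let mut dst = src; // writemask: start from src, overwrite selected lanes
        for j in 0..4 {
            if k & (1 << j) != 0 {
                let tmp = (a[j] as i64) * (b[j] as i64); // tmp[63:0]
                dst[j] = tmp as i32; // keep tmp[31:0]
            }
        }
        dst
    }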
+Multiply the low unsigned 32-bit integers from each packed 64-bit element in "a" and "b", and store the unsigned 64-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 3
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := a[i+31:i] * b[i+31:i]
+    ELSE
+        dst[i+63:i] := src[i+63:i]
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
+
+Multiply the low unsigned 32-bit integers from each packed 64-bit element in "a" and "b", and store the unsigned 64-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 3
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := a[i+31:i] * b[i+31:i]
+    ELSE
+        dst[i+63:i] := 0
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
+
+Multiply the low unsigned 32-bit integers from each packed 64-bit element in "a" and "b", and store the unsigned 64-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 1
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := a[i+31:i] * b[i+31:i]
+    ELSE
+        dst[i+63:i] := src[i+63:i]
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
+
+Multiply the low unsigned 32-bit integers from each packed 64-bit element in "a" and "b", and store the unsigned 64-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 1
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := a[i+31:i] * b[i+31:i]
+    ELSE
+        dst[i+63:i] := 0
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
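The unsigned counterpart zero-extends the low 32 bits of each lane instead of sign-extending them. A scalar sketch of the 128-bit zeromask form (illustrative name):

    fn maskz_mul_epu32_model(k: u8, a: [u64; 2], b: [u64; 2]) -> [u64; 2] {
        let mut dst = [0u64; 2]; // zeromask: masked-off lanes stay 0
        for j in 0..2 {
            if k & (1 << j) != 0 {
                // a[i+31:i] * b[i+31:i], both zero-extended to 64 bits
                dst[j] = (a[j] as u32 as u64) * (b[j] as u32 as u64);
            }
        }
        dst
    }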
+Subtract packed 32-bit integers in "b" from packed 32-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 7
+    i := j*32
+    IF k[j]
+        dst[i+31:i] := a[i+31:i] - b[i+31:i]
+    ELSE
+        dst[i+31:i] := src[i+31:i]
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
+
+Subtract packed 32-bit integers in "b" from packed 32-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 7
+    i := j*32
+    IF k[j]
+        dst[i+31:i] := a[i+31:i] - b[i+31:i]
+    ELSE
+        dst[i+31:i] := 0
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
+
+Subtract packed 32-bit integers in "b" from packed 32-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 3
+    i := j*32
+    IF k[j]
+        dst[i+31:i] := a[i+31:i] - b[i+31:i]
+    ELSE
+        dst[i+31:i] := src[i+31:i]
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
+
+Subtract packed 32-bit integers in "b" from packed 32-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 3
+    i := j*32
+    IF k[j]
+        dst[i+31:i] := a[i+31:i] - b[i+31:i]
+    ELSE
+        dst[i+31:i] := 0
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
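Assuming the stdarch bindings that mirror these Intel names are available (as the rest of this series expects), a hedged usage sketch of the 256-bit writemask subtract:

    use core::arch::x86_64::*;

    #[target_feature(enable = "avx512f,avx512vl")]
    unsafe fn demo_mask_sub_epi32() -> __m256i {
        let src = _mm256_set1_epi32(-1);
        let a = _mm256_set1_epi32(10);
        let b = _mm256_set1_epi32(3);
        // k = 0b0000_1111: lanes 0..=3 become 10 - 3 = 7, lanes 4..=7 copy -1 from src
        _mm256_mask_sub_epi32(src, 0b0000_1111, a, b)
    }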
+Subtract packed 64-bit integers in "b" from packed 64-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 3
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := a[i+63:i] - b[i+63:i]
+    ELSE
+        dst[i+63:i] := src[i+63:i]
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
+
+Subtract packed 64-bit integers in "b" from packed 64-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 3
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := a[i+63:i] - b[i+63:i]
+    ELSE
+        dst[i+63:i] := 0
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
+
+Subtract packed 64-bit integers in "b" from packed 64-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 1
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := a[i+63:i] - b[i+63:i]
+    ELSE
+        dst[i+63:i] := src[i+63:i]
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
+
+Subtract packed 64-bit integers in "b" from packed 64-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 1
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := a[i+63:i] - b[i+63:i]
+    ELSE
+        dst[i+63:i] := 0
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
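The 64-bit forms behave the same way with two or four lanes; only the low mask bits participate. A scalar sketch of the 128-bit zeromask form (illustrative name):

    fn maskz_sub_epi64_model(k: u8, a: [i64; 2], b: [i64; 2]) -> [i64; 2] {
        let mut dst = [0i64; 2];
        for j in 0..2 {
            if k & (1 << j) != 0 {
                dst[j] = a[j].wrapping_sub(b[j]); // lane-wise two's-complement subtract
            }
        }
        // with only 2 lanes, mask bits k[7:2] are simply ignored
        dst
    }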
+Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.
+
+FOR j := 0 to 3
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := (1.0 / a[i+63:i])
+    ELSE
+        dst[i+63:i] := src[i+63:i]
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
+
+Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.
+
+FOR j := 0 to 3
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := (1.0 / a[i+63:i])
+    ELSE
+        dst[i+63:i] := 0
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
+
+Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-14.
+
+FOR j := 0 to 3
+    i := j*64
+    dst[i+63:i] := (1.0 / a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
+
+Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.
+
+FOR j := 0 to 1
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := (1.0 / a[i+63:i])
+    ELSE
+        dst[i+63:i] := src[i+63:i]
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
+
+Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.
+
+FOR j := 0 to 1
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := (1.0 / a[i+63:i])
+    ELSE
+        dst[i+63:i] := 0
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
+
+Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-14.
+
+FOR j := 0 to 1
+    i := j*64
+    dst[i+63:i] := (1.0 / a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
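The 2^-14 bound on relative error amounts to roughly 14 correct bits of the reciprocal. A small checker for that bound, the kind of predicate a test for these intrinsics might use (the approximation itself would come from the rcp14 operation; here it is just a parameter):

    fn within_rcp14_bound(a: f64, approx: f64) -> bool {
        let exact = 1.0 / a;
        ((approx - exact) / exact).abs() < 2.0f64.powi(-14)
    }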
+Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.
+
+FOR j := 0 to 7
+    i := j*32
+    IF k[j]
+        dst[i+31:i] := (1.0 / a[i+31:i])
+    ELSE
+        dst[i+31:i] := src[i+31:i]
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
+
+Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.
+
+FOR j := 0 to 7
+    i := j*32
+    IF k[j]
+        dst[i+31:i] := (1.0 / a[i+31:i])
+    ELSE
+        dst[i+31:i] := 0
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
+
+Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-14.
+
+FOR j := 0 to 7
+    i := j*32
+    dst[i+31:i] := (1.0 / a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
+
+Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.
+
+FOR j := 0 to 3
+    i := j*32
+    IF k[j]
+        dst[i+31:i] := (1.0 / a[i+31:i])
+    ELSE
+        dst[i+31:i] := src[i+31:i]
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
+
+Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.
+
+FOR j := 0 to 3
+    i := j*32
+    IF k[j]
+        dst[i+31:i] := (1.0 / a[i+31:i])
+    ELSE
+        dst[i+31:i] := 0
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
+
+Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-14.
+
+FOR j := 0 to 3
+    i := j*32
+    dst[i+31:i] := (1.0 / a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
+Compute the approximate reciprocal square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-14.
+
+FOR j := 0 to 3
+    i := j*64
+    dst[i+63:i] := (1.0 / SQRT(a[i+63:i]))
+ENDFOR
+dst[MAX:256] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
+
+Compute the approximate reciprocal square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.
+
+FOR j := 0 to 3
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := (1.0 / SQRT(a[i+63:i]))
+    ELSE
+        dst[i+63:i] := src[i+63:i]
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
+
+Compute the approximate reciprocal square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.
+
+FOR j := 0 to 3
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := (1.0 / SQRT(a[i+63:i]))
+    ELSE
+        dst[i+63:i] := 0
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
+
+Compute the approximate reciprocal square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-14.
+
+FOR j := 0 to 1
+    i := j*64
+    dst[i+63:i] := (1.0 / SQRT(a[i+63:i]))
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
+
+Compute the approximate reciprocal square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.
+
+FOR j := 0 to 1
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := (1.0 / SQRT(a[i+63:i]))
+    ELSE
+        dst[i+63:i] := src[i+63:i]
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
+
+Compute the approximate reciprocal square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.
+
+FOR j := 0 to 1
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := (1.0 / SQRT(a[i+63:i]))
+    ELSE
+        dst[i+63:i] := 0
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
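A 14-bit reciprocal square root is usually refined rather than used as-is; one Newton-Raphson step roughly doubles the number of correct bits. A scalar sketch of that standard refinement (a common usage pattern, not part of the intrinsic itself):

    // y approximates 1/sqrt(a); one NR iteration improves it
    fn refine_rsqrt(a: f64, y: f64) -> f64 {
        y * (1.5 - 0.5 * a * y * y)
    }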
+Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-14.
+
+FOR j := 0 to 7
+    i := j*32
+    dst[i+31:i] := (1.0 / SQRT(a[i+31:i]))
+ENDFOR
+dst[MAX:256] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
+
+Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.
+
+FOR j := 0 to 7
+    i := j*32
+    IF k[j]
+        dst[i+31:i] := (1.0 / SQRT(a[i+31:i]))
+    ELSE
+        dst[i+31:i] := src[i+31:i]
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
+
+Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.
+
+FOR j := 0 to 7
+    i := j*32
+    IF k[j]
+        dst[i+31:i] := (1.0 / SQRT(a[i+31:i]))
+    ELSE
+        dst[i+31:i] := 0
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
+
+Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-14.
+
+FOR j := 0 to 3
+    i := j*32
+    dst[i+31:i] := (1.0 / SQRT(a[i+31:i]))
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
+
+Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.
+
+FOR j := 0 to 3
+    i := j*32
+    IF k[j]
+        dst[i+31:i] := (1.0 / SQRT(a[i+31:i]))
+    ELSE
+        dst[i+31:i] := src[i+31:i]
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
+
+Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.
+
+FOR j := 0 to 3
+    i := j*32
+    IF k[j]
+        dst[i+31:i] := (1.0 / SQRT(a[i+31:i]))
+    ELSE
+        dst[i+31:i] := 0
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
+Subtract packed double-precision (64-bit) floating-point elements in "b" from packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 3
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := a[i+63:i] - b[i+63:i]
+    ELSE
+        dst[i+63:i] := src[i+63:i]
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
+
+Subtract packed double-precision (64-bit) floating-point elements in "b" from packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 3
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := a[i+63:i] - b[i+63:i]
+    ELSE
+        dst[i+63:i] := 0
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
+
+Subtract packed double-precision (64-bit) floating-point elements in "b" from packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 1
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := a[i+63:i] - b[i+63:i]
+    ELSE
+        dst[i+63:i] := src[i+63:i]
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
+
+Subtract packed double-precision (64-bit) floating-point elements in "b" from packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 1
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := a[i+63:i] - b[i+63:i]
+    ELSE
+        dst[i+63:i] := 0
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
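The writemask and zeromask forms pair naturally in code. Assuming the stdarch bindings matching these Intel names, a hedged sketch that produces both results for comparison:

    use core::arch::x86_64::*;

    #[target_feature(enable = "avx512f,avx512vl")]
    unsafe fn demo_sub_pd(src: __m256d, k: __mmask8, a: __m256d, b: __m256d) -> (__m256d, __m256d) {
        (
            _mm256_mask_sub_pd(src, k, a, b), // masked-off lanes copy from src
            _mm256_maskz_sub_pd(k, a, b),     // masked-off lanes become +0.0
        )
    }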
+Subtract packed single-precision (32-bit) floating-point elements in "b" from packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 7
+    i := j*32
+    IF k[j]
+        dst[i+31:i] := a[i+31:i] - b[i+31:i]
+    ELSE
+        dst[i+31:i] := src[i+31:i]
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
+
+Subtract packed single-precision (32-bit) floating-point elements in "b" from packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 7
+    i := j*32
+    IF k[j]
+        dst[i+31:i] := a[i+31:i] - b[i+31:i]
+    ELSE
+        dst[i+31:i] := 0
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
+
+Subtract packed single-precision (32-bit) floating-point elements in "b" from packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 3
+    i := j*32
+    IF k[j]
+        dst[i+31:i] := a[i+31:i] - b[i+31:i]
+    ELSE
+        dst[i+31:i] := src[i+31:i]
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
+
+Subtract packed single-precision (32-bit) floating-point elements in "b" from packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 3
+    i := j*32
+    IF k[j]
+        dst[i+31:i] := a[i+31:i] - b[i+31:i]
+    ELSE
+        dst[i+31:i] := 0
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Arithmetic
- - - - - Concatenate "a" and "b" into a 64-byte immediate result, shift the result right - by "imm8" 32-bit elements, and store the low 32 bytes (8 elements) in "dst". - - temp[511:256] := a[255:0] - temp[255:0] := b[255:0] - temp[511:0] := temp[511:0] >> (32*imm8[2:0]) - dst[255:0] := temp[255:0] - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Concatenate "a" and "b" into a 64-byte immediate result, shift the result right by "imm8" 32-bit elements, and store the low 32 bytes (8 elements) in "dst". + +temp[511:256] := a[255:0] +temp[255:0] := b[255:0] +temp[511:0] := temp[511:0] >> (32*imm8[2:0]) +dst[255:0] := temp[255:0] +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - - Concatenate "a" and "b" into a 64-byte immediate result, shift the result right - by "imm8" 32-bit elements, and store the low 32 bytes (8 elements) in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - temp[511:256] := a[255:0] - temp[255:0] := b[255:0] - temp[511:0] := temp[511:0] >> (32*imm8[2:0]) - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := temp[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + + Concatenate "a" and "b" into a 64-byte immediate result, shift the result right by "imm8" 32-bit elements, and store the low 32 bytes (8 elements) in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +temp[511:256] := a[255:0] +temp[255:0] := b[255:0] +temp[511:0] := temp[511:0] >> (32*imm8[2:0]) +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := temp[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - Concatenate "a" and "b" into a 64-byte immediate result, shift the result right - by "imm8" 32-bit elements, and store the low 32 bytes (8 elements) in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - temp[511:256] := a[255:0] - temp[255:0] := b[255:0] - temp[511:0] := temp[511:0] >> (32*imm8[2:0]) - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := temp[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Concatenate "a" and "b" into a 64-byte immediate result, shift the result right by "imm8" 32-bit elements, and store the low 32 bytes (8 elements) in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +temp[511:256] := a[255:0] +temp[255:0] := b[255:0] +temp[511:0] := temp[511:0] >> (32*imm8[2:0]) +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := temp[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Concatenate "a" and "b" into a 32-byte immediate result, shift the result right - by "imm8" 32-bit elements, and store the low 16 bytes (4 elements) in "dst". - - temp[255:128] := a[127:0] - temp[127:0] := b[127:0] - temp[255:0] := temp[255:0] >> (32*imm8[1:0]) - dst[127:0] := temp[127:0] - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Concatenate "a" and "b" into a 32-byte immediate result, shift the result right by "imm8" 32-bit elements, and store the low 16 bytes (4 elements) in "dst". + +temp[255:128] := a[127:0] +temp[127:0] := b[127:0] +temp[255:0] := temp[255:0] >> (32*imm8[1:0]) +dst[127:0] := temp[127:0] +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - - Concatenate "a" and "b" into a 32-byte immediate result, shift the result right - by "imm8" 32-bit elements, and store the low 16 bytes (4 elements) in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - temp[255:128] := a[127:0] - temp[127:0] := b[127:0] - temp[255:0] := temp[255:0] >> (32*imm8[1:0]) - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := temp[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + + Concatenate "a" and "b" into a 32-byte immediate result, shift the result right by "imm8" 32-bit elements, and store the low 16 bytes (4 elements) in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +temp[255:128] := a[127:0] +temp[127:0] := b[127:0] +temp[255:0] := temp[255:0] >> (32*imm8[1:0]) +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := temp[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - Concatenate "a" and "b" into a 32-byte immediate result, shift the result right - by "imm8" 32-bit elements, and store the low 16 bytes (4 elements) in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - temp[255:128] := a[127:0] - temp[127:0] := b[127:0] - temp[255:0] := temp[255:0] >> (32*imm8[1:0]) - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := temp[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Concatenate "a" and "b" into a 32-byte immediate result, shift the result right by "imm8" 32-bit elements, and store the low 16 bytes (4 elements) in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +temp[255:128] := a[127:0] +temp[127:0] := b[127:0] +temp[255:0] := temp[255:0] >> (32*imm8[1:0]) +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := temp[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
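The valignd-style concatenate-and-shift is easiest to read as an element shift over the doubled vector. A scalar model of the 128-bit plain form above ("b" supplies the low half, "a" the high half, and only imm8[1:0] participates; the name is illustrative):

    fn alignr_epi32_model(a: [i32; 4], b: [i32; 4], imm8: u8) -> [i32; 4] {
        let mut temp = [0i32; 8];
        temp[..4].copy_from_slice(&b); // temp[127:0]   := b[127:0]
        temp[4..].copy_from_slice(&a); // temp[255:128] := a[127:0]
        let s = (imm8 & 0b11) as usize; // >> (32*imm8[1:0]) is an s-element shift
        core::array::from_fn(|j| temp[j + s]) // dst[127:0] := temp[127:0] after the shift
    }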
- - - - - Concatenate "a" and "b" into a 64-byte immediate result, shift the result right - by "imm8" 64-bit elements, and store the low 32 bytes (4 elements) in "dst". - - temp[511:256] := a[255:0] - temp[255:0] := b[255:0] - temp[511:0] := temp[511:0] >> (64*imm8[1:0]) - dst[255:0] := temp[255:0] - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Concatenate "a" and "b" into a 64-byte immediate result, shift the result right by "imm8" 64-bit elements, and store the low 32 bytes (4 elements) in "dst". + +temp[511:256] := a[255:0] +temp[255:0] := b[255:0] +temp[511:0] := temp[511:0] >> (64*imm8[1:0]) +dst[255:0] := temp[255:0] +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - - Concatenate "a" and "b" into a 64-byte immediate result, shift the result right - by "imm8" 64-bit elements, and store the low 32 bytes (4 elements) in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - temp[511:256] := a[255:0] - temp[255:0] := b[255:0] - temp[511:0] := temp[511:0] >> (64*imm8[1:0]) - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := temp[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + + Concatenate "a" and "b" into a 64-byte immediate result, shift the result right by "imm8" 64-bit elements, and store the low 32 bytes (4 elements) in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +temp[511:256] := a[255:0] +temp[255:0] := b[255:0] +temp[511:0] := temp[511:0] >> (64*imm8[1:0]) +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := temp[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - Concatenate "a" and "b" into a 64-byte immediate result, shift the result right - by "imm8" 64-bit elements, and store the low 32 bytes (4 elements) in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - temp[511:256] := a[255:0] - temp[255:0] := b[255:0] - temp[511:0] := temp[511:0] >> (64*imm8[1:0]) - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := temp[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Concatenate "a" and "b" into a 64-byte immediate result, shift the result right by "imm8" 64-bit elements, and store the low 32 bytes (4 elements) in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +temp[511:256] := a[255:0] +temp[255:0] := b[255:0] +temp[511:0] := temp[511:0] >> (64*imm8[1:0]) +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := temp[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Concatenate "a" and "b" into a 32-byte immediate result, shift the result right - by "imm8" 64-bit elements, and store the low 16 bytes (2 elements) in "dst". - - temp[255:128] := a[127:0] - temp[127:0] := b[127:0] - temp[255:0] := temp[255:0] >> (64*imm8[0]) - dst[127:0] := temp[127:0] - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Concatenate "a" and "b" into a 32-byte immediate result, shift the result right by "imm8" 64-bit elements, and store the low 16 bytes (2 elements) in "dst". + +temp[255:128] := a[127:0] +temp[127:0] := b[127:0] +temp[255:0] := temp[255:0] >> (64*imm8[0]) +dst[127:0] := temp[127:0] +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - - Concatenate "a" and "b" into a 32-byte immediate result, shift the result right - by "imm8" 64-bit elements, and store the low 16 bytes (2 elements) in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - temp[255:128] := a[127:0] - temp[127:0] := b[127:0] - temp[255:0] := temp[255:0] >> (64*imm8[0]) - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := temp[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + + Concatenate "a" and "b" into a 32-byte immediate result, shift the result right by "imm8" 64-bit elements, and store the low 16 bytes (2 elements) in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +temp[255:128] := a[127:0] +temp[127:0] := b[127:0] +temp[255:0] := temp[255:0] >> (64*imm8[0]) +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := temp[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - Concatenate "a" and "b" into a 32-byte immediate result, shift the result right - by "imm8" 64-bit elements, and store the low 16 bytes (2 elements) in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - temp[255:128] := a[127:0] - temp[127:0] := b[127:0] - temp[255:0] := temp[255:0] >> (64*imm8[0]) - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := temp[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Concatenate "a" and "b" into a 32-byte immediate result, shift the result right by "imm8" 64-bit elements, and store the low 16 bytes (2 elements) in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +temp[255:128] := a[127:0] +temp[127:0] := b[127:0] +temp[255:0] := temp[255:0] >> (64*imm8[0]) +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := temp[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
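The 64-bit variant is identical apart from the element width and the narrower immediate. A scalar model of the 256-bit plain form (illustrative name):

    fn alignr_epi64_model(a: [i64; 4], b: [i64; 4], imm8: u8) -> [i64; 4] {
        let mut temp = [0i64; 8];
        temp[..4].copy_from_slice(&b); // temp[255:0]   := b[255:0]
        temp[4..].copy_from_slice(&a); // temp[511:256] := a[255:0]
        let s = (imm8 & 0b11) as usize; // >> (64*imm8[1:0]) is an s-element shift
        core::array::from_fn(|j| temp[j + s])
    }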
- - - - - Blend packed double-precision (64-bit) floating-point elements from "a" and "b" - using control mask "k", and store the results in "dst". - - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := b[i+63:i] - ELSE - dst[i+63:i] := a[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Blend packed double-precision (64-bit) floating-point elements from "a" and "b" using control mask "k", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := b[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Blend packed double-precision (64-bit) floating-point elements from "a" and "b" - using control mask "k", and store the results in "dst". - - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := b[i+63:i] - ELSE - dst[i+63:i] := a[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Blend packed double-precision (64-bit) floating-point elements from "a" and "b" using control mask "k", and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := b[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Blend packed single-precision (32-bit) floating-point elements from "a" and "b" - using control mask "k", and store the results in "dst". - - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := b[i+31:i] - ELSE - dst[i+31:i] := a[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Blend packed single-precision (32-bit) floating-point elements from "a" and "b" using control mask "k", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := b[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Blend packed single-precision (32-bit) floating-point elements from "a" and "b" - using control mask "k", and store the results in "dst". - - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := b[i+31:i] - ELSE - dst[i+31:i] := a[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Blend packed single-precision (32-bit) floating-point elements from "a" and "b" using control mask "k", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := b[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
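A minimal scalar model of the blend pseudocode above, assuming plain Rust arrays stand in for the SIMD types (the function name is illustrative, not a core_arch API):

    // Scalar model of the masked blend (4 x f64 shown): lane j comes from `b`
    // when mask bit j is set, otherwise from `a`.
    fn mask_blend_pd(k: u8, a: [f64; 4], b: [f64; 4]) -> [f64; 4] {
        core::array::from_fn(|j| if (k >> j) & 1 == 1 { b[j] } else { a[j] })
    }

The 128-bit and single-precision variants are the same picture with 2 or 4/8 lanes.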
+ Miscellaneous
-
-
-        Broadcast the 4 packed single-precision (32-bit) floating-point elements from
-        "a" to all elements of "dst".
-
-FOR j := 0 to 7
-    i := j*32
-    n := (j % 4)*32
-    dst[i+31:i] := a[n+31:n]
-ENDFOR
-dst[MAX:256] := 0
-
-
-        AVX512F
-        AVX512VL
-
immintrin.h
- Miscellaneous + + + Broadcast the 4 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst". + +FOR j := 0 to 7 + i := j*32 + n := (j % 4)*32 + dst[i+31:i] := a[n+31:n] +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
-
-
-
-
-        Broadcast the 4 packed single-precision (32-bit) floating-point elements from
-        "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when
-        the corresponding mask bit is not set).
-
-FOR j := 0 to 7
-    i := j*32
-    n := (j % 4)*32
-    IF k[j]
-        dst[i+31:i] := a[n+31:n]
-    ELSE
-        dst[i+31:i] := src[i+31:i]
-    FI
-ENDFOR
-dst[MAX:256] := 0
-
-
-        AVX512F
-        AVX512VL
-
immintrin.h
- Miscellaneous + + + + + Broadcast the 4 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + n := (j % 4)*32 + IF k[j] + dst[i+31:i] := a[n+31:n] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
-
-
-
-        Broadcast the 4 packed single-precision (32-bit) floating-point elements from
-        "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the
-        corresponding mask bit is not set).
-
-FOR j := 0 to 7
-    i := j*32
-    n := (j % 4)*32
-    IF k[j]
-        dst[i+31:i] := a[n+31:n]
-    ELSE
-        dst[i+31:i] := 0
-    FI
-ENDFOR
-dst[MAX:256] := 0
-
-
-        AVX512F
-        AVX512VL
-
immintrin.h
- Miscellaneous + + + + Broadcast the 4 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + n := (j % 4)*32 + IF k[j] + dst[i+31:i] := a[n+31:n] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
-
-
-        Broadcast the 4 packed 32-bit integers from "a" to all elements of "dst".
-
-FOR j := 0 to 7
-    i := j*32
-    n := (j % 4)*32
-    dst[i+31:i] := a[n+31:n]
-ENDFOR
-dst[MAX:256] := 0
-
-
-        AVX512F
-        AVX512VL
-
immintrin.h
- Miscellaneous + + + Broadcast the 4 packed 32-bit integers from "a" to all elements of "dst". + +FOR j := 0 to 7 + i := j*32 + n := (j % 4)*32 + dst[i+31:i] := a[n+31:n] +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
-
-
-
-
-        Broadcast the 4 packed 32-bit integers from "a" to all elements of "dst" using
-        writemask "k" (elements are copied from "src" when the corresponding mask bit is not
-        set).
-
-FOR j := 0 to 7
-    i := j*32
-    n := (j % 4)*32
-    IF k[j]
-        dst[i+31:i] := a[n+31:n]
-    ELSE
-        dst[i+31:i] := src[i+31:i]
-    FI
-ENDFOR
-dst[MAX:256] := 0
-
-
-        AVX512F
-        AVX512VL
-
immintrin.h
- Miscellaneous + + + + + Broadcast the 4 packed 32-bit integers from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + n := (j % 4)*32 + IF k[j] + dst[i+31:i] := a[n+31:n] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
-
-
-
-        Broadcast the 4 packed 32-bit integers from "a" to all elements of "dst" using
-        zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
-
-FOR j := 0 to 7
-    i := j*32
-    n := (j % 4)*32
-    IF k[j]
-        dst[i+31:i] := a[n+31:n]
-    ELSE
-        dst[i+31:i] := 0
-    FI
-ENDFOR
-dst[MAX:256] := 0
-
-
-        AVX512F
-        AVX512VL
-
immintrin.h
- Miscellaneous + + + + Broadcast the 4 packed 32-bit integers from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + n := (j % 4)*32 + IF k[j] + dst[i+31:i] := a[n+31:n] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
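A minimal scalar model of the 4-element broadcast above, assuming plain Rust arrays in place of the vector types (the name is illustrative, not a core_arch API):

    // Scalar model of broadcast_f32x4 into a 256-bit destination: lane j of
    // the result reads lane j % 4 of the 128-bit source. broadcast_i32x4 is
    // the same picture with i32 lanes, and the masked forms apply the usual
    // writemask/zeromask selection on top.
    fn broadcast_f32x4(a: [f32; 4]) -> [f32; 8] {
        core::array::from_fn(|j| a[j % 4])
    }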
+ Miscellaneous
-
-
-
-
-        Broadcast the low double-precision (64-bit) floating-point element from "a" to
-        all elements of "dst" using writemask "k" (elements are copied from "src" when the
-        corresponding mask bit is not set).
-
-FOR j := 0 to 3
-    i := j*64
-    IF k[j]
-        dst[i+63:i] := a[63:0]
-    ELSE
-        dst[i+63:i] := src[i+63:i]
-    FI
-ENDFOR
-dst[MAX:256] := 0
-
-
-        AVX512F
-        AVX512VL
-
immintrin.h
- Miscellaneous + + + + + Broadcast the low double-precision (64-bit) floating-point element from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[63:0] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
-
-
-
-        Broadcast the low double-precision (64-bit) floating-point element from "a" to
-        all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding
-        mask bit is not set).
-
-FOR j := 0 to 3
-    i := j*64
-    IF k[j]
-        dst[i+63:i] := a[63:0]
-    ELSE
-        dst[i+63:i] := 0
-    FI
-ENDFOR
-dst[MAX:256] := 0
-
-
-        AVX512F
-        AVX512VL
-
immintrin.h
- Miscellaneous + + + + Broadcast the low double-precision (64-bit) floating-point element from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[63:0] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
-
-
-
-
-        Broadcast the low single-precision (32-bit) floating-point element from "a" to
-        all elements of "dst" using writemask "k" (elements are copied from "src" when the
-        corresponding mask bit is not set).
-
-FOR j := 0 to 7
-    i := j*32
-    IF k[j]
-        dst[i+31:i] := a[31:0]
-    ELSE
-        dst[i+31:i] := src[i+31:i]
-    FI
-ENDFOR
-dst[MAX:256] := 0
-
-
-        AVX512F
-        AVX512VL
-
immintrin.h
- Miscellaneous + + + + + Broadcast the low single-precision (32-bit) floating-point element from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[31:0] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
-
-
-
-        Broadcast the low single-precision (32-bit) floating-point element from "a" to
-        all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding
-        mask bit is not set).
-
-FOR j := 0 to 7
-    i := j*32
-    IF k[j]
-        dst[i+31:i] := a[31:0]
-    ELSE
-        dst[i+31:i] := 0
-    FI
-ENDFOR
-dst[MAX:256] := 0
-
-
-        AVX512F
-        AVX512VL
-
immintrin.h
- Miscellaneous + + + + Broadcast the low single-precision (32-bit) floating-point element from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[31:0] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
-
-
-
-
-        Broadcast the low single-precision (32-bit) floating-point element from "a" to
-        all elements of "dst" using writemask "k" (elements are copied from "src" when the
-        corresponding mask bit is not set).
-
-FOR j := 0 to 3
-    i := j*32
-    IF k[j]
-        dst[i+31:i] := a[31:0]
-    ELSE
-        dst[i+31:i] := src[i+31:i]
-    FI
-ENDFOR
-dst[MAX:128] := 0
-
-
-        AVX512F
-        AVX512VL
-
immintrin.h
- Miscellaneous + + + + + Broadcast the low single-precision (32-bit) floating-point element from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[31:0] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
-
-
-
-        Broadcast the low single-precision (32-bit) floating-point element from "a" to
-        all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding
-        mask bit is not set).
-
-FOR j := 0 to 3
-    i := j*32
-    IF k[j]
-        dst[i+31:i] := a[31:0]
-    ELSE
-        dst[i+31:i] := 0
-    FI
-ENDFOR
-dst[MAX:128] := 0
-
-
-        AVX512F
-        AVX512VL
-
immintrin.h
- Miscellaneous + + + + Broadcast the low single-precision (32-bit) floating-point element from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[31:0] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
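A minimal scalar model of the masked low-element broadcast above, again with plain arrays in place of the vector types (the name is illustrative, not a core_arch API):

    // Scalar model of the masked broadcastss: every active lane receives a[0];
    // inactive lanes keep `src` (writemask form). The zeromask form substitutes
    // 0.0 for src[j].
    fn mask_broadcastss_ps(src: [f32; 8], k: u8, a: [f32; 8]) -> [f32; 8] {
        core::array::from_fn(|j| if (k >> j) & 1 == 1 { a[0] } else { src[j] })
    }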
+ Miscellaneous
-
-
-
-
-        Contiguously store the active double-precision (64-bit) floating-point elements
-        in "a" (those with their respective bit set in writemask "k") to "dst", and pass through
-        the remaining elements from "src".
-
-size := 64
-m := 0
-FOR j := 0 to 3
-    i := j*64
-    IF k[j]
-        dst[m+size-1:m] := a[i+63:i]
-        m := m + size
-    FI
-ENDFOR
-dst[255:m] := src[255:m]
-dst[MAX:256] := 0
-
-
-        AVX512F
-        AVX512VL
-
immintrin.h
- Miscellaneous + + + + + Contiguously store the active double-precision (64-bit) floating-point elements in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src". + +size := 64 +m := 0 +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[m+size-1:m] := a[i+63:i] + m := m + size + FI +ENDFOR +dst[255:m] := src[255:m] +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
-
-
-
-        Contiguously store the active double-precision (64-bit) floating-point elements
-        in "a" (those with their respective bit set in zeromask "k") to "dst", and set the
-        remaining elements to zero.
-
-size := 64
-m := 0
-FOR j := 0 to 3
-    i := j*64
-    IF k[j]
-        dst[m+size-1:m] := a[i+63:i]
-        m := m + size
-    FI
-ENDFOR
-dst[255:m] := 0
-dst[MAX:256] := 0
-
-
-        AVX512F
-        AVX512VL
-
immintrin.h
- Miscellaneous + + + + Contiguously store the active double-precision (64-bit) floating-point elements in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero. + +size := 64 +m := 0 +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[m+size-1:m] := a[i+63:i] + m := m + size + FI +ENDFOR +dst[255:m] := 0 +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
-
-
-
-
-        Contiguously store the active double-precision (64-bit) floating-point elements
-        in "a" (those with their respective bit set in writemask "k") to "dst", and pass through
-        the remaining elements from "src".
-
-size := 64
-m := 0
-FOR j := 0 to 1
-    i := j*64
-    IF k[j]
-        dst[m+size-1:m] := a[i+63:i]
-        m := m + size
-    FI
-ENDFOR
-dst[127:m] := src[127:m]
-dst[MAX:128] := 0
-
-
-        AVX512F
-        AVX512VL
-
immintrin.h
- Miscellaneous + + + + + Contiguously store the active double-precision (64-bit) floating-point elements in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src". + +size := 64 +m := 0 +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[m+size-1:m] := a[i+63:i] + m := m + size + FI +ENDFOR +dst[127:m] := src[127:m] +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
-
-
-
-        Contiguously store the active double-precision (64-bit) floating-point elements
-        in "a" (those with their respective bit set in zeromask "k") to "dst", and set the
-        remaining elements to zero.
-
-size := 64
-m := 0
-FOR j := 0 to 1
-    i := j*64
-    IF k[j]
-        dst[m+size-1:m] := a[i+63:i]
-        m := m + size
-    FI
-ENDFOR
-dst[127:m] := 0
-dst[MAX:128] := 0
-
-
-        AVX512F
-        AVX512VL
-
immintrin.h
- Miscellaneous + + + + Contiguously store the active double-precision (64-bit) floating-point elements in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero. + +size := 64 +m := 0 +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[m+size-1:m] := a[i+63:i] + m := m + size + FI +ENDFOR +dst[127:m] := 0 +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
-
-
-
-
-        Contiguously store the active single-precision (32-bit) floating-point elements
-        in "a" (those with their respective bit set in writemask "k") to "dst", and pass through
-        the remaining elements from "src".
-
-size := 32
-m := 0
-FOR j := 0 to 7
-    i := j*32
-    IF k[j]
-        dst[m+size-1:m] := a[i+31:i]
-        m := m + size
-    FI
-ENDFOR
-dst[255:m] := src[255:m]
-dst[MAX:256] := 0
-
-
-        AVX512F
-        AVX512VL
-
immintrin.h
- Miscellaneous + + + + + Contiguously store the active single-precision (32-bit) floating-point elements in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src". + +size := 32 +m := 0 +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[m+size-1:m] := a[i+31:i] + m := m + size + FI +ENDFOR +dst[255:m] := src[255:m] +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
-
-
-
-        Contiguously store the active single-precision (32-bit) floating-point elements
-        in "a" (those with their respective bit set in zeromask "k") to "dst", and set the
-        remaining elements to zero.
-
-size := 32
-m := 0
-FOR j := 0 to 7
-    i := j*32
-    IF k[j]
-        dst[m+size-1:m] := a[i+31:i]
-        m := m + size
-    FI
-ENDFOR
-dst[255:m] := 0
-dst[MAX:256] := 0
-
-
-        AVX512F
-        AVX512VL
-
immintrin.h
- Miscellaneous + + + + Contiguously store the active single-precision (32-bit) floating-point elements in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero. + +size := 32 +m := 0 +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[m+size-1:m] := a[i+31:i] + m := m + size + FI +ENDFOR +dst[255:m] := 0 +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
-
-
-
-
-        Contiguously store the active single-precision (32-bit) floating-point elements
-        in "a" (those with their respective bit set in writemask "k") to "dst", and pass through
-        the remaining elements from "src".
-
-size := 32
-m := 0
-FOR j := 0 to 3
-    i := j*32
-    IF k[j]
-        dst[m+size-1:m] := a[i+31:i]
-        m := m + size
-    FI
-ENDFOR
-dst[127:m] := src[127:m]
-dst[MAX:128] := 0
-
-
-        AVX512F
-        AVX512VL
-
immintrin.h
- Miscellaneous + + + + + Contiguously store the active single-precision (32-bit) floating-point elements in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src". + +size := 32 +m := 0 +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[m+size-1:m] := a[i+31:i] + m := m + size + FI +ENDFOR +dst[127:m] := src[127:m] +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
-
-
-
-        Contiguously store the active single-precision (32-bit) floating-point elements
-        in "a" (those with their respective bit set in zeromask "k") to "dst", and set the
-        remaining elements to zero.
-
-size := 32
-m := 0
-FOR j := 0 to 3
-    i := j*32
-    IF k[j]
-        dst[m+size-1:m] := a[i+31:i]
-        m := m + size
-    FI
-ENDFOR
-dst[127:m] := 0
-dst[MAX:128] := 0
-
-
-        AVX512F
-        AVX512VL
-
immintrin.h
- Miscellaneous + + + + Contiguously store the active single-precision (32-bit) floating-point elements in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero. + +size := 32 +m := 0 +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[m+size-1:m] := a[i+31:i] + m := m + size + FI +ENDFOR +dst[127:m] := 0 +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
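A minimal scalar model of the compress pseudocode above, assuming plain Rust arrays stand in for the vector operands (the name is illustrative, not a core_arch API):

    // Scalar model of the masked compress (4 x f64 shown): active lanes are
    // packed toward element 0; the untouched tail keeps `src` (writemask form)
    // or is zeroed (zeromask form).
    fn mask_compress_pd(src: [f64; 4], k: u8, a: [f64; 4]) -> [f64; 4] {
        let mut dst = src; // the tail dst[255:m] passes through from src
        let mut m = 0;
        for j in 0..4 {
            if (k >> j) & 1 == 1 {
                dst[m] = a[j];
                m += 1;
            }
        }
        dst
    }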
+ Miscellaneous
-
-
-
-
-        Load contiguous active double-precision (64-bit) floating-point elements from
-        "a" (those with their respective bit set in mask "k"), and store the results in "dst"
-        using writemask "k" (elements are copied from "src" when the corresponding mask bit is
-        not set).
-
-m := 0
-FOR j := 0 to 3
-    i := j*64
-    IF k[j]
-        dst[i+63:i] := a[m+63:m]
-        m := m + 64
-    ELSE
-        dst[i+63:i] := src[i+63:i]
-    FI
-ENDFOR
-dst[MAX:256] := 0
-
-
-        AVX512F
-        AVX512VL
-
immintrin.h
- Miscellaneous + + + + + Load contiguous active double-precision (64-bit) floating-point elements from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[m+63:m] + m := m + 64 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
-
-
-
-        Load contiguous active double-precision (64-bit) floating-point elements from
-        "a" (those with their respective bit set in mask "k"), and store the results in "dst"
-        using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
-
-m := 0
-FOR j := 0 to 3
-    i := j*64
-    IF k[j]
-        dst[i+63:i] := a[m+63:m]
-        m := m + 64
-    ELSE
-        dst[i+63:i] := 0
-    FI
-ENDFOR
-dst[MAX:256] := 0
-
-
-        AVX512F
-        AVX512VL
-
immintrin.h
- Miscellaneous + + + + Load contiguous active double-precision (64-bit) floating-point elements from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[m+63:m] + m := m + 64 + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
-
-
-
-
-        Load contiguous active double-precision (64-bit) floating-point elements from
-        "a" (those with their respective bit set in mask "k"), and store the results in "dst"
-        using writemask "k" (elements are copied from "src" when the corresponding mask bit is
-        not set).
-
-m := 0
-FOR j := 0 to 1
-    i := j*64
-    IF k[j]
-        dst[i+63:i] := a[m+63:m]
-        m := m + 64
-    ELSE
-        dst[i+63:i] := src[i+63:i]
-    FI
-ENDFOR
-dst[MAX:128] := 0
-
-
-        AVX512F
-        AVX512VL
-
immintrin.h
- Miscellaneous + + + + + Load contiguous active double-precision (64-bit) floating-point elements from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[m+63:m] + m := m + 64 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
-
-
-
-        Load contiguous active double-precision (64-bit) floating-point elements from
-        "a" (those with their respective bit set in mask "k"), and store the results in "dst"
-        using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
-
-m := 0
-FOR j := 0 to 1
-    i := j*64
-    IF k[j]
-        dst[i+63:i] := a[m+63:m]
-        m := m + 64
-    ELSE
-        dst[i+63:i] := 0
-    FI
-ENDFOR
-dst[MAX:128] := 0
-
-
-        AVX512F
-        AVX512VL
-
immintrin.h
- Miscellaneous + + + + Load contiguous active double-precision (64-bit) floating-point elements from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[m+63:m] + m := m + 64 + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
-
-
-
-
-        Load contiguous active single-precision (32-bit) floating-point elements from
-        "a" (those with their respective bit set in mask "k"), and store the results in "dst"
-        using writemask "k" (elements are copied from "src" when the corresponding mask bit is
-        not set).
-
-m := 0
-FOR j := 0 to 7
-    i := j*32
-    IF k[j]
-        dst[i+31:i] := a[m+31:m]
-        m := m + 32
-    ELSE
-        dst[i+31:i] := src[i+31:i]
-    FI
-ENDFOR
-dst[MAX:256] := 0
-
-
-        AVX512F
-        AVX512VL
-
immintrin.h
- Miscellaneous + + + + + Load contiguous active single-precision (32-bit) floating-point elements from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[m+31:m] + m := m + 32 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
-
-
-
-        Load contiguous active single-precision (32-bit) floating-point elements from
-        "a" (those with their respective bit set in mask "k"), and store the results in "dst"
-        using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
-
-m := 0
-FOR j := 0 to 7
-    i := j*32
-    IF k[j]
-        dst[i+31:i] := a[m+31:m]
-        m := m + 32
-    ELSE
-        dst[i+31:i] := 0
-    FI
-ENDFOR
-dst[MAX:256] := 0
-
-
-        AVX512F
-        AVX512VL
-
immintrin.h
- Miscellaneous + + + + Load contiguous active single-precision (32-bit) floating-point elements from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[m+31:m] + m := m + 32 + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
-
-
-
-
-        Load contiguous active single-precision (32-bit) floating-point elements from
-        "a" (those with their respective bit set in mask "k"), and store the results in "dst"
-        using writemask "k" (elements are copied from "src" when the corresponding mask bit is
-        not set).
-
-m := 0
-FOR j := 0 to 3
-    i := j*32
-    IF k[j]
-        dst[i+31:i] := a[m+31:m]
-        m := m + 32
-    ELSE
-        dst[i+31:i] := src[i+31:i]
-    FI
-ENDFOR
-dst[MAX:128] := 0
-
-
-        AVX512F
-        AVX512VL
-
immintrin.h
- Miscellaneous + + + + + Load contiguous active single-precision (32-bit) floating-point elements from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[m+31:m] + m := m + 32 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
-
-
-
-        Load contiguous active single-precision (32-bit) floating-point elements from
-        "a" (those with their respective bit set in mask "k"), and store the results in "dst"
-        using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
-
-m := 0
-FOR j := 0 to 3
-    i := j*32
-    IF k[j]
-        dst[i+31:i] := a[m+31:m]
-        m := m + 32
-    ELSE
-        dst[i+31:i] := 0
-    FI
-ENDFOR
-dst[MAX:128] := 0
-
-
-        AVX512F
-        AVX512VL
-
immintrin.h
- Miscellaneous + + + + Load contiguous active single-precision (32-bit) floating-point elements from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[m+31:m] + m := m + 32 + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
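A minimal scalar model of the expand pseudocode above, the inverse of the compress model earlier, with plain arrays in place of the vector operands (the name is illustrative, not a core_arch API):

    // Scalar model of the masked expand (8 x f32 shown): values are consumed
    // from the front of `a` and scattered into the active lanes; inactive
    // lanes keep `src` (writemask form) or become 0.0 (zeromask form).
    fn mask_expand_ps(src: [f32; 8], k: u8, a: [f32; 8]) -> [f32; 8] {
        let mut dst = src; // inactive lanes pass through from src
        let mut m = 0;
        for j in 0..8 {
            if (k >> j) & 1 == 1 {
                dst[j] = a[m];
                m += 1;
            }
        }
        dst
    }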
+ Miscellaneous
-
-
-
-        Extract 128 bits (composed of 4 packed single-precision (32-bit) floating-point
-        elements) from "a", selected with "imm8", and store the result in "dst".
-
-CASE imm8[0] OF
-0: dst[127:0] := a[127:0]
-1: dst[127:0] := a[255:128]
-ESAC
-dst[MAX:128] := 0
-
-
-        AVX512F
-        AVX512VL
-
immintrin.h
- Miscellaneous + + + + Extract 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "a", selected with "imm8", and store the result in "dst". + +CASE imm8[0] OF +0: dst[127:0] := a[127:0] +1: dst[127:0] := a[255:128] +ESAC +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
-
-
-
-
-
-        Extract 128 bits (composed of 4 packed single-precision (32-bit) floating-point
-        elements) from "a", selected with "imm8", and store the results in "dst" using writemask
-        "k" (elements are copied from "src" when the corresponding mask bit is not set).
-
-CASE imm8[0] OF
-0: tmp[127:0] := a[127:0]
-1: tmp[127:0] := a[255:128]
-ESAC
-FOR j := 0 to 3
-    i := j*32
-    IF k[j]
-        dst[i+31:i] := tmp[i+31:i]
-    ELSE
-        dst[i+31:i] := src[i+31:i]
-    FI
-ENDFOR
-dst[MAX:128] := 0
-
-
-        AVX512F
-        AVX512VL
-
immintrin.h
- Miscellaneous + + + + + + Extract 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "a", selected with "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +CASE imm8[0] OF +0: tmp[127:0] := a[127:0] +1: tmp[127:0] := a[255:128] +ESAC +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
-
-
-
-
-        Extract 128 bits (composed of 4 packed single-precision (32-bit) floating-point
-        elements) from "a", selected with "imm8", and store the results in "dst" using zeromask
-        "k" (elements are zeroed out when the corresponding mask bit is not set).
-
-CASE imm8[0] OF
-0: tmp[127:0] := a[127:0]
-1: tmp[127:0] := a[255:128]
-ESAC
-FOR j := 0 to 3
-    i := j*32
-    IF k[j]
-        dst[i+31:i] := tmp[i+31:i]
-    ELSE
-        dst[i+31:i] := 0
-    FI
-ENDFOR
-dst[MAX:128] := 0
-
-
-        AVX512F
-        AVX512VL
-
immintrin.h
- Miscellaneous + + + + + Extract 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "a", selected with "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +CASE imm8[0] OF +0: tmp[127:0] := a[127:0] +1: tmp[127:0] := a[255:128] +ESAC +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
-
-
-
-        Extract 128 bits (composed of 4 packed 32-bit integers) from "a", selected with
-        "imm8", and store the result in "dst".
-
-CASE imm8[0] OF
-0: dst[127:0] := a[127:0]
-1: dst[127:0] := a[255:128]
-ESAC
-dst[MAX:128] := 0
-
-
-        AVX512F
-        AVX512VL
-
immintrin.h
- Miscellaneous + + + + Extract 128 bits (composed of 4 packed 32-bit integers) from "a", selected with "imm8", and store the result in "dst". + +CASE imm8[0] OF +0: dst[127:0] := a[127:0] +1: dst[127:0] := a[255:128] +ESAC +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
-
-
-
-
-
-        Extract 128 bits (composed of 4 packed 32-bit integers) from "a", selected with
-        "imm8", and store the results in "dst" using writemask "k" (elements are copied from
-        "src" when the corresponding mask bit is not set).
-
-CASE imm8[0] OF
-0: tmp[127:0] := a[127:0]
-1: tmp[127:0] := a[255:128]
-ESAC
-FOR j := 0 to 3
-    i := j*32
-    IF k[j]
-        dst[i+31:i] := tmp[i+31:i]
-    ELSE
-        dst[i+31:i] := src[i+31:i]
-    FI
-ENDFOR
-dst[MAX:128] := 0
-
-
-        AVX512F
-        AVX512VL
-
immintrin.h
- Miscellaneous + + + + + + Extract 128 bits (composed of 4 packed 32-bit integers) from "a", selected with "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +CASE imm8[0] OF +0: tmp[127:0] := a[127:0] +1: tmp[127:0] := a[255:128] +ESAC +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
-
-
-
-
-        Extract 128 bits (composed of 4 packed 32-bit integers) from "a", selected with
-        "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when
-        the corresponding mask bit is not set).
-
-CASE imm8[0] OF
-0: tmp[127:0] := a[127:0]
-1: tmp[127:0] := a[255:128]
-ESAC
-FOR j := 0 to 3
-    i := j*32
-    IF k[j]
-        dst[i+31:i] := tmp[i+31:i]
-    ELSE
-        dst[i+31:i] := 0
-    FI
-ENDFOR
-dst[MAX:128] := 0
-
-
-        AVX512F
-        AVX512VL
-
immintrin.h
- Miscellaneous + + + + + Extract 128 bits (composed of 4 packed 32-bit integers) from "a", selected with "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +CASE imm8[0] OF +0: tmp[127:0] := a[127:0] +1: tmp[127:0] := a[255:128] +ESAC +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
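A minimal scalar model of the 128-bit extract above, with plain arrays in place of the vector types (the name is illustrative, not a core_arch API):

    // Scalar model of extractf32x4/extracti32x4: imm8 bit 0 picks the low or
    // high 128-bit half of the 256-bit source. The masked forms then apply the
    // usual per-lane writemask/zeromask selection to the extracted half.
    fn extractf32x4(a: [f32; 8], imm8: u8) -> [f32; 4] {
        let base = (imm8 as usize & 1) * 4;
        [a[base], a[base + 1], a[base + 2], a[base + 3]]
    }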
+ Miscellaneous
-
-
-
-
-
-        Fix up packed double-precision (64-bit) floating-point elements in "a" and "b"
-        using packed 64-bit integers in "c", and store the results in "dst". "imm8" is used to
-        set the required flags reporting.
-enum TOKEN_TYPE {
-    QNAN_TOKEN := 0, \
-    SNAN_TOKEN := 1, \
-    ZERO_VALUE_TOKEN := 2, \
-    ONE_VALUE_TOKEN := 3, \
-    NEG_INF_TOKEN := 4, \
-    POS_INF_TOKEN := 5, \
-    NEG_VALUE_TOKEN := 6, \
-    POS_VALUE_TOKEN := 7
-}
-DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) {
-    tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0]
-    CASE(tsrc[63:0]) OF
-    QNAN_TOKEN: j := 0
-    SNAN_TOKEN: j := 1
-    ZERO_VALUE_TOKEN: j := 2
-    ONE_VALUE_TOKEN: j := 3
-    NEG_INF_TOKEN: j := 4
-    POS_INF_TOKEN: j := 5
-    NEG_VALUE_TOKEN: j := 6
-    POS_VALUE_TOKEN: j := 7
-    ESAC
-
-    token_response[3:0] := src3[3+4*j:4*j]
-
-    CASE(token_response[3:0]) OF
-    0 : dest[63:0] := src1[63:0]
-    1 : dest[63:0] := tsrc[63:0]
-    2 : dest[63:0] := QNaN(tsrc[63:0])
-    3 : dest[63:0] := QNAN_Indefinite
-    4 : dest[63:0] := -INF
-    5 : dest[63:0] := +INF
-    6 : dest[63:0] := tsrc.sign? -INF : +INF
-    7 : dest[63:0] := -0
-    8 : dest[63:0] := +0
-    9 : dest[63:0] := -1
-    10: dest[63:0] := +1
-    11: dest[63:0] := 1/2
-    12: dest[63:0] := 90.0
-    13: dest[63:0] := PI/2
-    14: dest[63:0] := MAX_FLOAT
-    15: dest[63:0] := -MAX_FLOAT
-    ESAC
-
-    CASE(tsrc[31:0]) OF
-    ZERO_VALUE_TOKEN:
-        IF (imm8[0]) #ZE; FI
-    ZERO_VALUE_TOKEN:
-        IF (imm8[1]) #IE; FI
-    ONE_VALUE_TOKEN:
-        IF (imm8[2]) #ZE; FI
-    ONE_VALUE_TOKEN:
-        IF (imm8[3]) #IE; FI
-    SNAN_TOKEN:
-        IF (imm8[4]) #IE; FI
-    NEG_INF_TOKEN:
-        IF (imm8[5]) #IE; FI
-    NEG_VALUE_TOKEN:
-        IF (imm8[6]) #IE; FI
-    POS_INF_TOKEN:
-        IF (imm8[7]) #IE; FI
-    ESAC
-    RETURN dest[63:0]
-}
-FOR j := 0 to 3
-    i := j*64
-    dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0])
-ENDFOR
-dst[MAX:256] := 0
-
-
-        AVX512F
-        AVX512VL
-
immintrin.h
- Miscellaneous + + + + + + Fix up packed double-precision (64-bit) floating-point elements in "a" and "b" using packed 64-bit integers in "c", and store the results in "dst". "imm8" is used to set the required flags reporting. + enum TOKEN_TYPE { + QNAN_TOKEN := 0, \ + SNAN_TOKEN := 1, \ + ZERO_VALUE_TOKEN := 2, \ + ONE_VALUE_TOKEN := 3, \ + NEG_INF_TOKEN := 4, \ + POS_INF_TOKEN := 5, \ + NEG_VALUE_TOKEN := 6, \ + POS_VALUE_TOKEN := 7 +} +DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) { + tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0] + CASE(tsrc[63:0]) OF + QNAN_TOKEN: j := 0 + SNAN_TOKEN: j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) OF + 0 : dest[63:0] := src1[63:0] + 1 : dest[63:0] := tsrc[63:0] + 2 : dest[63:0] := QNaN(tsrc[63:0]) + 3 : dest[63:0] := QNAN_Indefinite + 4 : dest[63:0] := -INF + 5 : dest[63:0] := +INF + 6 : dest[63:0] := tsrc.sign? -INF : +INF + 7 : dest[63:0] := -0 + 8 : dest[63:0] := +0 + 9 : dest[63:0] := -1 + 10: dest[63:0] := +1 + 11: dest[63:0] := 1/2 + 12: dest[63:0] := 90.0 + 13: dest[63:0] := PI/2 + 14: dest[63:0] := MAX_FLOAT + 15: dest[63:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0]) OF + ZERO_VALUE_TOKEN: + IF (imm8[0]) #ZE; FI + ZERO_VALUE_TOKEN: + IF (imm8[1]) #IE; FI + ONE_VALUE_TOKEN: + IF (imm8[2]) #ZE; FI + ONE_VALUE_TOKEN: + IF (imm8[3]) #IE; FI + SNAN_TOKEN: + IF (imm8[4]) #IE; FI + NEG_INF_TOKEN: + IF (imm8[5]) #IE; FI + NEG_VALUE_TOKEN: + IF (imm8[6]) #IE; FI + POS_INF_TOKEN: + IF (imm8[7]) #IE; FI + ESAC + RETURN dest[63:0] +} +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0]) +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
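The one non-obvious step in the FIXUPIMM pseudocode above is the table lookup: the (DAZ-adjusted) input is classified into one of eight token classes (j = 0..7), and class j selects the 4-bit response code from the corresponding nibble of the 64-bit table lane. A small sketch of just that lookup, with an illustrative name:

    // token_response[3:0] := src3[3+4*j:4*j] from the pseudocode: class j
    // selects nibble j of the per-lane table operand `c`.
    fn token_response(c: u64, token_class: u32) -> u8 {
        ((c >> (4 * token_class)) & 0xF) as u8
    }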
+ Miscellaneous
-
-
-
-
-
-
-        Fix up packed double-precision (64-bit) floating-point elements in "a" and "b"
-        using packed 64-bit integers in "c", and store the results in "dst" using writemask "k"
-        (elements are copied from "a" when the corresponding mask bit is not set). "imm8" is
-        used to set the required flags reporting.
-enum TOKEN_TYPE {
-    QNAN_TOKEN := 0, \
-    SNAN_TOKEN := 1, \
-    ZERO_VALUE_TOKEN := 2, \
-    ONE_VALUE_TOKEN := 3, \
-    NEG_INF_TOKEN := 4, \
-    POS_INF_TOKEN := 5, \
-    NEG_VALUE_TOKEN := 6, \
-    POS_VALUE_TOKEN := 7
-}
-DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) {
-    tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0]
-    CASE(tsrc[63:0]) OF
-    QNAN_TOKEN:j := 0
-    SNAN_TOKEN:j := 1
-    ZERO_VALUE_TOKEN: j := 2
-    ONE_VALUE_TOKEN: j := 3
-    NEG_INF_TOKEN: j := 4
-    POS_INF_TOKEN: j := 5
-    NEG_VALUE_TOKEN: j := 6
-    POS_VALUE_TOKEN: j := 7
-    ESAC
-
-    token_response[3:0] := src3[3+4*j:4*j]
-
-    CASE(token_response[3:0]) OF
-    0 : dest[63:0] := src1[63:0]
-    1 : dest[63:0] := tsrc[63:0]
-    2 : dest[63:0] := QNaN(tsrc[63:0])
-    3 : dest[63:0] := QNAN_Indefinite
-    4 : dest[63:0] := -INF
-    5 : dest[63:0] := +INF
-    6 : dest[63:0] := tsrc.sign? -INF : +INF
-    7 : dest[63:0] := -0
-    8 : dest[63:0] := +0
-    9 : dest[63:0] := -1
-    10: dest[63:0] := +1
-    11: dest[63:0] := 1/2
-    12: dest[63:0] := 90.0
-    13: dest[63:0] := PI/2
-    14: dest[63:0] := MAX_FLOAT
-    15: dest[63:0] := -MAX_FLOAT
-    ESAC
-
-    CASE(tsrc[31:0]) OF
-    ZERO_VALUE_TOKEN:
-        IF (imm8[0]) #ZE; FI
-    ZERO_VALUE_TOKEN:
-        IF (imm8[1]) #IE; FI
-    ONE_VALUE_TOKEN:
-        IF (imm8[2]) #ZE; FI
-    ONE_VALUE_TOKEN:
-        IF (imm8[3]) #IE; FI
-    SNAN_TOKEN:
-        IF (imm8[4]) #IE; FI
-    NEG_INF_TOKEN:
-        IF (imm8[5]) #IE; FI
-    NEG_VALUE_TOKEN:
-        IF (imm8[6]) #IE; FI
-    POS_INF_TOKEN:
-        IF (imm8[7]) #IE; FI
-    ESAC
-    RETURN dest[63:0]
-}
-FOR j := 0 to 3
-    i := j*64
-    IF k[j]
-        dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0])
-    ELSE
-        dst[i+63:i] := a[i+63:i]
-    FI
-ENDFOR
-dst[MAX:256] := 0
-
-
-        AVX512F
-        AVX512VL
-
immintrin.h
- Miscellaneous + + + + + + + Fix up packed double-precision (64-bit) floating-point elements in "a" and "b" using packed 64-bit integers in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting. + enum TOKEN_TYPE { + QNAN_TOKEN := 0, \ + SNAN_TOKEN := 1, \ + ZERO_VALUE_TOKEN := 2, \ + ONE_VALUE_TOKEN := 3, \ + NEG_INF_TOKEN := 4, \ + POS_INF_TOKEN := 5, \ + NEG_VALUE_TOKEN := 6, \ + POS_VALUE_TOKEN := 7 +} +DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) { + tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0] + CASE(tsrc[63:0]) OF + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) OF + 0 : dest[63:0] := src1[63:0] + 1 : dest[63:0] := tsrc[63:0] + 2 : dest[63:0] := QNaN(tsrc[63:0]) + 3 : dest[63:0] := QNAN_Indefinite + 4 : dest[63:0] := -INF + 5 : dest[63:0] := +INF + 6 : dest[63:0] := tsrc.sign? -INF : +INF + 7 : dest[63:0] := -0 + 8 : dest[63:0] := +0 + 9 : dest[63:0] := -1 + 10: dest[63:0] := +1 + 11: dest[63:0] := 1/2 + 12: dest[63:0] := 90.0 + 13: dest[63:0] := PI/2 + 14: dest[63:0] := MAX_FLOAT + 15: dest[63:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0]) OF + ZERO_VALUE_TOKEN: + IF (imm8[0]) #ZE; FI + ZERO_VALUE_TOKEN: + IF (imm8[1]) #IE; FI + ONE_VALUE_TOKEN: + IF (imm8[2]) #ZE; FI + ONE_VALUE_TOKEN: + IF (imm8[3]) #IE; FI + SNAN_TOKEN: + IF (imm8[4]) #IE; FI + NEG_INF_TOKEN: + IF (imm8[5]) #IE; FI + NEG_VALUE_TOKEN: + IF (imm8[6]) #IE; FI + POS_INF_TOKEN: + IF (imm8[7]) #IE; FI + ESAC + RETURN dest[63:0] +} +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
-
-
-
-
-
-
-        Fix up packed double-precision (64-bit) floating-point elements in "a" and "b"
-        using packed 64-bit integers in "c", and store the results in "dst" using zeromask "k"
-        (elements are zeroed out when the corresponding mask bit is not set). "imm8" is used to
-        set the required flags reporting.
-enum TOKEN_TYPE {
-    QNAN_TOKEN := 0, \
-    SNAN_TOKEN := 1, \
-    ZERO_VALUE_TOKEN := 2, \
-    ONE_VALUE_TOKEN := 3, \
-    NEG_INF_TOKEN := 4, \
-    POS_INF_TOKEN := 5, \
-    NEG_VALUE_TOKEN := 6, \
-    POS_VALUE_TOKEN := 7
-}
-DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) {
-    tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0]
-    CASE(tsrc[63:0]) OF
-    QNAN_TOKEN:j := 0
-    SNAN_TOKEN:j := 1
-    ZERO_VALUE_TOKEN: j := 2
-    ONE_VALUE_TOKEN: j := 3
-    NEG_INF_TOKEN: j := 4
-    POS_INF_TOKEN: j := 5
-    NEG_VALUE_TOKEN: j := 6
-    POS_VALUE_TOKEN: j := 7
-    ESAC
-
-    token_response[3:0] := src3[3+4*j:4*j]
-
-    CASE(token_response[3:0]) OF
-    0 : dest[63:0] := src1[63:0]
-    1 : dest[63:0] := tsrc[63:0]
-    2 : dest[63:0] := QNaN(tsrc[63:0])
-    3 : dest[63:0] := QNAN_Indefinite
-    4 : dest[63:0] := -INF
-    5 : dest[63:0] := +INF
-    6 : dest[63:0] := tsrc.sign? -INF : +INF
-    7 : dest[63:0] := -0
-    8 : dest[63:0] := +0
-    9 : dest[63:0] := -1
-    10: dest[63:0] := +1
-    11: dest[63:0] := 1/2
-    12: dest[63:0] := 90.0
-    13: dest[63:0] := PI/2
-    14: dest[63:0] := MAX_FLOAT
-    15: dest[63:0] := -MAX_FLOAT
-    ESAC
-
-    CASE(tsrc[31:0]) OF
-    ZERO_VALUE_TOKEN:
-        IF (imm8[0]) #ZE; FI
-    ZERO_VALUE_TOKEN:
-        IF (imm8[1]) #IE; FI
-    ONE_VALUE_TOKEN:
-        IF (imm8[2]) #ZE; FI
-    ONE_VALUE_TOKEN:
-        IF (imm8[3]) #IE; FI
-    SNAN_TOKEN:
-        IF (imm8[4]) #IE; FI
-    NEG_INF_TOKEN:
-        IF (imm8[5]) #IE; FI
-    NEG_VALUE_TOKEN:
-        IF (imm8[6]) #IE; FI
-    POS_INF_TOKEN:
-        IF (imm8[7]) #IE; FI
-    ESAC
-    RETURN dest[63:0]
-}
-FOR j := 0 to 3
-    i := j*64
-    IF k[j]
-        dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0])
-    ELSE
-        dst[i+63:i] := 0
-    FI
-ENDFOR
-dst[MAX:256] := 0
-
-
-        AVX512F
-        AVX512VL
-
immintrin.h
- Miscellaneous + + + + + + + Fix up packed double-precision (64-bit) floating-point elements in "a" and "b" using packed 64-bit integers in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting. + enum TOKEN_TYPE { + QNAN_TOKEN := 0, \ + SNAN_TOKEN := 1, \ + ZERO_VALUE_TOKEN := 2, \ + ONE_VALUE_TOKEN := 3, \ + NEG_INF_TOKEN := 4, \ + POS_INF_TOKEN := 5, \ + NEG_VALUE_TOKEN := 6, \ + POS_VALUE_TOKEN := 7 +} +DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) { + tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0] + CASE(tsrc[63:0]) OF + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) OF + 0 : dest[63:0] := src1[63:0] + 1 : dest[63:0] := tsrc[63:0] + 2 : dest[63:0] := QNaN(tsrc[63:0]) + 3 : dest[63:0] := QNAN_Indefinite + 4 : dest[63:0] := -INF + 5 : dest[63:0] := +INF + 6 : dest[63:0] := tsrc.sign? -INF : +INF + 7 : dest[63:0] := -0 + 8 : dest[63:0] := +0 + 9 : dest[63:0] := -1 + 10: dest[63:0] := +1 + 11: dest[63:0] := 1/2 + 12: dest[63:0] := 90.0 + 13: dest[63:0] := PI/2 + 14: dest[63:0] := MAX_FLOAT + 15: dest[63:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0]) OF + ZERO_VALUE_TOKEN: + IF (imm8[0]) #ZE; FI + ZERO_VALUE_TOKEN: + IF (imm8[1]) #IE; FI + ONE_VALUE_TOKEN: + IF (imm8[2]) #ZE; FI + ONE_VALUE_TOKEN: + IF (imm8[3]) #IE; FI + SNAN_TOKEN: + IF (imm8[4]) #IE; FI + NEG_INF_TOKEN: + IF (imm8[5]) #IE; FI + NEG_VALUE_TOKEN: + IF (imm8[6]) #IE; FI + POS_INF_TOKEN: + IF (imm8[7]) #IE; FI + ESAC + RETURN dest[63:0] +} +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
-
-
-
-
-
-        Fix up packed double-precision (64-bit) floating-point elements in "a" and "b"
-        using packed 64-bit integers in "c", and store the results in "dst". "imm8" is used to
-        set the required flags reporting.
-enum TOKEN_TYPE {
-    QNAN_TOKEN := 0, \
-    SNAN_TOKEN := 1, \
-    ZERO_VALUE_TOKEN := 2, \
-    ONE_VALUE_TOKEN := 3, \
-    NEG_INF_TOKEN := 4, \
-    POS_INF_TOKEN := 5, \
-    NEG_VALUE_TOKEN := 6, \
-    POS_VALUE_TOKEN := 7
-}
-DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) {
-    tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0]
-    CASE(tsrc[63:0]) OF
-    QNAN_TOKEN:j := 0
-    SNAN_TOKEN:j := 1
-    ZERO_VALUE_TOKEN: j := 2
-    ONE_VALUE_TOKEN: j := 3
-    NEG_INF_TOKEN: j := 4
-    POS_INF_TOKEN: j := 5
-    NEG_VALUE_TOKEN: j := 6
-    POS_VALUE_TOKEN: j := 7
-    ESAC
-
-    token_response[3:0] := src3[3+4*j:4*j]
-
-    CASE(token_response[3:0]) OF
-    0 : dest[63:0] := src1[63:0]
-    1 : dest[63:0] := tsrc[63:0]
-    2 : dest[63:0] := QNaN(tsrc[63:0])
-    3 : dest[63:0] := QNAN_Indefinite
-    4 : dest[63:0] := -INF
-    5 : dest[63:0] := +INF
-    6 : dest[63:0] := tsrc.sign? -INF : +INF
-    7 : dest[63:0] := -0
-    8 : dest[63:0] := +0
-    9 : dest[63:0] := -1
-    10: dest[63:0] := +1
-    11: dest[63:0] := 1/2
-    12: dest[63:0] := 90.0
-    13: dest[63:0] := PI/2
-    14: dest[63:0] := MAX_FLOAT
-    15: dest[63:0] := -MAX_FLOAT
-    ESAC
-
-    CASE(tsrc[31:0]) OF
-    ZERO_VALUE_TOKEN:
-        IF (imm8[0]) #ZE; FI
-    ZERO_VALUE_TOKEN:
-        IF (imm8[1]) #IE; FI
-    ONE_VALUE_TOKEN:
-        IF (imm8[2]) #ZE; FI
-    ONE_VALUE_TOKEN:
-        IF (imm8[3]) #IE; FI
-    SNAN_TOKEN:
-        IF (imm8[4]) #IE; FI
-    NEG_INF_TOKEN:
-        IF (imm8[5]) #IE; FI
-    NEG_VALUE_TOKEN:
-        IF (imm8[6]) #IE; FI
-    POS_INF_TOKEN:
-        IF (imm8[7]) #IE; FI
-    ESAC
-    RETURN dest[63:0]
-}
-FOR j := 0 to 1
-    i := j*64
-    dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0])
-ENDFOR
-dst[MAX:128] := 0
-
-
-        AVX512F
-        AVX512VL
-
immintrin.h
- Miscellaneous + + + + + + Fix up packed double-precision (64-bit) floating-point elements in "a" and "b" using packed 64-bit integers in "c", and store the results in "dst". "imm8" is used to set the required flags reporting. + enum TOKEN_TYPE { + QNAN_TOKEN := 0, \ + SNAN_TOKEN := 1, \ + ZERO_VALUE_TOKEN := 2, \ + ONE_VALUE_TOKEN := 3, \ + NEG_INF_TOKEN := 4, \ + POS_INF_TOKEN := 5, \ + NEG_VALUE_TOKEN := 6, \ + POS_VALUE_TOKEN := 7 +} +DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) { + tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0] + CASE(tsrc[63:0]) OF + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) OF + 0 : dest[63:0] := src1[63:0] + 1 : dest[63:0] := tsrc[63:0] + 2 : dest[63:0] := QNaN(tsrc[63:0]) + 3 : dest[63:0] := QNAN_Indefinite + 4 : dest[63:0] := -INF + 5 : dest[63:0] := +INF + 6 : dest[63:0] := tsrc.sign? -INF : +INF + 7 : dest[63:0] := -0 + 8 : dest[63:0] := +0 + 9 : dest[63:0] := -1 + 10: dest[63:0] := +1 + 11: dest[63:0] := 1/2 + 12: dest[63:0] := 90.0 + 13: dest[63:0] := PI/2 + 14: dest[63:0] := MAX_FLOAT + 15: dest[63:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0]) OF + ZERO_VALUE_TOKEN: + IF (imm8[0]) #ZE; FI + ZERO_VALUE_TOKEN: + IF (imm8[1]) #IE; FI + ONE_VALUE_TOKEN: + IF (imm8[2]) #ZE; FI + ONE_VALUE_TOKEN: + IF (imm8[3]) #IE; FI + SNAN_TOKEN: + IF (imm8[4]) #IE; FI + NEG_INF_TOKEN: + IF (imm8[5]) #IE; FI + NEG_VALUE_TOKEN: + IF (imm8[6]) #IE; FI + POS_INF_TOKEN: + IF (imm8[7]) #IE; FI + ESAC + RETURN dest[63:0] +} +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0]) +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
-
-
-
-
-
-
-        Fix up packed double-precision (64-bit) floating-point elements in "a" and "b"
-        using packed 64-bit integers in "c", and store the results in "dst" using writemask "k"
-        (elements are copied from "a" when the corresponding mask bit is not set). "imm8" is
-        used to set the required flags reporting.
-enum TOKEN_TYPE {
-    QNAN_TOKEN := 0, \
-    SNAN_TOKEN := 1, \
-    ZERO_VALUE_TOKEN := 2, \
-    ONE_VALUE_TOKEN := 3, \
-    NEG_INF_TOKEN := 4, \
-    POS_INF_TOKEN := 5, \
-    NEG_VALUE_TOKEN := 6, \
-    POS_VALUE_TOKEN := 7
-}
-DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) {
-    tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0]
-    CASE(tsrc[63:0]) OF
-    QNAN_TOKEN:j := 0
-    SNAN_TOKEN:j := 1
-    ZERO_VALUE_TOKEN: j := 2
-    ONE_VALUE_TOKEN: j := 3
-    NEG_INF_TOKEN: j := 4
-    POS_INF_TOKEN: j := 5
-    NEG_VALUE_TOKEN: j := 6
-    POS_VALUE_TOKEN: j := 7
-    ESAC
-
-    token_response[3:0] := src3[3+4*j:4*j]
-
-    CASE(token_response[3:0]) OF
-    0 : dest[63:0] := src1[63:0]
-    1 : dest[63:0] := tsrc[63:0]
-    2 : dest[63:0] := QNaN(tsrc[63:0])
-    3 : dest[63:0] := QNAN_Indefinite
-    4 : dest[63:0] := -INF
-    5 : dest[63:0] := +INF
-    6 : dest[63:0] := tsrc.sign? -INF : +INF
-    7 : dest[63:0] := -0
-    8 : dest[63:0] := +0
-    9 : dest[63:0] := -1
-    10: dest[63:0] := +1
-    11: dest[63:0] := 1/2
-    12: dest[63:0] := 90.0
-    13: dest[63:0] := PI/2
-    14: dest[63:0] := MAX_FLOAT
-    15: dest[63:0] := -MAX_FLOAT
-    ESAC
-
-    CASE(tsrc[31:0]) OF
-    ZERO_VALUE_TOKEN:
-        IF (imm8[0]) #ZE; FI
-    ZERO_VALUE_TOKEN:
-        IF (imm8[1]) #IE; FI
-    ONE_VALUE_TOKEN:
-        IF (imm8[2]) #ZE; FI
-    ONE_VALUE_TOKEN:
-        IF (imm8[3]) #IE; FI
-    SNAN_TOKEN:
-        IF (imm8[4]) #IE; FI
-    NEG_INF_TOKEN:
-        IF (imm8[5]) #IE; FI
-    NEG_VALUE_TOKEN:
-        IF (imm8[6]) #IE; FI
-    POS_INF_TOKEN:
-        IF (imm8[7]) #IE; FI
-    ESAC
-    RETURN dest[63:0]
-}
-FOR j := 0 to 1
-    i := j*64
-    IF k[j]
-        dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0])
-    ELSE
-        dst[i+63:i] := a[i+63:i]
-    FI
-ENDFOR
-dst[MAX:128] := 0
-
-
-        AVX512F
-        AVX512VL
-
immintrin.h
- Miscellaneous + + + + + + + Fix up packed double-precision (64-bit) floating-point elements in "a" and "b" using packed 64-bit integers in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting. + enum TOKEN_TYPE { + QNAN_TOKEN := 0, \ + SNAN_TOKEN := 1, \ + ZERO_VALUE_TOKEN := 2, \ + ONE_VALUE_TOKEN := 3, \ + NEG_INF_TOKEN := 4, \ + POS_INF_TOKEN := 5, \ + NEG_VALUE_TOKEN := 6, \ + POS_VALUE_TOKEN := 7 +} +DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) { + tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0] + CASE(tsrc[63:0]) OF + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) OF + 0 : dest[63:0] := src1[63:0] + 1 : dest[63:0] := tsrc[63:0] + 2 : dest[63:0] := QNaN(tsrc[63:0]) + 3 : dest[63:0] := QNAN_Indefinite + 4 : dest[63:0] := -INF + 5 : dest[63:0] := +INF + 6 : dest[63:0] := tsrc.sign? -INF : +INF + 7 : dest[63:0] := -0 + 8 : dest[63:0] := +0 + 9 : dest[63:0] := -1 + 10: dest[63:0] := +1 + 11: dest[63:0] := 1/2 + 12: dest[63:0] := 90.0 + 13: dest[63:0] := PI/2 + 14: dest[63:0] := MAX_FLOAT + 15: dest[63:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0]) OF + ZERO_VALUE_TOKEN: + IF (imm8[0]) #ZE; FI + ZERO_VALUE_TOKEN: + IF (imm8[1]) #IE; FI + ONE_VALUE_TOKEN: + IF (imm8[2]) #ZE; FI + ONE_VALUE_TOKEN: + IF (imm8[3]) #IE; FI + SNAN_TOKEN: + IF (imm8[4]) #IE; FI + NEG_INF_TOKEN: + IF (imm8[5]) #IE; FI + NEG_VALUE_TOKEN: + IF (imm8[6]) #IE; FI + POS_INF_TOKEN: + IF (imm8[7]) #IE; FI + ESAC + RETURN dest[63:0] +} +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
-
-
-
-
-
-
-        Fix up packed double-precision (64-bit) floating-point elements in "a" and "b"
-        using packed 64-bit integers in "c", and store the results in "dst" using zeromask "k"
-        (elements are zeroed out when the corresponding mask bit is not set). "imm8" is used to
-        set the required flags reporting.
-enum TOKEN_TYPE {
-    QNAN_TOKEN := 0, \
-    SNAN_TOKEN := 1, \
-    ZERO_VALUE_TOKEN := 2, \
-    ONE_VALUE_TOKEN := 3, \
-    NEG_INF_TOKEN := 4, \
-    POS_INF_TOKEN := 5, \
-    NEG_VALUE_TOKEN := 6, \
-    POS_VALUE_TOKEN := 7
-}
-DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) {
-    tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0]
-    CASE(tsrc[63:0]) OF
-    QNAN_TOKEN:j := 0
-    SNAN_TOKEN:j := 1
-    ZERO_VALUE_TOKEN: j := 2
-    ONE_VALUE_TOKEN: j := 3
-    NEG_INF_TOKEN: j := 4
-    POS_INF_TOKEN: j := 5
-    NEG_VALUE_TOKEN: j := 6
-    POS_VALUE_TOKEN: j := 7
-    ESAC
-
-    token_response[3:0] := src3[3+4*j:4*j]
-
-    CASE(token_response[3:0]) OF
-    0 : dest[63:0] := src1[63:0]
-    1 : dest[63:0] := tsrc[63:0]
-    2 : dest[63:0] := QNaN(tsrc[63:0])
-    3 : dest[63:0] := QNAN_Indefinite
-    4 : dest[63:0] := -INF
-    5 : dest[63:0] := +INF
-    6 : dest[63:0] := tsrc.sign? -INF : +INF
-    7 : dest[63:0] := -0
-    8 : dest[63:0] := +0
-    9 : dest[63:0] := -1
-    10: dest[63:0] := +1
-    11: dest[63:0] := 1/2
-    12: dest[63:0] := 90.0
-    13: dest[63:0] := PI/2
-    14: dest[63:0] := MAX_FLOAT
-    15: dest[63:0] := -MAX_FLOAT
-    ESAC
-
-    CASE(tsrc[31:0]) OF
-    ZERO_VALUE_TOKEN:
-        IF (imm8[0]) #ZE; FI
-    ZERO_VALUE_TOKEN:
-        IF (imm8[1]) #IE; FI
-    ONE_VALUE_TOKEN:
-        IF (imm8[2]) #ZE; FI
-    ONE_VALUE_TOKEN:
-        IF (imm8[3]) #IE; FI
-    SNAN_TOKEN:
-        IF (imm8[4]) #IE; FI
-    NEG_INF_TOKEN:
-        IF (imm8[5]) #IE; FI
-    NEG_VALUE_TOKEN:
-        IF (imm8[6]) #IE; FI
-    POS_INF_TOKEN:
-        IF (imm8[7]) #IE; FI
-    ESAC
-    RETURN dest[63:0]
-}
-FOR j := 0 to 1
-    i := j*64
-    IF k[j]
-        dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0])
-    ELSE
-        dst[i+63:i] := 0
-    FI
-ENDFOR
-dst[MAX:128] := 0
-
-
-        AVX512F
-        AVX512VL
-
immintrin.h
- Miscellaneous + + + + + + + Fix up packed double-precision (64-bit) floating-point elements in "a" and "b" using packed 64-bit integers in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting. + enum TOKEN_TYPE { + QNAN_TOKEN := 0, \ + SNAN_TOKEN := 1, \ + ZERO_VALUE_TOKEN := 2, \ + ONE_VALUE_TOKEN := 3, \ + NEG_INF_TOKEN := 4, \ + POS_INF_TOKEN := 5, \ + NEG_VALUE_TOKEN := 6, \ + POS_VALUE_TOKEN := 7 +} +DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) { + tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0] + CASE(tsrc[63:0]) OF + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) OF + 0 : dest[63:0] := src1[63:0] + 1 : dest[63:0] := tsrc[63:0] + 2 : dest[63:0] := QNaN(tsrc[63:0]) + 3 : dest[63:0] := QNAN_Indefinite + 4 : dest[63:0] := -INF + 5 : dest[63:0] := +INF + 6 : dest[63:0] := tsrc.sign? -INF : +INF + 7 : dest[63:0] := -0 + 8 : dest[63:0] := +0 + 9 : dest[63:0] := -1 + 10: dest[63:0] := +1 + 11: dest[63:0] := 1/2 + 12: dest[63:0] := 90.0 + 13: dest[63:0] := PI/2 + 14: dest[63:0] := MAX_FLOAT + 15: dest[63:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0]) OF + ZERO_VALUE_TOKEN: + IF (imm8[0]) #ZE; FI + ZERO_VALUE_TOKEN: + IF (imm8[1]) #IE; FI + ONE_VALUE_TOKEN: + IF (imm8[2]) #ZE; FI + ONE_VALUE_TOKEN: + IF (imm8[3]) #IE; FI + SNAN_TOKEN: + IF (imm8[4]) #IE; FI + NEG_INF_TOKEN: + IF (imm8[5]) #IE; FI + NEG_VALUE_TOKEN: + IF (imm8[6]) #IE; FI + POS_INF_TOKEN: + IF (imm8[7]) #IE; FI + ESAC + RETURN dest[63:0] +} +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
-
-
-
-
-
-        Fix up packed single-precision (32-bit) floating-point elements in "a" and "b"
-        using packed 32-bit integers in "c", and store the results in "dst". "imm8" is used to
-        set the required flags reporting.
-enum TOKEN_TYPE {
-    QNAN_TOKEN := 0, \
-    SNAN_TOKEN := 1, \
-    ZERO_VALUE_TOKEN := 2, \
-    ONE_VALUE_TOKEN := 3, \
-    NEG_INF_TOKEN := 4, \
-    POS_INF_TOKEN := 5, \
-    NEG_VALUE_TOKEN := 6, \
-    POS_VALUE_TOKEN := 7
-}
-DEFINE FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]) {
-    tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0]
-    CASE(tsrc[31:0]) OF
-    QNAN_TOKEN:j := 0
-    SNAN_TOKEN:j := 1
-    ZERO_VALUE_TOKEN: j := 2
-    ONE_VALUE_TOKEN: j := 3
-    NEG_INF_TOKEN: j := 4
-    POS_INF_TOKEN: j := 5
-    NEG_VALUE_TOKEN: j := 6
-    POS_VALUE_TOKEN: j := 7
-    ESAC
-
-    token_response[3:0] := src3[3+4*j:4*j]
-
-    CASE(token_response[3:0]) OF
-    0 : dest[31:0] := src1[31:0]
-    1 : dest[31:0] := tsrc[31:0]
-    2 : dest[31:0] := QNaN(tsrc[31:0])
-    3 : dest[31:0] := QNAN_Indefinite
-    4 : dest[31:0] := -INF
-    5 : dest[31:0] := +INF
-    6 : dest[31:0] := tsrc.sign? -INF : +INF
-    7 : dest[31:0] := -0
-    8 : dest[31:0] := +0
-    9 : dest[31:0] := -1
-    10: dest[31:0] := +1
-    11: dest[31:0] := 1/2
-    12: dest[31:0] := 90.0
-    13: dest[31:0] := PI/2
-    14: dest[31:0] := MAX_FLOAT
-    15: dest[31:0] := -MAX_FLOAT
-    ESAC
-
-    CASE(tsrc[31:0]) OF
-    ZERO_VALUE_TOKEN:
-        IF (imm8[0]) #ZE; FI
-    ZERO_VALUE_TOKEN:
-        IF (imm8[1]) #IE; FI
-    ONE_VALUE_TOKEN:
-        IF (imm8[2]) #ZE; FI
-    ONE_VALUE_TOKEN:
-        IF (imm8[3]) #IE; FI
-    SNAN_TOKEN:
-        IF (imm8[4]) #IE; FI
-    NEG_INF_TOKEN:
-        IF (imm8[5]) #IE; FI
-    NEG_VALUE_TOKEN:
-        IF (imm8[6]) #IE; FI
-    POS_INF_TOKEN:
-        IF (imm8[7]) #IE; FI
-    ESAC
-    RETURN dest[31:0]
-}
-FOR j := 0 to 7
-    i := j*32
-    dst[i+31:i] := FIXUPIMMPD(a[i+31:i], b[i+31:i], c[i+31:i], imm8[7:0])
-ENDFOR
-dst[MAX:256] := 0
-
-
-        AVX512F
-        AVX512VL
-
immintrin.h
- Miscellaneous + + + + + + Fix up packed single-precision (32-bit) floating-point elements in "a" and "b" using packed 32-bit integers in "c", and store the results in "dst". "imm8" is used to set the required flags reporting. + enum TOKEN_TYPE { + QNAN_TOKEN := 0, \ + SNAN_TOKEN := 1, \ + ZERO_VALUE_TOKEN := 2, \ + ONE_VALUE_TOKEN := 3, \ + NEG_INF_TOKEN := 4, \ + POS_INF_TOKEN := 5, \ + NEG_VALUE_TOKEN := 6, \ + POS_VALUE_TOKEN := 7 +} +DEFINE FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]) { + tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0] + CASE(tsrc[31:0]) OF + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) OF + 0 : dest[31:0] := src1[31:0] + 1 : dest[31:0] := tsrc[31:0] + 2 : dest[31:0] := QNaN(tsrc[31:0]) + 3 : dest[31:0] := QNAN_Indefinite + 4 : dest[31:0] := -INF + 5 : dest[31:0] := +INF + 6 : dest[31:0] := tsrc.sign? -INF : +INF + 7 : dest[31:0] := -0 + 8 : dest[31:0] := +0 + 9 : dest[31:0] := -1 + 10: dest[31:0] := +1 + 11: dest[31:0] := 1/2 + 12: dest[31:0] := 90.0 + 13: dest[31:0] := PI/2 + 14: dest[31:0] := MAX_FLOAT + 15: dest[31:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0]) OF + ZERO_VALUE_TOKEN: + IF (imm8[0]) #ZE; FI + ZERO_VALUE_TOKEN: + IF (imm8[1]) #IE; FI + ONE_VALUE_TOKEN: + IF (imm8[2]) #ZE; FI + ONE_VALUE_TOKEN: + IF (imm8[3]) #IE; FI + SNAN_TOKEN: + IF (imm8[4]) #IE; FI + NEG_INF_TOKEN: + IF (imm8[5]) #IE; FI + NEG_VALUE_TOKEN: + IF (imm8[6]) #IE; FI + POS_INF_TOKEN: + IF (imm8[7]) #IE; FI + ESAC + RETURN dest[31:0] +} +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := FIXUPIMMPD(a[i+31:i], b[i+31:i], c[i+31:i], imm8[7:0]) +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
-
-
-
-
-
-
-        Fix up packed single-precision (32-bit) floating-point elements in "a" and "b"
-        using packed 32-bit integers in "c", and store the results in "dst" using writemask "k"
-        (elements are copied from "a" when the corresponding mask bit is not set). "imm8" is
-        used to set the required flags reporting.
-enum TOKEN_TYPE {
-    QNAN_TOKEN := 0, \
-    SNAN_TOKEN := 1, \
-    ZERO_VALUE_TOKEN := 2, \
-    ONE_VALUE_TOKEN := 3, \
-    NEG_INF_TOKEN := 4, \
-    POS_INF_TOKEN := 5, \
-    NEG_VALUE_TOKEN := 6, \
-    POS_VALUE_TOKEN := 7
-}
-DEFINE FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]) {
-    tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0]
-    CASE(tsrc[31:0]) OF
-    QNAN_TOKEN:j := 0
-    SNAN_TOKEN:j := 1
-    ZERO_VALUE_TOKEN: j := 2
-    ONE_VALUE_TOKEN: j := 3
-    NEG_INF_TOKEN: j := 4
-    POS_INF_TOKEN: j := 5
-    NEG_VALUE_TOKEN: j := 6
-    POS_VALUE_TOKEN: j := 7
-    ESAC
-
-    token_response[3:0] := src3[3+4*j:4*j]
-
-    CASE(token_response[3:0]) OF
-    0 : dest[31:0] := src1[31:0]
-    1 : dest[31:0] := tsrc[31:0]
-    2 : dest[31:0] := QNaN(tsrc[31:0])
-    3 : dest[31:0] := QNAN_Indefinite
-    4 : dest[31:0] := -INF
-    5 : dest[31:0] := +INF
-    6 : dest[31:0] := tsrc.sign? -INF : +INF
-    7 : dest[31:0] := -0
-    8 : dest[31:0] := +0
-    9 : dest[31:0] := -1
-    10: dest[31:0] := +1
-    11: dest[31:0] := 1/2
-    12: dest[31:0] := 90.0
-    13: dest[31:0] := PI/2
-    14: dest[31:0] := MAX_FLOAT
-    15: dest[31:0] := -MAX_FLOAT
-    ESAC
-
-    CASE(tsrc[31:0]) OF
-    ZERO_VALUE_TOKEN:
-        IF (imm8[0]) #ZE; FI
-    ZERO_VALUE_TOKEN:
-        IF (imm8[1]) #IE; FI
-    ONE_VALUE_TOKEN:
-        IF (imm8[2]) #ZE; FI
-    ONE_VALUE_TOKEN:
-        IF (imm8[3]) #IE; FI
-    SNAN_TOKEN:
-        IF (imm8[4]) #IE; FI
-    NEG_INF_TOKEN:
-        IF (imm8[5]) #IE; FI
-    NEG_VALUE_TOKEN:
-        IF (imm8[6]) #IE; FI
-    POS_INF_TOKEN:
-        IF (imm8[7]) #IE; FI
-    ESAC
-    RETURN dest[31:0]
-}
-FOR j := 0 to 7
-    i := j*32
-    IF k[j]
-        dst[i+31:i] := FIXUPIMMPD(a[i+31:i], b[i+31:i], c[i+31:i], imm8[7:0])
-    ELSE
-        dst[i+31:i] := a[i+31:i]
-    FI
-ENDFOR
-dst[MAX:256] := 0
-
-
-        AVX512F
-        AVX512VL
-
immintrin.h
- Miscellaneous + + + + + + + Fix up packed single-precision (32-bit) floating-point elements in "a" and "b" using packed 32-bit integers in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting. + enum TOKEN_TYPE { + QNAN_TOKEN := 0, \ + SNAN_TOKEN := 1, \ + ZERO_VALUE_TOKEN := 2, \ + ONE_VALUE_TOKEN := 3, \ + NEG_INF_TOKEN := 4, \ + POS_INF_TOKEN := 5, \ + NEG_VALUE_TOKEN := 6, \ + POS_VALUE_TOKEN := 7 +} +DEFINE FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]) { + tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0] + CASE(tsrc[31:0]) OF + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) OF + 0 : dest[31:0] := src1[31:0] + 1 : dest[31:0] := tsrc[31:0] + 2 : dest[31:0] := QNaN(tsrc[31:0]) + 3 : dest[31:0] := QNAN_Indefinite + 4 : dest[31:0] := -INF + 5 : dest[31:0] := +INF + 6 : dest[31:0] := tsrc.sign? -INF : +INF + 7 : dest[31:0] := -0 + 8 : dest[31:0] := +0 + 9 : dest[31:0] := -1 + 10: dest[31:0] := +1 + 11: dest[31:0] := 1/2 + 12: dest[31:0] := 90.0 + 13: dest[31:0] := PI/2 + 14: dest[31:0] := MAX_FLOAT + 15: dest[31:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0]) OF + ZERO_VALUE_TOKEN: + IF (imm8[0]) #ZE; FI + ZERO_VALUE_TOKEN: + IF (imm8[1]) #IE; FI + ONE_VALUE_TOKEN: + IF (imm8[2]) #ZE; FI + ONE_VALUE_TOKEN: + IF (imm8[3]) #IE; FI + SNAN_TOKEN: + IF (imm8[4]) #IE; FI + NEG_INF_TOKEN: + IF (imm8[5]) #IE; FI + NEG_VALUE_TOKEN: + IF (imm8[6]) #IE; FI + POS_INF_TOKEN: + IF (imm8[7]) #IE; FI + ESAC + RETURN dest[31:0] +} +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := FIXUPIMMPD(a[i+31:i], b[i+31:i], c[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
+ Fix up packed single-precision (32-bit) floating-point elements in "a" and "b" using packed 32-bit integers in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting.
+enum TOKEN_TYPE {
+ QNAN_TOKEN := 0, \
+ SNAN_TOKEN := 1, \
+ ZERO_VALUE_TOKEN := 2, \
+ ONE_VALUE_TOKEN := 3, \
+ NEG_INF_TOKEN := 4, \
+ POS_INF_TOKEN := 5, \
+ NEG_VALUE_TOKEN := 6, \
+ POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]) {
+ tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0]
+ CASE(tsrc[31:0]) OF
+ QNAN_TOKEN:j := 0
+ SNAN_TOKEN:j := 1
+ ZERO_VALUE_TOKEN: j := 2
+ ONE_VALUE_TOKEN: j := 3
+ NEG_INF_TOKEN: j := 4
+ POS_INF_TOKEN: j := 5
+ NEG_VALUE_TOKEN: j := 6
+ POS_VALUE_TOKEN: j := 7
+ ESAC
+
+ token_response[3:0] := src3[3+4*j:4*j]
+
+ CASE(token_response[3:0]) OF
+ 0 : dest[31:0] := src1[31:0]
+ 1 : dest[31:0] := tsrc[31:0]
+ 2 : dest[31:0] := QNaN(tsrc[31:0])
+ 3 : dest[31:0] := QNAN_Indefinite
+ 4 : dest[31:0] := -INF
+ 5 : dest[31:0] := +INF
+ 6 : dest[31:0] := tsrc.sign? -INF : +INF
+ 7 : dest[31:0] := -0
+ 8 : dest[31:0] := +0
+ 9 : dest[31:0] := -1
+ 10: dest[31:0] := +1
+ 11: dest[31:0] := 1/2
+ 12: dest[31:0] := 90.0
+ 13: dest[31:0] := PI/2
+ 14: dest[31:0] := MAX_FLOAT
+ 15: dest[31:0] := -MAX_FLOAT
+ ESAC
+
+ CASE(tsrc[31:0]) OF
+ ZERO_VALUE_TOKEN:
+ IF (imm8[0]) #ZE; FI
+ ZERO_VALUE_TOKEN:
+ IF (imm8[1]) #IE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[2]) #ZE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[3]) #IE; FI
+ SNAN_TOKEN:
+ IF (imm8[4]) #IE; FI
+ NEG_INF_TOKEN:
+ IF (imm8[5]) #IE; FI
+ NEG_VALUE_TOKEN:
+ IF (imm8[6]) #IE; FI
+ POS_INF_TOKEN:
+ IF (imm8[7]) #IE; FI
+ ESAC
+ RETURN dest[31:0]
+}
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := FIXUPIMMPD(a[i+31:i], b[i+31:i], c[i+31:i], imm8[7:0])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
+ Fix up packed single-precision (32-bit) floating-point elements in "a" and "b" using packed 32-bit integers in "c", and store the results in "dst". "imm8" is used to set the required flags reporting.
+enum TOKEN_TYPE {
+ QNAN_TOKEN := 0, \
+ SNAN_TOKEN := 1, \
+ ZERO_VALUE_TOKEN := 2, \
+ ONE_VALUE_TOKEN := 3, \
+ NEG_INF_TOKEN := 4, \
+ POS_INF_TOKEN := 5, \
+ NEG_VALUE_TOKEN := 6, \
+ POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]) {
+ tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0]
+ CASE(tsrc[31:0]) OF
+ QNAN_TOKEN:j := 0
+ SNAN_TOKEN:j := 1
+ ZERO_VALUE_TOKEN: j := 2
+ ONE_VALUE_TOKEN: j := 3
+ NEG_INF_TOKEN: j := 4
+ POS_INF_TOKEN: j := 5
+ NEG_VALUE_TOKEN: j := 6
+ POS_VALUE_TOKEN: j := 7
+ ESAC
+
+ token_response[3:0] := src3[3+4*j:4*j]
+
+ CASE(token_response[3:0]) OF
+ 0 : dest[31:0] := src1[31:0]
+ 1 : dest[31:0] := tsrc[31:0]
+ 2 : dest[31:0] := QNaN(tsrc[31:0])
+ 3 : dest[31:0] := QNAN_Indefinite
+ 4 : dest[31:0] := -INF
+ 5 : dest[31:0] := +INF
+ 6 : dest[31:0] := tsrc.sign? -INF : +INF
+ 7 : dest[31:0] := -0
+ 8 : dest[31:0] := +0
+ 9 : dest[31:0] := -1
+ 10: dest[31:0] := +1
+ 11: dest[31:0] := 1/2
+ 12: dest[31:0] := 90.0
+ 13: dest[31:0] := PI/2
+ 14: dest[31:0] := MAX_FLOAT
+ 15: dest[31:0] := -MAX_FLOAT
+ ESAC
+
+ CASE(tsrc[31:0]) OF
+ ZERO_VALUE_TOKEN:
+ IF (imm8[0]) #ZE; FI
+ ZERO_VALUE_TOKEN:
+ IF (imm8[1]) #IE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[2]) #ZE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[3]) #IE; FI
+ SNAN_TOKEN:
+ IF (imm8[4]) #IE; FI
+ NEG_INF_TOKEN:
+ IF (imm8[5]) #IE; FI
+ NEG_VALUE_TOKEN:
+ IF (imm8[6]) #IE; FI
+ POS_INF_TOKEN:
+ IF (imm8[7]) #IE; FI
+ ESAC
+ RETURN dest[31:0]
+}
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := FIXUPIMMPD(a[i+31:i], b[i+31:i], c[i+31:i], imm8[7:0])
+ENDFOR
+dst[MAX:128] := 0
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
+ Fix up packed single-precision (32-bit) floating-point elements in "a" and "b" using packed 32-bit integers in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting.
+enum TOKEN_TYPE {
+ QNAN_TOKEN := 0, \
+ SNAN_TOKEN := 1, \
+ ZERO_VALUE_TOKEN := 2, \
+ ONE_VALUE_TOKEN := 3, \
+ NEG_INF_TOKEN := 4, \
+ POS_INF_TOKEN := 5, \
+ NEG_VALUE_TOKEN := 6, \
+ POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]) {
+ tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0]
+ CASE(tsrc[31:0]) OF
+ QNAN_TOKEN:j := 0
+ SNAN_TOKEN:j := 1
+ ZERO_VALUE_TOKEN: j := 2
+ ONE_VALUE_TOKEN: j := 3
+ NEG_INF_TOKEN: j := 4
+ POS_INF_TOKEN: j := 5
+ NEG_VALUE_TOKEN: j := 6
+ POS_VALUE_TOKEN: j := 7
+ ESAC
+
+ token_response[3:0] := src3[3+4*j:4*j]
+
+ CASE(token_response[3:0]) OF
+ 0 : dest[31:0] := src1[31:0]
+ 1 : dest[31:0] := tsrc[31:0]
+ 2 : dest[31:0] := QNaN(tsrc[31:0])
+ 3 : dest[31:0] := QNAN_Indefinite
+ 4 : dest[31:0] := -INF
+ 5 : dest[31:0] := +INF
+ 6 : dest[31:0] := tsrc.sign? -INF : +INF
+ 7 : dest[31:0] := -0
+ 8 : dest[31:0] := +0
+ 9 : dest[31:0] := -1
+ 10: dest[31:0] := +1
+ 11: dest[31:0] := 1/2
+ 12: dest[31:0] := 90.0
+ 13: dest[31:0] := PI/2
+ 14: dest[31:0] := MAX_FLOAT
+ 15: dest[31:0] := -MAX_FLOAT
+ ESAC
+
+ CASE(tsrc[31:0]) OF
+ ZERO_VALUE_TOKEN:
+ IF (imm8[0]) #ZE; FI
+ ZERO_VALUE_TOKEN:
+ IF (imm8[1]) #IE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[2]) #ZE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[3]) #IE; FI
+ SNAN_TOKEN:
+ IF (imm8[4]) #IE; FI
+ NEG_INF_TOKEN:
+ IF (imm8[5]) #IE; FI
+ NEG_VALUE_TOKEN:
+ IF (imm8[6]) #IE; FI
+ POS_INF_TOKEN:
+ IF (imm8[7]) #IE; FI
+ ESAC
+ RETURN dest[31:0]
+}
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := FIXUPIMMPD(a[i+31:i], b[i+31:i], c[i+31:i], imm8[7:0])
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
+ Fix up packed single-precision (32-bit) floating-point elements in "a" and "b" using packed 32-bit integers in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting.
+enum TOKEN_TYPE {
+ QNAN_TOKEN := 0, \
+ SNAN_TOKEN := 1, \
+ ZERO_VALUE_TOKEN := 2, \
+ ONE_VALUE_TOKEN := 3, \
+ NEG_INF_TOKEN := 4, \
+ POS_INF_TOKEN := 5, \
+ NEG_VALUE_TOKEN := 6, \
+ POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]) {
+ tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0]
+ CASE(tsrc[31:0]) OF
+ QNAN_TOKEN:j := 0
+ SNAN_TOKEN:j := 1
+ ZERO_VALUE_TOKEN: j := 2
+ ONE_VALUE_TOKEN: j := 3
+ NEG_INF_TOKEN: j := 4
+ POS_INF_TOKEN: j := 5
+ NEG_VALUE_TOKEN: j := 6
+ POS_VALUE_TOKEN: j := 7
+ ESAC
+
+ token_response[3:0] := src3[3+4*j:4*j]
+
+ CASE(token_response[3:0]) OF
+ 0 : dest[31:0] := src1[31:0]
+ 1 : dest[31:0] := tsrc[31:0]
+ 2 : dest[31:0] := QNaN(tsrc[31:0])
+ 3 : dest[31:0] := QNAN_Indefinite
+ 4 : dest[31:0] := -INF
+ 5 : dest[31:0] := +INF
+ 6 : dest[31:0] := tsrc.sign? -INF : +INF
+ 7 : dest[31:0] := -0
+ 8 : dest[31:0] := +0
+ 9 : dest[31:0] := -1
+ 10: dest[31:0] := +1
+ 11: dest[31:0] := 1/2
+ 12: dest[31:0] := 90.0
+ 13: dest[31:0] := PI/2
+ 14: dest[31:0] := MAX_FLOAT
+ 15: dest[31:0] := -MAX_FLOAT
+ ESAC
+
+ CASE(tsrc[31:0]) OF
+ ZERO_VALUE_TOKEN:
+ IF (imm8[0]) #ZE; FI
+ ZERO_VALUE_TOKEN:
+ IF (imm8[1]) #IE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[2]) #ZE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[3]) #IE; FI
+ SNAN_TOKEN:
+ IF (imm8[4]) #IE; FI
+ NEG_INF_TOKEN:
+ IF (imm8[5]) #IE; FI
+ NEG_VALUE_TOKEN:
+ IF (imm8[6]) #IE; FI
+ POS_INF_TOKEN:
+ IF (imm8[7]) #IE; FI
+ ESAC
+ RETURN dest[31:0]
+}
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := FIXUPIMMPD(a[i+31:i], b[i+31:i], c[i+31:i], imm8[7:0])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ AVX512F
+ AVX512VL
immintrin.h
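For the masked forms, the writemask simply gates the per-lane fixup. A sketch using plain arrays as stand-ins for the vector types, reusing the illustrative `fixup_lane` from the sketch after the first entry above (imm8 only affects the omitted flag reporting, so it does not appear here):

// Hypothetical model of the writemask variant: lanes whose k-bit is
// clear pass "a" through unchanged; the zeromask form would write 0.
fn mask_fixupimm_ps(a: [f32; 8], k: u8, b: [f32; 8], c: [u32; 8]) -> [f32; 8] {
    let mut dst = a;
    for j in 0..8 {
        if (k >> j) & 1 == 1 {
            dst[j] = fixup_lane(a[j], b[j], c[j]);
        }
    }
    dst
}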
+ Miscellaneous
+ Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element.
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := ConvertExpFP64(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
+ Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element.
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ConvertExpFP64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
+ Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element.
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ConvertExpFP64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
+ Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element.
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := ConvertExpFP64(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
+ Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element.
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ConvertExpFP64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
+ Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element.
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ConvertExpFP64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
+ Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element.
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := ConvertExpFP32(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
+ Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element.
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ConvertExpFP32(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
+ Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element.
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ConvertExpFP32(a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
+ Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element.
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := ConvertExpFP32(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
+ Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element.
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ConvertExpFP32(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
+ Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element.
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ConvertExpFP32(a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ AVX512F
+ AVX512VL
immintrin.h
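As the descriptions note, GetExp reduces to floor(log2(|x|)). A one-line Rust model of ConvertExpFP32, illustrative only; the hardware's edge cases for zero, NaN, infinity, and denormals are not modeled here:

// e.g. get_exp_f32(8.5) == 3.0, matching floor(log2(8.5))
fn get_exp_f32(x: f32) -> f32 {
    x.abs().log2().floor()
}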
+ Miscellaneous
+ Normalize the mantissas of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note]
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := GetNormalizedMantissa(a[i+63:i], sc, interv)
+ENDFOR
+dst[MAX:256] := 0
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
+ Normalize the mantissas of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note]
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := GetNormalizedMantissa(a[i+63:i], sc, interv)
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
+ Normalize the mantissas of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note]
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := GetNormalizedMantissa(a[i+63:i], sc, interv)
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
+ Normalize the mantissas of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note]
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := GetNormalizedMantissa(a[i+63:i], sc, interv)
+ENDFOR
+dst[MAX:128] := 0
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
+ Normalize the mantissas of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note]
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := GetNormalizedMantissa(a[i+63:i], sc, interv)
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
+ Normalize the mantissas of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note]
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := GetNormalizedMantissa(a[i+63:i], sc, interv)
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
+ Normalize the mantissas of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note]
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := GetNormalizedMantissa(a[i+31:i], sc, interv)
+ENDFOR
+dst[MAX:256] := 0
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
+ Normalize the mantissas of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note]
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := GetNormalizedMantissa(a[i+31:i], sc, interv)
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
+ Normalize the mantissas of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note]
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := GetNormalizedMantissa(a[i+31:i], sc, interv)
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
+ Normalize the mantissas of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note]
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := GetNormalizedMantissa(a[i+31:i], sc, interv)
+ENDFOR
+dst[MAX:128] := 0
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
+ Normalize the mantissas of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note]
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := GetNormalizedMantissa(a[i+31:i], sc, interv)
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
+ Normalize the mantissas of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note]
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := GetNormalizedMantissa(a[i+31:i], sc, interv)
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ AVX512F
+ AVX512VL
immintrin.h
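For the default interval [1, 2) with the sign taken from the source, GetNormalizedMantissa can be modeled with bit masks: keep the sign and mantissa, force the biased exponent to 127. `get_mant_f32` is a hypothetical helper for illustration; other `interv`/`sc` settings adjust the exponent range and sign handling and are not shown, nor are NaN/denormal edge cases.

fn get_mant_f32(x: f32) -> f32 {
    let bits = x.to_bits();
    // sign (bit 31) and mantissa (bits 22:0) kept; exponent forced to bias 127,
    // so e.g. 12.0 (1.5 * 2^3) maps to 1.5
    f32::from_bits((bits & 0x807F_FFFF) | 0x3F80_0000)
}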
+ Miscellaneous
+ Copy "a" to "dst", then insert 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "b" into "dst" at the location specified by "imm8".
+dst[255:0] := a[255:0]
+CASE (imm8[0]) OF
+0: dst[127:0] := b[127:0]
+1: dst[255:128] := b[127:0]
+ESAC
+dst[MAX:256] := 0
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
+ Copy "a" to "tmp", then insert 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+tmp[255:0] := a[255:0]
+CASE (imm8[0]) OF
+0: tmp[127:0] := b[127:0]
+1: tmp[255:128] := b[127:0]
+ESAC
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
+ Copy "a" to "tmp", then insert 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+tmp[255:0] := a[255:0]
+CASE (imm8[0]) OF
+0: tmp[127:0] := b[127:0]
+1: tmp[255:128] := b[127:0]
+ESAC
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
+ Copy "a" to "dst", then insert 128 bits (composed of 4 packed 32-bit integers) from "b" into "dst" at the location specified by "imm8".
+dst[255:0] := a[255:0]
+CASE (imm8[0]) OF
+0: dst[127:0] := b[127:0]
+1: dst[255:128] := b[127:0]
+ESAC
+dst[MAX:256] := 0
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
+ Copy "a" to "tmp", then insert 128 bits (composed of 4 packed 32-bit integers) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+tmp[255:0] := a[255:0]
+CASE (imm8[0]) OF
+0: tmp[127:0] := b[127:0]
+1: tmp[255:128] := b[127:0]
+ESAC
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
+ Copy "a" to "tmp", then insert 128 bits (composed of 4 packed 32-bit integers) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+tmp[255:0] := a[255:0]
+CASE (imm8[0]) OF
+0: tmp[127:0] := b[127:0]
+1: tmp[255:128] := b[127:0]
+ESAC
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ AVX512F
+ AVX512VL
immintrin.h
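The insert operations above are just a 128-bit lane copy selected by imm8[0]. A plain-array sketch, with the 256-bit vector modeled as [i32; 8] and an illustrative helper name:

fn insert_i32x4(a: [i32; 8], b: [i32; 4], imm8: u8) -> [i32; 8] {
    let mut dst = a;
    // imm8[0] selects the low (0) or high (1) 128-bit lane
    let base = if imm8 & 1 == 0 { 0 } else { 4 };
    dst[base..base + 4].copy_from_slice(&b);
    dst
}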
+ Miscellaneous
+ Blend packed 32-bit integers from "a" and "b" using control mask "k", and store the results in "dst".
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := b[i+31:i]
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
+ Blend packed 32-bit integers from "a" and "b" using control mask "k", and store the results in "dst".
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := b[i+31:i]
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
+ Blend packed 64-bit integers from "a" and "b" using control mask "k", and store the results in "dst".
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := b[i+63:i]
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
+ Blend packed 64-bit integers from "a" and "b" using control mask "k", and store the results in "dst".
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := b[i+63:i]
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ AVX512F
+ AVX512VL
immintrin.h
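Blend is writemask selection with no arithmetic: bit j of "k" picks b over a. A scalar sketch with arrays standing in for the vector types (helper name illustrative):

fn mask_blend_epi32(k: u8, a: [i32; 8], b: [i32; 8]) -> [i32; 8] {
    let mut dst = a;
    for j in 0..8 {
        if (k >> j) & 1 == 1 {
            dst[j] = b[j]; // set bit selects the lane from b
        }
    }
    dst
}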
+ Miscellaneous
+ Broadcast the low packed 32-bit integer from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[31:0]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
+ Broadcast the low packed 32-bit integer from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[31:0]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
+ Broadcast the low packed 32-bit integer from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[31:0]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
+ Broadcast the low packed 32-bit integer from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[31:0]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
- - - - - Broadcast the low packed 64-bit integer from "a" to all elements of "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := a[63:0] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Broadcast the low packed 64-bit integer from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[63:0] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
-
-
-
- Broadcast the low packed 64-bit integer from "a" to all elements of "dst" using
- zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
-
- FOR j := 0 to 3
- i := j*64
- IF k[j]
- dst[i+63:i] := a[63:0]
- ELSE
- dst[i+63:i] := 0
- FI
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+ Broadcast the low packed 64-bit integer from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 3
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := a[63:0]
+    ELSE
+        dst[i+63:i] := 0
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
- Broadcast the low packed 64-bit integer from "a" to all elements of "dst" using
- writemask "k" (elements are copied from "src" when the corresponding mask bit is not
- set).
-
- FOR j := 0 to 1
- i := j*64
- IF k[j]
- dst[i+63:i] := a[63:0]
- ELSE
- dst[i+63:i] := src[i+63:i]
- FI
- ENDFOR
- dst[MAX:128] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+ Broadcast the low packed 64-bit integer from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 1
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := a[63:0]
+    ELSE
+        dst[i+63:i] := src[i+63:i]
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
- Broadcast the low packed 64-bit integer from "a" to all elements of "dst" using
- zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
-
- FOR j := 0 to 1
- i := j*64
- IF k[j]
- dst[i+63:i] := a[63:0]
- ELSE
- dst[i+63:i] := 0
- FI
- ENDFOR
- dst[MAX:128] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+ Broadcast the low packed 64-bit integer from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 1
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := a[63:0]
+    ELSE
+        dst[i+63:i] := 0
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
- Contiguously store the active 32-bit integers in "a" (those with their
- respective bit set in writemask "k") to "dst", and pass through the remaining elements
- from "src".
-
- size := 32
- m := 0
- FOR j := 0 to 7
- i := j*32
- IF k[j]
- dst[m+size-1:m] := a[i+31:i]
- m := m + size
- FI
- ENDFOR
- dst[255:m] := src[255:m]
- dst[MAX:256] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+ Contiguously store the active 32-bit integers in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src".
+
+size := 32
+m := 0
+FOR j := 0 to 7
+    i := j*32
+    IF k[j]
+        dst[m+size-1:m] := a[i+31:i]
+        m := m + size
+    FI
+ENDFOR
+dst[255:m] := src[255:m]
+dst[MAX:256] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
- Contiguously store the active 32-bit integers in "a" (those with their
- respective bit set in zeromask "k") to "dst", and set the remaining elements to zero.
-
- size := 32
- m := 0
- FOR j := 0 to 7
- i := j*32
- IF k[j]
- dst[m+size-1:m] := a[i+31:i]
- m := m + size
- FI
- ENDFOR
- dst[255:m] := 0
- dst[MAX:256] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+ Contiguously store the active 32-bit integers in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero.
+
+size := 32
+m := 0
+FOR j := 0 to 7
+    i := j*32
+    IF k[j]
+        dst[m+size-1:m] := a[i+31:i]
+        m := m + size
+    FI
+ENDFOR
+dst[255:m] := 0
+dst[MAX:256] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
- Contiguously store the active 32-bit integers in "a" (those with their
- respective bit set in writemask "k") to "dst", and pass through the remaining elements
- from "src".
-
- size := 32
- m := 0
- FOR j := 0 to 3
- i := j*32
- IF k[j]
- dst[m+size-1:m] := a[i+31:i]
- m := m + size
- FI
- ENDFOR
- dst[127:m] := src[127:m]
- dst[MAX:128] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+ Contiguously store the active 32-bit integers in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src".
+
+size := 32
+m := 0
+FOR j := 0 to 3
+    i := j*32
+    IF k[j]
+        dst[m+size-1:m] := a[i+31:i]
+        m := m + size
+    FI
+ENDFOR
+dst[127:m] := src[127:m]
+dst[MAX:128] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
- Contiguously store the active 32-bit integers in "a" (those with their
- respective bit set in zeromask "k") to "dst", and set the remaining elements to zero.
-
- size := 32
- m := 0
- FOR j := 0 to 3
- i := j*32
- IF k[j]
- dst[m+size-1:m] := a[i+31:i]
- m := m + size
- FI
- ENDFOR
- dst[127:m] := 0
- dst[MAX:128] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+ Contiguously store the active 32-bit integers in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero.
+
+size := 32
+m := 0
+FOR j := 0 to 3
+    i := j*32
+    IF k[j]
+        dst[m+size-1:m] := a[i+31:i]
+        m := m + size
+    FI
+ENDFOR
+dst[127:m] := 0
+dst[MAX:128] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
- Contiguously store the active 64-bit integers in "a" (those with their
- respective bit set in writemask "k") to "dst", and pass through the remaining elements
- from "src".
-
- size := 64
- m := 0
- FOR j := 0 to 3
- i := j*64
- IF k[j]
- dst[m+size-1:m] := a[i+63:i]
- m := m + size
- FI
- ENDFOR
- dst[255:m] := src[255:m]
- dst[MAX:256] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+ Contiguously store the active 64-bit integers in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src".
+
+size := 64
+m := 0
+FOR j := 0 to 3
+    i := j*64
+    IF k[j]
+        dst[m+size-1:m] := a[i+63:i]
+        m := m + size
+    FI
+ENDFOR
+dst[255:m] := src[255:m]
+dst[MAX:256] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
- Contiguously store the active 64-bit integers in "a" (those with their
- respective bit set in zeromask "k") to "dst", and set the remaining elements to zero.
-
- size := 64
- m := 0
- FOR j := 0 to 3
- i := j*64
- IF k[j]
- dst[m+size-1:m] := a[i+63:i]
- m := m + size
- FI
- ENDFOR
- dst[255:m] := 0
- dst[MAX:256] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+ Contiguously store the active 64-bit integers in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero.
+
+size := 64
+m := 0
+FOR j := 0 to 3
+    i := j*64
+    IF k[j]
+        dst[m+size-1:m] := a[i+63:i]
+        m := m + size
+    FI
+ENDFOR
+dst[255:m] := 0
+dst[MAX:256] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
- Contiguously store the active 64-bit integers in "a" (those with their
- respective bit set in writemask "k") to "dst", and pass through the remaining elements
- from "src".
-
- size := 64
- m := 0
- FOR j := 0 to 1
- i := j*64
- IF k[j]
- dst[m+size-1:m] := a[i+63:i]
- m := m + size
- FI
- ENDFOR
- dst[127:m] := src[127:m]
- dst[MAX:128] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+ Contiguously store the active 64-bit integers in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src".
+
+size := 64
+m := 0
+FOR j := 0 to 1
+    i := j*64
+    IF k[j]
+        dst[m+size-1:m] := a[i+63:i]
+        m := m + size
+    FI
+ENDFOR
+dst[127:m] := src[127:m]
+dst[MAX:128] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
- Contiguously store the active 64-bit integers in "a" (those with their
- respective bit set in zeromask "k") to "dst", and set the remaining elements to zero.
-
- size := 64
- m := 0
- FOR j := 0 to 1
- i := j*64
- IF k[j]
- dst[m+size-1:m] := a[i+63:i]
- m := m + size
- FI
- ENDFOR
- dst[127:m] := 0
- dst[MAX:128] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+ Contiguously store the active 64-bit integers in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero.
+
+size := 64
+m := 0
+FOR j := 0 to 1
+    i := j*64
+    IF k[j]
+        dst[m+size-1:m] := a[i+63:i]
+        m := m + size
+    FI
+ENDFOR
+dst[127:m] := 0
+dst[MAX:128] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
-
- Shuffle 32-bit integers in "a" across lanes using the corresponding index in
- "idx", and store the results in "dst" using writemask "k" (elements are copied from
- "src" when the corresponding mask bit is not set).
-
- FOR j := 0 to 7
- i := j*32
- id := idx[i+2:i]*32
- IF k[j]
- dst[i+31:i] := a[id+31:id]
- ELSE
- dst[i+31:i] := src[i+31:i]
- FI
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+
+ Shuffle 32-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 7
+    i := j*32
+    id := idx[i+2:i]*32
+    IF k[j]
+        dst[i+31:i] := a[id+31:id]
+    ELSE
+        dst[i+31:i] := src[i+31:i]
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
- Shuffle 32-bit integers in "a" across lanes using the corresponding index in
- "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when
- the corresponding mask bit is not set).
-
- FOR j := 0 to 7
- i := j*32
- id := idx[i+2:i]*32
- IF k[j]
- dst[i+31:i] := a[id+31:id]
- ELSE
- dst[i+31:i] := 0
- FI
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+ Shuffle 32-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 7
+    i := j*32
+    id := idx[i+2:i]*32
+    IF k[j]
+        dst[i+31:i] := a[id+31:id]
+    ELSE
+        dst[i+31:i] := 0
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
- Shuffle 32-bit integers in "a" across lanes using the corresponding index in
- "idx", and store the results in "dst".
-
- FOR j := 0 to 7
- i := j*32
- id := idx[i+2:i]*32
- dst[i+31:i] := a[id+31:id]
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+ Shuffle 32-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst".
+
+FOR j := 0 to 7
+    i := j*32
+    id := idx[i+2:i]*32
+    dst[i+31:i] := a[id+31:id]
+ENDFOR
+dst[MAX:256] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
-
- Shuffle 32-bit integers in "a" and "b" across lanes using the corresponding
- selector and index in "idx", and store the results in "dst" using writemask "k"
- (elements are copied from "idx" when the corresponding mask bit is not set).
-
- FOR j := 0 to 7
- i := j*32
- off := idx[i+2:i]*32
- IF k[j]
- dst[i+31:i] := idx[i+3] ? b[off+31:off] : a[off+31:off]
- ELSE
- dst[i+31:i] := idx[i+31:i]
- FI
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+
+ Shuffle 32-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set).
+
+FOR j := 0 to 7
+    i := j*32
+    off := idx[i+2:i]*32
+    IF k[j]
+        dst[i+31:i] := idx[i+3] ? b[off+31:off] : a[off+31:off]
+    ELSE
+        dst[i+31:i] := idx[i+31:i]
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
-
- Shuffle 32-bit integers in "a" and "b" across lanes using the corresponding
- selector and index in "idx", and store the results in "dst" using writemask "k"
- (elements are copied from "a" when the corresponding mask bit is not set).
-
- FOR j := 0 to 7
- i := j*32
- off := idx[i+2:i]*32
- IF k[j]
- dst[i+31:i] := idx[i+3] ? b[off+31:off] : a[off+31:off]
- ELSE
- dst[i+31:i] := a[i+31:i]
- FI
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+
+ Shuffle 32-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+
+FOR j := 0 to 7
+    i := j*32
+    off := idx[i+2:i]*32
+    IF k[j]
+        dst[i+31:i] := idx[i+3] ? b[off+31:off] : a[off+31:off]
+    ELSE
+        dst[i+31:i] := a[i+31:i]
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
-
- Shuffle 32-bit integers in "a" and "b" across lanes using the corresponding
- selector and index in "idx", and store the results in "dst" using zeromask "k" (elements
- are zeroed out when the corresponding mask bit is not set).
-
- FOR j := 0 to 7
- i := j*32
- off := idx[i+2:i]*32
- IF k[j]
- dst[i+31:i] := (idx[i+3]) ? b[off+31:off] : a[off+31:off]
- ELSE
- dst[i+31:i] := 0
- FI
- ENDFOR
- dst[MAX:256] := 0
-
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+
+ Shuffle 32-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 7
+    i := j*32
+    off := idx[i+2:i]*32
+    IF k[j]
+        dst[i+31:i] := (idx[i+3]) ? b[off+31:off] : a[off+31:off]
+    ELSE
+        dst[i+31:i] := 0
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
- Shuffle 32-bit integers in "a" and "b" across lanes using the corresponding
- selector and index in "idx", and store the results in "dst".
-
- FOR j := 0 to 7
- i := j*32
- off := idx[i+2:i]*32
- dst[i+31:i] := idx[i+3] ? b[off+31:off] : a[off+31:off]
- ENDFOR
- dst[MAX:256] := 0
-
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+ Shuffle 32-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst".
+
+FOR j := 0 to 7
+    i := j*32
+    off := idx[i+2:i]*32
+    dst[i+31:i] := idx[i+3] ? b[off+31:off] : a[off+31:off]
+ENDFOR
+dst[MAX:256] := 0
+
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
-
- Shuffle 32-bit integers in "a" and "b" using the corresponding selector and
- index in "idx", and store the results in "dst" using writemask "k" (elements are copied
- from "idx" when the corresponding mask bit is not set).
-
- FOR j := 0 to 3
- i := j*32
- off := idx[i+1:i]*32
- IF k[j]
- dst[i+31:i] := idx[i+2] ? b[off+31:off] : a[off+31:off]
- ELSE
- dst[i+31:i] := idx[i+31:i]
- FI
- ENDFOR
- dst[MAX:128] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+
+ Shuffle 32-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set).
+
+FOR j := 0 to 3
+    i := j*32
+    off := idx[i+1:i]*32
+    IF k[j]
+        dst[i+31:i] := idx[i+2] ? b[off+31:off] : a[off+31:off]
+    ELSE
+        dst[i+31:i] := idx[i+31:i]
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
-
- Shuffle 32-bit integers in "a" and "b" using the corresponding selector and
- index in "idx", and store the results in "dst" using writemask "k" (elements are copied
- from "a" when the corresponding mask bit is not set).
-
- FOR j := 0 to 3
- i := j*32
- off := idx[i+1:i]*32
- IF k[j]
- dst[i+31:i] := idx[i+2] ? b[off+31:off] : a[off+31:off]
- ELSE
- dst[i+31:i] := a[i+31:i]
- FI
- ENDFOR
- dst[MAX:128] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+
+ Shuffle 32-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+
+FOR j := 0 to 3
+    i := j*32
+    off := idx[i+1:i]*32
+    IF k[j]
+        dst[i+31:i] := idx[i+2] ? b[off+31:off] : a[off+31:off]
+    ELSE
+        dst[i+31:i] := a[i+31:i]
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
-
- Shuffle 32-bit integers in "a" and "b" using the corresponding selector and
- index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed
- out when the corresponding mask bit is not set).
-
- FOR j := 0 to 3
- i := j*32
- off := idx[i+1:i]*32
- IF k[j]
- dst[i+31:i] := (idx[i+2]) ? b[off+31:off] : a[off+31:off]
- ELSE
- dst[i+31:i] := 0
- FI
- ENDFOR
- dst[MAX:128] := 0
-
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+
+ Shuffle 32-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 3
+    i := j*32
+    off := idx[i+1:i]*32
+    IF k[j]
+        dst[i+31:i] := (idx[i+2]) ? b[off+31:off] : a[off+31:off]
+    ELSE
+        dst[i+31:i] := 0
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
- Shuffle 32-bit integers in "a" and "b" using the corresponding selector and
- index in "idx", and store the results in "dst".
-
- FOR j := 0 to 3
- i := j*32
- off := idx[i+1:i]*32
- dst[i+31:i] := idx[i+2] ? b[off+31:off] : a[off+31:off]
- ENDFOR
- dst[MAX:128] := 0
-
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+ Shuffle 32-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst".
+
+FOR j := 0 to 3
+    i := j*32
+    off := idx[i+1:i]*32
+    dst[i+31:i] := idx[i+2] ? b[off+31:off] : a[off+31:off]
+ENDFOR
+dst[MAX:128] := 0
+
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
-
- Shuffle double-precision (64-bit) floating-point elements in "a" and "b" across
- lanes using the corresponding selector and index in "idx", and store the results in
- "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask
- bit is not set).
-
- FOR j := 0 to 3
- i := j*64
- off := idx[i+1:i]*64
- IF k[j]
- dst[i+63:i] := idx[i+2] ? b[off+63:off] : a[off+63:off]
- ELSE
- dst[i+63:i] := idx[i+63:i]
- FI
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+
+ Shuffle double-precision (64-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set).
+
+FOR j := 0 to 3
+    i := j*64
+    off := idx[i+1:i]*64
+    IF k[j]
+        dst[i+63:i] := idx[i+2] ? b[off+63:off] : a[off+63:off]
+    ELSE
+        dst[i+63:i] := idx[i+63:i]
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
-
- Shuffle double-precision (64-bit) floating-point elements in "a" and "b" across
- lanes using the corresponding selector and index in "idx", and store the results in
- "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit
- is not set).
-
- FOR j := 0 to 3
- i := j*64
- off := idx[i+1:i]*64
- IF k[j]
- dst[i+63:i] := idx[i+2] ? b[off+63:off] : a[off+63:off]
- ELSE
- dst[i+63:i] := a[i+63:i]
- FI
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+
+ Shuffle double-precision (64-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+
+FOR j := 0 to 3
+    i := j*64
+    off := idx[i+1:i]*64
+    IF k[j]
+        dst[i+63:i] := idx[i+2] ? b[off+63:off] : a[off+63:off]
+    ELSE
+        dst[i+63:i] := a[i+63:i]
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
-
- Shuffle double-precision (64-bit) floating-point elements in "a" and "b" across
- lanes using the corresponding selector and index in "idx", and store the results in
- "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not
- set).
-
- FOR j := 0 to 3
- i := j*64
- off := idx[i+1:i]*64
- IF k[j]
- dst[i+63:i] := (idx[i+2]) ? b[off+63:off] : a[off+63:off]
- ELSE
- dst[i+63:i] := 0
- FI
- ENDFOR
- dst[MAX:256] := 0
-
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+
+ Shuffle double-precision (64-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 3
+    i := j*64
+    off := idx[i+1:i]*64
+    IF k[j]
+        dst[i+63:i] := (idx[i+2]) ? b[off+63:off] : a[off+63:off]
+    ELSE
+        dst[i+63:i] := 0
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
- Shuffle double-precision (64-bit) floating-point elements in "a" and "b" across
- lanes using the corresponding selector and index in "idx", and store the results in
- "dst".
-
- FOR j := 0 to 3
- i := j*64
- off := idx[i+1:i]*64
- dst[i+63:i] := idx[i+2] ? b[off+63:off] : a[off+63:off]
- ENDFOR
- dst[MAX:256] := 0
-
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+ Shuffle double-precision (64-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst".
+
+FOR j := 0 to 3
+    i := j*64
+    off := idx[i+1:i]*64
+    dst[i+63:i] := idx[i+2] ? b[off+63:off] : a[off+63:off]
+ENDFOR
+dst[MAX:256] := 0
+
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
-
- Shuffle double-precision (64-bit) floating-point elements in "a" and "b" using
- the corresponding selector and index in "idx", and store the results in "dst" using
- writemask "k" (elements are copied from "idx" when the corresponding mask bit is not
- set)
-
- FOR j := 0 to 1
- i := j*64
- off := idx[i]*64
- IF k[j]
- dst[i+63:i] := idx[i+1] ? b[off+63:off] : a[off+63:off]
- ELSE
- dst[i+63:i] := idx[i+63:i]
- FI
- ENDFOR
- dst[MAX:128] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+
+ Shuffle double-precision (64-bit) floating-point elements in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set)
+
+FOR j := 0 to 1
+    i := j*64
+    off := idx[i]*64
+    IF k[j]
+        dst[i+63:i] := idx[i+1] ? b[off+63:off] : a[off+63:off]
+    ELSE
+        dst[i+63:i] := idx[i+63:i]
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
-
- Shuffle double-precision (64-bit) floating-point elements in "a" and "b" using
- the corresponding selector and index in "idx", and store the results in "dst" using
- writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
-
- FOR j := 0 to 1
- i := j*64
- off := idx[i]*64
- IF k[j]
- dst[i+63:i] := idx[i+1] ? b[off+63:off] : a[off+63:off]
- ELSE
- dst[i+63:i] := a[i+63:i]
- FI
- ENDFOR
- dst[MAX:128] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+
+ Shuffle double-precision (64-bit) floating-point elements in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+
+FOR j := 0 to 1
+    i := j*64
+    off := idx[i]*64
+    IF k[j]
+        dst[i+63:i] := idx[i+1] ? b[off+63:off] : a[off+63:off]
+    ELSE
+        dst[i+63:i] := a[i+63:i]
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
-
- Shuffle double-precision (64-bit) floating-point elements in "a" and "b" using
- the corresponding selector and index in "idx", and store the results in "dst" using
- zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
-
- FOR j := 0 to 1
- i := j*64
- off := idx[i]*64
- IF k[j]
- dst[i+63:i] := (idx[i+1]) ? b[off+63:off] : a[off+63:off]
- ELSE
- dst[i+63:i] := 0
- FI
- ENDFOR
- dst[MAX:128] := 0
-
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+
+ Shuffle double-precision (64-bit) floating-point elements in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 1
+    i := j*64
+    off := idx[i]*64
+    IF k[j]
+        dst[i+63:i] := (idx[i+1]) ? b[off+63:off] : a[off+63:off]
+    ELSE
+        dst[i+63:i] := 0
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
- Shuffle double-precision (64-bit) floating-point elements in "a" and "b" using
- the corresponding selector and index in "idx", and store the results in "dst".
-
- FOR j := 0 to 1
- i := j*64
- off := idx[i]*64
- dst[i+63:i] := idx[i+1] ? b[off+63:off] : a[off+63:off]
- ENDFOR
- dst[MAX:128] := 0
-
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+ Shuffle double-precision (64-bit) floating-point elements in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst".
+
+FOR j := 0 to 1
+    i := j*64
+    off := idx[i]*64
+    dst[i+63:i] := idx[i+1] ? b[off+63:off] : a[off+63:off]
+ENDFOR
+dst[MAX:128] := 0
+
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
-
- Shuffle single-precision (32-bit) floating-point elements in "a" and "b" across
- lanes using the corresponding selector and index in "idx", and store the results in
- "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask
- bit is not set).
-
- FOR j := 0 to 7
- i := j*32
- off := idx[i+2:i]*32
- IF k[j]
- dst[i+31:i] := idx[i+3] ? b[off+31:off] : a[off+31:off]
- ELSE
- dst[i+31:i] := idx[i+31:i]
- FI
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+
+ Shuffle single-precision (32-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set).
+
+FOR j := 0 to 7
+    i := j*32
+    off := idx[i+2:i]*32
+    IF k[j]
+        dst[i+31:i] := idx[i+3] ? b[off+31:off] : a[off+31:off]
+    ELSE
+        dst[i+31:i] := idx[i+31:i]
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
-
- Shuffle single-precision (32-bit) floating-point elements in "a" and "b" across
- lanes using the corresponding selector and index in "idx", and store the results in
- "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit
- is not set).
-
- FOR j := 0 to 7
- i := j*32
- off := idx[i+2:i]*32
- IF k[j]
- dst[i+31:i] := idx[i+3] ? b[off+31:off] : a[off+31:off]
- ELSE
- dst[i+31:i] := a[i+31:i]
- FI
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+
+ Shuffle single-precision (32-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+
+FOR j := 0 to 7
+    i := j*32
+    off := idx[i+2:i]*32
+    IF k[j]
+        dst[i+31:i] := idx[i+3] ? b[off+31:off] : a[off+31:off]
+    ELSE
+        dst[i+31:i] := a[i+31:i]
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
-
- Shuffle single-precision (32-bit) floating-point elements in "a" and "b" across
- lanes using the corresponding selector and index in "idx", and store the results in
- "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not
- set).
-
- FOR j := 0 to 7
- i := j*32
- off := idx[i+2:i]*32
- IF k[j]
- dst[i+31:i] := (idx[i+3]) ? b[off+31:off] : a[off+31:off]
- ELSE
- dst[i+31:i] := 0
- FI
- ENDFOR
- dst[MAX:256] := 0
-
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+
+ Shuffle single-precision (32-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 7
+    i := j*32
+    off := idx[i+2:i]*32
+    IF k[j]
+        dst[i+31:i] := (idx[i+3]) ? b[off+31:off] : a[off+31:off]
+    ELSE
+        dst[i+31:i] := 0
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
- Shuffle single-precision (32-bit) floating-point elements in "a" and "b" across
- lanes using the corresponding selector and index in "idx", and store the results in
- "dst".
-
- FOR j := 0 to 7
- i := j*32
- off := idx[i+2:i]*32
- dst[i+31:i] := idx[i+3] ? b[off+31:off] : a[off+31:off]
- ENDFOR
- dst[MAX:256] := 0
-
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+ Shuffle single-precision (32-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst".
+
+FOR j := 0 to 7
+    i := j*32
+    off := idx[i+2:i]*32
+    dst[i+31:i] := idx[i+3] ? b[off+31:off] : a[off+31:off]
+ENDFOR
+dst[MAX:256] := 0
+
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
-
- Shuffle single-precision (32-bit) floating-point elements in "a" and "b" using
- the corresponding selector and index in "idx", and store the results in "dst" using
- writemask "k" (elements are copied from "idx" when the corresponding mask bit is not
- set).
-
- FOR j := 0 to 3
- i := j*32
- off := idx[i+1:i]*32
- IF k[j]
- dst[i+31:i] := idx[i+2] ? b[off+31:off] : a[off+31:off]
- ELSE
- dst[i+31:i] := idx[i+31:i]
- FI
- ENDFOR
- dst[MAX:128] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+
+ Shuffle single-precision (32-bit) floating-point elements in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set).
+
+FOR j := 0 to 3
+    i := j*32
+    off := idx[i+1:i]*32
+    IF k[j]
+        dst[i+31:i] := idx[i+2] ? b[off+31:off] : a[off+31:off]
+    ELSE
+        dst[i+31:i] := idx[i+31:i]
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
-
- Shuffle single-precision (32-bit) floating-point elements in "a" and "b" using
- the corresponding selector and index in "idx", and store the results in "dst" using
- writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
-
- FOR j := 0 to 3
- i := j*32
- off := idx[i+1:i]*32
- IF k[j]
- dst[i+31:i] := idx[i+2] ? b[off+31:off] : a[off+31:off]
- ELSE
- dst[i+31:i] := a[i+31:i]
- FI
- ENDFOR
- dst[MAX:128] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+
+ Shuffle single-precision (32-bit) floating-point elements in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+
+FOR j := 0 to 3
+    i := j*32
+    off := idx[i+1:i]*32
+    IF k[j]
+        dst[i+31:i] := idx[i+2] ? b[off+31:off] : a[off+31:off]
+    ELSE
+        dst[i+31:i] := a[i+31:i]
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
-
- Shuffle single-precision (32-bit) floating-point elements in "a" and "b" using
- the corresponding selector and index in "idx", and store the results in "dst" using
- zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
-
- FOR j := 0 to 3
- i := j*32
- off := idx[i+1:i]*32
- IF k[j]
- dst[i+31:i] := (idx[i+2]) ? b[off+31:off] : a[off+31:off]
- ELSE
- dst[i+31:i] := 0
- FI
- ENDFOR
- dst[MAX:128] := 0
-
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+
+ Shuffle single-precision (32-bit) floating-point elements in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 3
+    i := j*32
+    off := idx[i+1:i]*32
+    IF k[j]
+        dst[i+31:i] := (idx[i+2]) ? b[off+31:off] : a[off+31:off]
+    ELSE
+        dst[i+31:i] := 0
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
- Shuffle single-precision (32-bit) floating-point elements in "a" and "b" using
- the corresponding selector and index in "idx", and store the results in "dst".
-
- FOR j := 0 to 3
- i := j*32
- off := idx[i+1:i]*32
- dst[i+31:i] := idx[i+2] ? b[off+31:off] : a[off+31:off]
- ENDFOR
- dst[MAX:128] := 0
-
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+ Shuffle single-precision (32-bit) floating-point elements in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst".
+
+FOR j := 0 to 3
+    i := j*32
+    off := idx[i+1:i]*32
+    dst[i+31:i] := idx[i+2] ? b[off+31:off] : a[off+31:off]
+ENDFOR
+dst[MAX:128] := 0
+
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
-
- Shuffle 64-bit integers in "a" and "b" across lanes using the corresponding
- selector and index in "idx", and store the results in "dst" using writemask "k"
- (elements are copied from "idx" when the corresponding mask bit is not set).
-
- FOR j := 0 to 3
- i := j*64
- off := idx[i+1:i]*64
- IF k[j]
- dst[i+63:i] := idx[i+2] ? b[off+63:off] : a[off+63:off]
- ELSE
- dst[i+63:i] := idx[i+63:i]
- FI
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+
+ Shuffle 64-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set).
+
+FOR j := 0 to 3
+    i := j*64
+    off := idx[i+1:i]*64
+    IF k[j]
+        dst[i+63:i] := idx[i+2] ? b[off+63:off] : a[off+63:off]
+    ELSE
+        dst[i+63:i] := idx[i+63:i]
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
-
- Shuffle 64-bit integers in "a" and "b" across lanes using the corresponding
- selector and index in "idx", and store the results in "dst" using writemask "k"
- (elements are copied from "a" when the corresponding mask bit is not set).
-
- FOR j := 0 to 3
- i := j*64
- off := idx[i+1:i]*64
- IF k[j]
- dst[i+63:i] := idx[i+2] ? b[off+63:off] : a[off+63:off]
- ELSE
- dst[i+63:i] := a[i+63:i]
- FI
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+
+ Shuffle 64-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+
+FOR j := 0 to 3
+    i := j*64
+    off := idx[i+1:i]*64
+    IF k[j]
+        dst[i+63:i] := idx[i+2] ? b[off+63:off] : a[off+63:off]
+    ELSE
+        dst[i+63:i] := a[i+63:i]
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
-
- Shuffle 64-bit integers in "a" and "b" across lanes using the corresponding
- selector and index in "idx", and store the results in "dst" using zeromask "k" (elements
- are zeroed out when the corresponding mask bit is not set).
-
- FOR j := 0 to 3
- i := j*64
- off := idx[i+1:i]*64
- IF k[j]
- dst[i+63:i] := (idx[i+2]) ? b[off+63:off] : a[off+63:off]
- ELSE
- dst[i+63:i] := 0
- FI
- ENDFOR
- dst[MAX:256] := 0
-
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+
+ Shuffle 64-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 3
+    i := j*64
+    off := idx[i+1:i]*64
+    IF k[j]
+        dst[i+63:i] := (idx[i+2]) ? b[off+63:off] : a[off+63:off]
+    ELSE
+        dst[i+63:i] := 0
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
- Shuffle 64-bit integers in "a" and "b" across lanes using the corresponding
- selector and index in "idx", and store the results in "dst".
-
- FOR j := 0 to 3
- i := j*64
- off := idx[i+1:i]*64
- dst[i+63:i] := idx[i+2] ? b[off+63:off] : a[off+63:off]
- ENDFOR
- dst[MAX:256] := 0
-
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+ Shuffle 64-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst".
+
+FOR j := 0 to 3
+    i := j*64
+    off := idx[i+1:i]*64
+    dst[i+63:i] := idx[i+2] ? b[off+63:off] : a[off+63:off]
+ENDFOR
+dst[MAX:256] := 0
+
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
-
- Shuffle 64-bit integers in "a" and "b" using the corresponding selector and
- index in "idx", and store the results in "dst" using writemask "k" (elements are copied
- from "idx" when the corresponding mask bit is not set).
-
- FOR j := 0 to 1
- i := j*64
- off := idx[i]*64
- IF k[j]
- dst[i+63:i] := idx[i+1] ? b[off+63:off] : a[off+63:off]
- ELSE
- dst[i+63:i] := idx[i+63:i]
- FI
- ENDFOR
- dst[MAX:128] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+
+ Shuffle 64-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set).
+
+FOR j := 0 to 1
+    i := j*64
+    off := idx[i]*64
+    IF k[j]
+        dst[i+63:i] := idx[i+1] ? b[off+63:off] : a[off+63:off]
+    ELSE
+        dst[i+63:i] := idx[i+63:i]
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
-
- Shuffle 64-bit integers in "a" and "b" using the corresponding selector and
- index in "idx", and store the results in "dst" using writemask "k" (elements are copied
- from "a" when the corresponding mask bit is not set).
-
- FOR j := 0 to 1
- i := j*64
- off := idx[i]*64
- IF k[j]
- dst[i+63:i] := idx[i+1] ? b[off+63:off] : a[off+63:off]
- ELSE
- dst[i+63:i] := a[i+63:i]
- FI
- ENDFOR
- dst[MAX:128] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+
+ Shuffle 64-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+
+FOR j := 0 to 1
+    i := j*64
+    off := idx[i]*64
+    IF k[j]
+        dst[i+63:i] := idx[i+1] ? b[off+63:off] : a[off+63:off]
+    ELSE
+        dst[i+63:i] := a[i+63:i]
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
-
- Shuffle 64-bit integers in "a" and "b" using the corresponding selector and
- index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed
- out when the corresponding mask bit is not set).
-
- FOR j := 0 to 1
- i := j*64
- off := idx[i]*64
- IF k[j]
- dst[i+63:i] := (idx[i+1]) ? b[off+63:off] : a[off+63:off]
- ELSE
- dst[i+63:i] := 0
- FI
- ENDFOR
- dst[MAX:128] := 0
-
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+
+ Shuffle 64-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 1
+    i := j*64
+    off := idx[i]*64
+    IF k[j]
+        dst[i+63:i] := (idx[i+1]) ? b[off+63:off] : a[off+63:off]
+    ELSE
+        dst[i+63:i] := 0
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
- Shuffle 64-bit integers in "a" and "b" using the corresponding selector and
- index in "idx", and store the results in "dst".
-
- FOR j := 0 to 1
- i := j*64
- off := idx[i]*64
- dst[i+63:i] := idx[i+1] ? b[off+63:off] : a[off+63:off]
- ENDFOR
- dst[MAX:128] := 0
-
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+ Shuffle 64-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst".
+
+FOR j := 0 to 1
+    i := j*64
+    off := idx[i]*64
+    dst[i+63:i] := idx[i+1] ? b[off+63:off] : a[off+63:off]
+ENDFOR
+dst[MAX:128] := 0
+
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
-
- Shuffle double-precision (64-bit) floating-point elements in "a" within 128-bit
- lanes using the control in "imm8", and store the results in "dst" using writemask "k"
- (elements are copied from "src" when the corresponding mask bit is not set).
-
- IF (imm8[0] == 0) tmp_dst[63:0] := a[63:0]; FI
- IF (imm8[0] == 1) tmp_dst[63:0] := a[127:64]; FI
- IF (imm8[1] == 0) tmp_dst[127:64] := a[63:0]; FI
- IF (imm8[1] == 1) tmp_dst[127:64] := a[127:64]; FI
- IF (imm8[2] == 0) tmp_dst[191:128] := a[191:128]; FI
- IF (imm8[2] == 1) tmp_dst[191:128] := a[255:192]; FI
- IF (imm8[3] == 0) tmp_dst[255:192] := a[191:128]; FI
- IF (imm8[3] == 1) tmp_dst[255:192] := a[255:192]; FI
- FOR j := 0 to 3
- i := j*64
- IF k[j]
- dst[i+63:i] := tmp_dst[i+63:i]
- ELSE
- dst[i+63:i] := src[i+63:i]
- FI
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+
+ Shuffle double-precision (64-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+IF (imm8[0] == 0) tmp_dst[63:0] := a[63:0]; FI
+IF (imm8[0] == 1) tmp_dst[63:0] := a[127:64]; FI
+IF (imm8[1] == 0) tmp_dst[127:64] := a[63:0]; FI
+IF (imm8[1] == 1) tmp_dst[127:64] := a[127:64]; FI
+IF (imm8[2] == 0) tmp_dst[191:128] := a[191:128]; FI
+IF (imm8[2] == 1) tmp_dst[191:128] := a[255:192]; FI
+IF (imm8[3] == 0) tmp_dst[255:192] := a[191:128]; FI
+IF (imm8[3] == 1) tmp_dst[255:192] := a[255:192]; FI
+FOR j := 0 to 3
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := tmp_dst[i+63:i]
+    ELSE
+        dst[i+63:i] := src[i+63:i]
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
-
- Shuffle double-precision (64-bit) floating-point elements in "a" within 128-bit
- lanes using the control in "b", and store the results in "dst" using writemask "k"
- (elements are copied from "src" when the corresponding mask bit is not set).
-
- IF (b[1] == 0) tmp_dst[63:0] := a[63:0]; FI
- IF (b[1] == 1) tmp_dst[63:0] := a[127:64]; FI
- IF (b[65] == 0) tmp_dst[127:64] := a[63:0]; FI
- IF (b[65] == 1) tmp_dst[127:64] := a[127:64]; FI
- IF (b[129] == 0) tmp_dst[191:128] := a[191:128]; FI
- IF (b[129] == 1) tmp_dst[191:128] := a[255:192]; FI
- IF (b[193] == 0) tmp_dst[255:192] := a[191:128]; FI
- IF (b[193] == 1) tmp_dst[255:192] := a[255:192]; FI
- FOR j := 0 to 3
- i := j*64
- IF k[j]
- dst[i+63:i] := tmp_dst[i+63:i]
- ELSE
- dst[i+63:i] := src[i+63:i]
- FI
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+
+ Shuffle double-precision (64-bit) floating-point elements in "a" within 128-bit lanes using the control in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+IF (b[1] == 0) tmp_dst[63:0] := a[63:0]; FI
+IF (b[1] == 1) tmp_dst[63:0] := a[127:64]; FI
+IF (b[65] == 0) tmp_dst[127:64] := a[63:0]; FI
+IF (b[65] == 1) tmp_dst[127:64] := a[127:64]; FI
+IF (b[129] == 0) tmp_dst[191:128] := a[191:128]; FI
+IF (b[129] == 1) tmp_dst[191:128] := a[255:192]; FI
+IF (b[193] == 0) tmp_dst[255:192] := a[191:128]; FI
+IF (b[193] == 1) tmp_dst[255:192] := a[255:192]; FI
+FOR j := 0 to 3
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := tmp_dst[i+63:i]
+    ELSE
+        dst[i+63:i] := src[i+63:i]
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
- Shuffle double-precision (64-bit) floating-point elements in "a" within 128-bit
- lanes using the control in "imm8", and store the results in "dst" using zeromask "k"
- (elements are zeroed out when the corresponding mask bit is not set).
-
- IF (imm8[0] == 0) tmp_dst[63:0] := a[63:0]; FI
- IF (imm8[0] == 1) tmp_dst[63:0] := a[127:64]; FI
- IF (imm8[1] == 0) tmp_dst[127:64] := a[63:0]; FI
- IF (imm8[1] == 1) tmp_dst[127:64] := a[127:64]; FI
- IF (imm8[2] == 0) tmp_dst[191:128] := a[191:128]; FI
- IF (imm8[2] == 1) tmp_dst[191:128] := a[255:192]; FI
- IF (imm8[3] == 0) tmp_dst[255:192] := a[191:128]; FI
- IF (imm8[3] == 1) tmp_dst[255:192] := a[255:192]; FI
- FOR j := 0 to 3
- i := j*64
- IF k[j]
- dst[i+63:i] := tmp_dst[i+63:i]
- ELSE
- dst[i+63:i] := 0
- FI
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+ Shuffle double-precision (64-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+IF (imm8[0] == 0) tmp_dst[63:0] := a[63:0]; FI
+IF (imm8[0] == 1) tmp_dst[63:0] := a[127:64]; FI
+IF (imm8[1] == 0) tmp_dst[127:64] := a[63:0]; FI
+IF (imm8[1] == 1) tmp_dst[127:64] := a[127:64]; FI
+IF (imm8[2] == 0) tmp_dst[191:128] := a[191:128]; FI
+IF (imm8[2] == 1) tmp_dst[191:128] := a[255:192]; FI
+IF (imm8[3] == 0) tmp_dst[255:192] := a[191:128]; FI
+IF (imm8[3] == 1) tmp_dst[255:192] := a[255:192]; FI
+FOR j := 0 to 3
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := tmp_dst[i+63:i]
+    ELSE
+        dst[i+63:i] := 0
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
- Shuffle double-precision (64-bit) floating-point elements in "a" within 128-bit
- lanes using the control in "b", and store the results in "dst" using zeromask "k"
- (elements are zeroed out when the corresponding mask bit is not set).
-
- IF (b[1] == 0) tmp_dst[63:0] := a[63:0]; FI
- IF (b[1] == 1) tmp_dst[63:0] := a[127:64]; FI
- IF (b[65] == 0) tmp_dst[127:64] := a[63:0]; FI
- IF (b[65] == 1) tmp_dst[127:64] := a[127:64]; FI
- IF (b[129] == 0) tmp_dst[191:128] := a[191:128]; FI
- IF (b[129] == 1) tmp_dst[191:128] := a[255:192]; FI
- IF (b[193] == 0) tmp_dst[255:192] := a[191:128]; FI
- IF (b[193] == 1) tmp_dst[255:192] := a[255:192]; FI
- FOR j := 0 to 3
- i := j*64
- IF k[j]
- dst[i+63:i] := tmp_dst[i+63:i]
- ELSE
- dst[i+63:i] := 0
- FI
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+ Shuffle double-precision (64-bit) floating-point elements in "a" within 128-bit lanes using the control in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+IF (b[1] == 0) tmp_dst[63:0] := a[63:0]; FI
+IF (b[1] == 1) tmp_dst[63:0] := a[127:64]; FI
+IF (b[65] == 0) tmp_dst[127:64] := a[63:0]; FI
+IF (b[65] == 1) tmp_dst[127:64] := a[127:64]; FI
+IF (b[129] == 0) tmp_dst[191:128] := a[191:128]; FI
+IF (b[129] == 1) tmp_dst[191:128] := a[255:192]; FI
+IF (b[193] == 0) tmp_dst[255:192] := a[191:128]; FI
+IF (b[193] == 1) tmp_dst[255:192] := a[255:192]; FI
+FOR j := 0 to 3
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := tmp_dst[i+63:i]
+    ELSE
+        dst[i+63:i] := 0
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
-
- Shuffle double-precision (64-bit) floating-point elements in "a" using the
- control in "imm8", and store the results in "dst" using writemask "k" (elements are
- copied from "src" when the corresponding mask bit is not set).
-
- IF (imm8[0] == 0) tmp_dst[63:0] := a[63:0]; FI
- IF (imm8[0] == 1) tmp_dst[63:0] := a[127:64]; FI
- IF (imm8[1] == 0) tmp_dst[127:64] := a[63:0]; FI
- IF (imm8[1] == 1) tmp_dst[127:64] := a[127:64]; FI
- FOR j := 0 to 1
- i := j*64
- IF k[j]
- dst[i+63:i] := tmp_dst[i+63:i]
- ELSE
- dst[i+63:i] := src[i+63:i]
- FI
- ENDFOR
- dst[MAX:128] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+
+ Shuffle double-precision (64-bit) floating-point elements in "a" using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+IF (imm8[0] == 0) tmp_dst[63:0] := a[63:0]; FI
+IF (imm8[0] == 1) tmp_dst[63:0] := a[127:64]; FI
+IF (imm8[1] == 0) tmp_dst[127:64] := a[63:0]; FI
+IF (imm8[1] == 1) tmp_dst[127:64] := a[127:64]; FI
+FOR j := 0 to 1
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := tmp_dst[i+63:i]
+    ELSE
+        dst[i+63:i] := src[i+63:i]
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
-
- Shuffle double-precision (64-bit) floating-point elements in "a" using the
- control in "b", and store the results in "dst" using writemask "k" (elements are copied
- from "src" when the corresponding mask bit is not set).
-
- IF (b[1] == 0) tmp_dst[63:0] := a[63:0]; FI
- IF (b[1] == 1) tmp_dst[63:0] := a[127:64]; FI
- IF (b[65] == 0) tmp_dst[127:64] := a[63:0]; FI
- IF (b[65] == 1) tmp_dst[127:64] := a[127:64]; FI
- FOR j := 0 to 1
- i := j*64
- IF k[j]
- dst[i+63:i] := tmp_dst[i+63:i]
- ELSE
- dst[i+63:i] := src[i+63:i]
- FI
- ENDFOR
- dst[MAX:128] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+
+ Shuffle double-precision (64-bit) floating-point elements in "a" using the control in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+IF (b[1] == 0) tmp_dst[63:0] := a[63:0]; FI
+IF (b[1] == 1) tmp_dst[63:0] := a[127:64]; FI
+IF (b[65] == 0) tmp_dst[127:64] := a[63:0]; FI
+IF (b[65] == 1) tmp_dst[127:64] := a[127:64]; FI
+FOR j := 0 to 1
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := tmp_dst[i+63:i]
+    ELSE
+        dst[i+63:i] := src[i+63:i]
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
- Shuffle double-precision (64-bit) floating-point elements in "a" using the
- control in "imm8", and store the results in "dst" using zeromask "k" (elements are
- zeroed out when the corresponding mask bit is not set).
-
- IF (imm8[0] == 0) tmp_dst[63:0] := a[63:0]; FI
- IF (imm8[0] == 1) tmp_dst[63:0] := a[127:64]; FI
- IF (imm8[1] == 0) tmp_dst[127:64] := a[63:0]; FI
- IF (imm8[1] == 1) tmp_dst[127:64] := a[127:64]; FI
- FOR j := 0 to 1
- i := j*64
- IF k[j]
- dst[i+63:i] := tmp_dst[i+63:i]
- ELSE
- dst[i+63:i] := 0
- FI
- ENDFOR
- dst[MAX:128] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+ Shuffle double-precision (64-bit) floating-point elements in "a" using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+IF (imm8[0] == 0) tmp_dst[63:0] := a[63:0]; FI
+IF (imm8[0] == 1) tmp_dst[63:0] := a[127:64]; FI
+IF (imm8[1] == 0) tmp_dst[127:64] := a[63:0]; FI
+IF (imm8[1] == 1) tmp_dst[127:64] := a[127:64]; FI
+FOR j := 0 to 1
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := tmp_dst[i+63:i]
+    ELSE
+        dst[i+63:i] := 0
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
- Shuffle double-precision (64-bit) floating-point elements in "a" using the
- control in "b", and store the results in "dst" using zeromask "k" (elements are zeroed
- out when the corresponding mask bit is not set).
-
- IF (b[1] == 0) tmp_dst[63:0] := a[63:0]; FI
- IF (b[1] == 1) tmp_dst[63:0] := a[127:64]; FI
- IF (b[65] == 0) tmp_dst[127:64] := a[63:0]; FI
- IF (b[65] == 1) tmp_dst[127:64] := a[127:64]; FI
- FOR j := 0 to 1
- i := j*64
- IF k[j]
- dst[i+63:i] := tmp_dst[i+63:i]
- ELSE
- dst[i+63:i] := 0
- FI
- ENDFOR
- dst[MAX:128] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+ Shuffle double-precision (64-bit) floating-point elements in "a" using the control in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+IF (b[1] == 0) tmp_dst[63:0] := a[63:0]; FI
+IF (b[1] == 1) tmp_dst[63:0] := a[127:64]; FI
+IF (b[65] == 0) tmp_dst[127:64] := a[63:0]; FI
+IF (b[65] == 1) tmp_dst[127:64] := a[127:64]; FI
+FOR j := 0 to 1
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := tmp_dst[i+63:i]
+    ELSE
+        dst[i+63:i] := 0
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
-
- Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit
- lanes using the control in "imm8", and store the results in "dst" using writemask "k"
- (elements are copied from "src" when the corresponding mask bit is not set).
-
- DEFINE SELECT4(src, control) {
- CASE(control[1:0]) OF
- 0: tmp[31:0] := src[31:0]
- 1: tmp[31:0] := src[63:32]
- 2: tmp[31:0] := src[95:64]
- 3: tmp[31:0] := src[127:96]
- ESAC
- RETURN tmp[31:0]
- }
- tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0])
- tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2])
- tmp_dst[95:64] := SELECT4(a[127:0], imm8[5:4])
- tmp_dst[127:96] := SELECT4(a[127:0], imm8[7:6])
- tmp_dst[159:128] := SELECT4(a[255:128], imm8[1:0])
- tmp_dst[191:160] := SELECT4(a[255:128], imm8[3:2])
- tmp_dst[223:192] := SELECT4(a[255:128], imm8[5:4])
- tmp_dst[255:224] := SELECT4(a[255:128], imm8[7:6])
- FOR j := 0 to 7
- i := j*32
- IF k[j]
- dst[i+31:i] := tmp_dst[i+31:i]
- ELSE
- dst[i+31:i] := src[i+31:i]
- FI
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+
+ Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+DEFINE SELECT4(src, control) {
+    CASE(control[1:0]) OF
+    0: tmp[31:0] := src[31:0]
+    1: tmp[31:0] := src[63:32]
+    2: tmp[31:0] := src[95:64]
+    3: tmp[31:0] := src[127:96]
+    ESAC
+    RETURN tmp[31:0]
+}
+tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0])
+tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2])
+tmp_dst[95:64] := SELECT4(a[127:0], imm8[5:4])
+tmp_dst[127:96] := SELECT4(a[127:0], imm8[7:6])
+tmp_dst[159:128] := SELECT4(a[255:128], imm8[1:0])
+tmp_dst[191:160] := SELECT4(a[255:128], imm8[3:2])
+tmp_dst[223:192] := SELECT4(a[255:128], imm8[5:4])
+tmp_dst[255:224] := SELECT4(a[255:128], imm8[7:6])
+FOR j := 0 to 7
+    i := j*32
+    IF k[j]
+        dst[i+31:i] := tmp_dst[i+31:i]
+    ELSE
+        dst[i+31:i] := src[i+31:i]
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
-
-
-
-
-
- Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit
- lanes using the control in "b", and store the results in "dst" using writemask "k"
- (elements are copied from "src" when the corresponding mask bit is not set).
-
- DEFINE SELECT4(src, control) {
- CASE(control[1:0]) OF
- 0: tmp[31:0] := src[31:0]
- 1: tmp[31:0] := src[63:32]
- 2: tmp[31:0] := src[95:64]
- 3: tmp[31:0] := src[127:96]
- ESAC
- RETURN tmp[31:0]
- }
- tmp_dst[31:0] := SELECT4(a[127:0], b[1:0])
- tmp_dst[63:32] := SELECT4(a[127:0], b[33:32])
- tmp_dst[95:64] := SELECT4(a[127:0], b[65:64])
- tmp_dst[127:96] := SELECT4(a[127:0], b[97:96])
- tmp_dst[159:128] := SELECT4(a[255:128], b[129:128])
- tmp_dst[191:160] := SELECT4(a[255:128], b[161:160])
- tmp_dst[223:192] := SELECT4(a[255:128], b[193:192])
- tmp_dst[255:224] := SELECT4(a[255:128], b[225:224])
- FOR j := 0 to 7
- i := j*32
- IF k[j]
- dst[i+31:i] := tmp_dst[i+31:i]
- ELSE
- dst[i+31:i] := src[i+31:i]
- FI
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Miscellaneous
+
+
+
+
+
+ Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+DEFINE SELECT4(src, control) {
+    CASE(control[1:0]) OF
+    0: tmp[31:0] := src[31:0]
+    1: tmp[31:0] := src[63:32]
+    2: tmp[31:0] := src[95:64]
+    3: tmp[31:0] := src[127:96]
+    ESAC
+    RETURN tmp[31:0]
+}
+tmp_dst[31:0] := SELECT4(a[127:0], b[1:0])
+tmp_dst[63:32] := SELECT4(a[127:0], b[33:32])
+tmp_dst[95:64] := SELECT4(a[127:0], b[65:64])
+tmp_dst[127:96] := SELECT4(a[127:0], b[97:96])
+tmp_dst[159:128] := SELECT4(a[255:128], b[129:128])
+tmp_dst[191:160] := SELECT4(a[255:128], b[161:160])
+tmp_dst[223:192] := SELECT4(a[255:128], b[193:192])
+tmp_dst[255:224] := SELECT4(a[255:128], b[225:224])
+FOR j := 0 to 7
+    i := j*32
+    IF k[j]
+        dst[i+31:i] := tmp_dst[i+31:i]
+    ELSE
+        dst[i+31:i] := src[i+31:i]
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Miscellaneous
- - - - - Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit - lanes using the control in "imm8", and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - DEFINE SELECT4(src, control) { - CASE(control[1:0]) OF - 0: tmp[31:0] := src[31:0] - 1: tmp[31:0] := src[63:32] - 2: tmp[31:0] := src[95:64] - 3: tmp[31:0] := src[127:96] - ESAC - RETURN tmp[31:0] - } - tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0]) - tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2]) - tmp_dst[95:64] := SELECT4(a[127:0], imm8[5:4]) - tmp_dst[127:96] := SELECT4(a[127:0], imm8[7:6]) - tmp_dst[159:128] := SELECT4(a[255:128], imm8[1:0]) - tmp_dst[191:160] := SELECT4(a[255:128], imm8[3:2]) - tmp_dst[223:192] := SELECT4(a[255:128], imm8[5:4]) - tmp_dst[255:224] := SELECT4(a[255:128], imm8[7:6]) - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := tmp_dst[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE SELECT4(src, control) { + CASE(control[1:0]) OF + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} +tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0]) +tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2]) +tmp_dst[95:64] := SELECT4(a[127:0], imm8[5:4]) +tmp_dst[127:96] := SELECT4(a[127:0], imm8[7:6]) +tmp_dst[159:128] := SELECT4(a[255:128], imm8[1:0]) +tmp_dst[191:160] := SELECT4(a[255:128], imm8[3:2]) +tmp_dst[223:192] := SELECT4(a[255:128], imm8[5:4]) +tmp_dst[255:224] := SELECT4(a[255:128], imm8[7:6]) +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit - lanes using the control in "b", and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - DEFINE SELECT4(src, control) { - CASE(control[1:0]) OF - 0: tmp[31:0] := src[31:0] - 1: tmp[31:0] := src[63:32] - 2: tmp[31:0] := src[95:64] - 3: tmp[31:0] := src[127:96] - ESAC - RETURN tmp[31:0] - } - tmp_dst[31:0] := SELECT4(a[127:0], b[1:0]) - tmp_dst[63:32] := SELECT4(a[127:0], b[33:32]) - tmp_dst[95:64] := SELECT4(a[127:0], b[65:64]) - tmp_dst[127:96] := SELECT4(a[127:0], b[97:96]) - tmp_dst[159:128] := SELECT4(a[255:128], b[129:128]) - tmp_dst[191:160] := SELECT4(a[255:128], b[161:160]) - tmp_dst[223:192] := SELECT4(a[255:128], b[193:192]) - tmp_dst[255:224] := SELECT4(a[255:128], b[225:224]) - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := tmp_dst[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE SELECT4(src, control) { + CASE(control[1:0]) OF + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} +tmp_dst[31:0] := SELECT4(a[127:0], b[1:0]) +tmp_dst[63:32] := SELECT4(a[127:0], b[33:32]) +tmp_dst[95:64] := SELECT4(a[127:0], b[65:64]) +tmp_dst[127:96] := SELECT4(a[127:0], b[97:96]) +tmp_dst[159:128] := SELECT4(a[255:128], b[129:128]) +tmp_dst[191:160] := SELECT4(a[255:128], b[161:160]) +tmp_dst[223:192] := SELECT4(a[255:128], b[193:192]) +tmp_dst[255:224] := SELECT4(a[255:128], b[225:224]) +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
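The four entries above share the SELECT4 helper; only the selector source (the immediate versus the low bits of each dword of "b") and the masking policy differ. A minimal scalar model in Rust of the immediate form, written against the pseudocode only (function and parameter names here are illustrative, not the stdarch API):

    // Scalar model of the masked in-lane shuffle: destination element j picks
    // one of the four f32s in its own 128-bit lane of `a`, using two imm8
    // bits; the mask then keeps the shuffled value, falls back to `src`
    // (writemask), or zeroes the element (zeromask).
    fn mask_permute_ps_256(src: [f32; 8], k: u8, a: [f32; 8], imm8: u8, zero: bool) -> [f32; 8] {
        core::array::from_fn(|j| {
            let base = 4 * (j / 4); // start of this element's 128-bit lane
            let sel = ((imm8 >> (2 * (j % 4))) & 0b11) as usize;
            let shuffled = a[base + sel];
            if (k >> j) & 1 != 0 {
                shuffled
            } else if zero {
                0.0 // zeromask variant
            } else {
                src[j] // writemask variant
            }
        })
    }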
Shuffle single-precision (32-bit) floating-point elements in "a" using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

DEFINE SELECT4(src, control) {
    CASE(control[1:0]) OF
    0: tmp[31:0] := src[31:0]
    1: tmp[31:0] := src[63:32]
    2: tmp[31:0] := src[95:64]
    3: tmp[31:0] := src[127:96]
    ESAC
    RETURN tmp[31:0]
}
tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0])
tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2])
tmp_dst[95:64] := SELECT4(a[127:0], imm8[5:4])
tmp_dst[127:96] := SELECT4(a[127:0], imm8[7:6])
FOR j := 0 to 3
    i := j*32
    IF k[j]
        dst[i+31:i] := tmp_dst[i+31:i]
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512F, AVX512VL | Header: immintrin.h | Category: Miscellaneous

Shuffle single-precision (32-bit) floating-point elements in "a" using the control in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

DEFINE SELECT4(src, control) {
    CASE(control[1:0]) OF
    0: tmp[31:0] := src[31:0]
    1: tmp[31:0] := src[63:32]
    2: tmp[31:0] := src[95:64]
    3: tmp[31:0] := src[127:96]
    ESAC
    RETURN tmp[31:0]
}
tmp_dst[31:0] := SELECT4(a[127:0], b[1:0])
tmp_dst[63:32] := SELECT4(a[127:0], b[33:32])
tmp_dst[95:64] := SELECT4(a[127:0], b[65:64])
tmp_dst[127:96] := SELECT4(a[127:0], b[97:96])
FOR j := 0 to 3
    i := j*32
    IF k[j]
        dst[i+31:i] := tmp_dst[i+31:i]
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512F, AVX512VL | Header: immintrin.h | Category: Miscellaneous

Shuffle single-precision (32-bit) floating-point elements in "a" using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

DEFINE SELECT4(src, control) {
    CASE(control[1:0]) OF
    0: tmp[31:0] := src[31:0]
    1: tmp[31:0] := src[63:32]
    2: tmp[31:0] := src[95:64]
    3: tmp[31:0] := src[127:96]
    ESAC
    RETURN tmp[31:0]
}
tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0])
tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2])
tmp_dst[95:64] := SELECT4(a[127:0], imm8[5:4])
tmp_dst[127:96] := SELECT4(a[127:0], imm8[7:6])
FOR j := 0 to 3
    i := j*32
    IF k[j]
        dst[i+31:i] := tmp_dst[i+31:i]
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512F, AVX512VL | Header: immintrin.h | Category: Miscellaneous

Shuffle single-precision (32-bit) floating-point elements in "a" using the control in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

DEFINE SELECT4(src, control) {
    CASE(control[1:0]) OF
    0: tmp[31:0] := src[31:0]
    1: tmp[31:0] := src[63:32]
    2: tmp[31:0] := src[95:64]
    3: tmp[31:0] := src[127:96]
    ESAC
    RETURN tmp[31:0]
}
tmp_dst[31:0] := SELECT4(a[127:0], b[1:0])
tmp_dst[63:32] := SELECT4(a[127:0], b[33:32])
tmp_dst[95:64] := SELECT4(a[127:0], b[65:64])
tmp_dst[127:96] := SELECT4(a[127:0], b[97:96])
FOR j := 0 to 3
    i := j*32
    IF k[j]
        dst[i+31:i] := tmp_dst[i+31:i]
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512F, AVX512VL | Header: immintrin.h | Category: Miscellaneous
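The variable-control ("b") forms replace the shared immediate with a per-element selector taken from the low two bits of the corresponding dword of "b". A scalar sketch of the unmasked 128-bit core (names illustrative):

    // Each destination element j is selected from `a` by bits [1:0] of b[j].
    fn permutevar_ps_128(a: [f32; 4], b: [i32; 4]) -> [f32; 4] {
        core::array::from_fn(|j| a[(b[j] & 0b11) as usize])
    }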
Shuffle double-precision (64-bit) floating-point elements in "a" across lanes using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

DEFINE SELECT4(src, control) {
    CASE(control[1:0]) OF
    0: tmp[63:0] := src[63:0]
    1: tmp[63:0] := src[127:64]
    2: tmp[63:0] := src[191:128]
    3: tmp[63:0] := src[255:192]
    ESAC
    RETURN tmp[63:0]
}
tmp_dst[63:0] := SELECT4(a[255:0], imm8[1:0])
tmp_dst[127:64] := SELECT4(a[255:0], imm8[3:2])
tmp_dst[191:128] := SELECT4(a[255:0], imm8[5:4])
tmp_dst[255:192] := SELECT4(a[255:0], imm8[7:6])
FOR j := 0 to 3
    i := j*64
    IF k[j]
        dst[i+63:i] := tmp_dst[i+63:i]
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512F, AVX512VL | Header: immintrin.h | Category: Miscellaneous

Shuffle double-precision (64-bit) floating-point elements in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 3
    i := j*64
    id := idx[i+1:i]*64
    IF k[j]
        dst[i+63:i] := a[id+63:id]
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512F, AVX512VL | Header: immintrin.h | Category: Miscellaneous

Shuffle double-precision (64-bit) floating-point elements in "a" across lanes using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

DEFINE SELECT4(src, control) {
    CASE(control[1:0]) OF
    0: tmp[63:0] := src[63:0]
    1: tmp[63:0] := src[127:64]
    2: tmp[63:0] := src[191:128]
    3: tmp[63:0] := src[255:192]
    ESAC
    RETURN tmp[63:0]
}
tmp_dst[63:0] := SELECT4(a[255:0], imm8[1:0])
tmp_dst[127:64] := SELECT4(a[255:0], imm8[3:2])
tmp_dst[191:128] := SELECT4(a[255:0], imm8[5:4])
tmp_dst[255:192] := SELECT4(a[255:0], imm8[7:6])
FOR j := 0 to 3
    i := j*64
    IF k[j]
        dst[i+63:i] := tmp_dst[i+63:i]
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512F, AVX512VL | Header: immintrin.h | Category: Miscellaneous

Shuffle double-precision (64-bit) floating-point elements in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 3
    i := j*64
    id := idx[i+1:i]*64
    IF k[j]
        dst[i+63:i] := a[id+63:id]
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512F, AVX512VL | Header: immintrin.h | Category: Miscellaneous

Shuffle double-precision (64-bit) floating-point elements in "a" across lanes using the control in "imm8", and store the results in "dst".

DEFINE SELECT4(src, control) {
    CASE(control[1:0]) OF
    0: tmp[63:0] := src[63:0]
    1: tmp[63:0] := src[127:64]
    2: tmp[63:0] := src[191:128]
    3: tmp[63:0] := src[255:192]
    ESAC
    RETURN tmp[63:0]
}
dst[63:0] := SELECT4(a[255:0], imm8[1:0])
dst[127:64] := SELECT4(a[255:0], imm8[3:2])
dst[191:128] := SELECT4(a[255:0], imm8[5:4])
dst[255:192] := SELECT4(a[255:0], imm8[7:6])
dst[MAX:256] := 0

CPUID: AVX512F, AVX512VL | Header: immintrin.h | Category: Miscellaneous

Shuffle double-precision (64-bit) floating-point elements in "a" across lanes using the corresponding index in "idx", and store the results in "dst".

FOR j := 0 to 3
    i := j*64
    id := idx[i+1:i]*64
    dst[i+63:i] := a[id+63:id]
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512F, AVX512VL | Header: immintrin.h | Category: Miscellaneous
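Unlike the in-lane ps shuffles above, these pd permutes select across the full 256-bit register, so two bits address any of the four qwords. A scalar sketch of the "idx" form (names illustrative, not the stdarch API):

    // Cross-lane qword permute: element j comes from anywhere in `a`,
    // addressed by the low 2 bits of idx[j] (idx[i+1:i] in the pseudocode).
    fn permutexvar_pd_256(idx: [i64; 4], a: [f64; 4]) -> [f64; 4] {
        core::array::from_fn(|j| a[(idx[j] & 0b11) as usize])
    }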
Shuffle single-precision (32-bit) floating-point elements in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := j*32
    id := idx[i+2:i]*32
    IF k[j]
        dst[i+31:i] := a[id+31:id]
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512F, AVX512VL | Header: immintrin.h | Category: Miscellaneous

Shuffle single-precision (32-bit) floating-point elements in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := j*32
    id := idx[i+2:i]*32
    IF k[j]
        dst[i+31:i] := a[id+31:id]
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512F, AVX512VL | Header: immintrin.h | Category: Miscellaneous

Shuffle single-precision (32-bit) floating-point elements in "a" across lanes using the corresponding index in "idx".

FOR j := 0 to 7
    i := j*32
    id := idx[i+2:i]*32
    dst[i+31:i] := a[id+31:id]
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512F, AVX512VL | Header: immintrin.h | Category: Miscellaneous
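With eight 32-bit elements, the ps variant consumes three index bits per element (idx[i+2:i]) instead of two; the same sketch carries over (names illustrative):

    // Cross-lane dword permute over eight elements of `a`.
    fn permutexvar_ps_256(idx: [i32; 8], a: [f32; 8]) -> [f32; 8] {
        core::array::from_fn(|j| a[(idx[j] & 0b111) as usize])
    }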
Shuffle 64-bit integers in "a" across lanes using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

DEFINE SELECT4(src, control) {
    CASE(control[1:0]) OF
    0: tmp[63:0] := src[63:0]
    1: tmp[63:0] := src[127:64]
    2: tmp[63:0] := src[191:128]
    3: tmp[63:0] := src[255:192]
    ESAC
    RETURN tmp[63:0]
}
tmp_dst[63:0] := SELECT4(a[255:0], imm8[1:0])
tmp_dst[127:64] := SELECT4(a[255:0], imm8[3:2])
tmp_dst[191:128] := SELECT4(a[255:0], imm8[5:4])
tmp_dst[255:192] := SELECT4(a[255:0], imm8[7:6])
FOR j := 0 to 3
    i := j*64
    IF k[j]
        dst[i+63:i] := tmp_dst[i+63:i]
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512F, AVX512VL | Header: immintrin.h | Category: Miscellaneous

Shuffle 64-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 3
    i := j*64
    id := idx[i+1:i]*64
    IF k[j]
        dst[i+63:i] := a[id+63:id]
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512F, AVX512VL | Header: immintrin.h | Category: Miscellaneous

Shuffle 64-bit integers in "a" across lanes using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

DEFINE SELECT4(src, control) {
    CASE(control[1:0]) OF
    0: tmp[63:0] := src[63:0]
    1: tmp[63:0] := src[127:64]
    2: tmp[63:0] := src[191:128]
    3: tmp[63:0] := src[255:192]
    ESAC
    RETURN tmp[63:0]
}
tmp_dst[63:0] := SELECT4(a[255:0], imm8[1:0])
tmp_dst[127:64] := SELECT4(a[255:0], imm8[3:2])
tmp_dst[191:128] := SELECT4(a[255:0], imm8[5:4])
tmp_dst[255:192] := SELECT4(a[255:0], imm8[7:6])
FOR j := 0 to 3
    i := j*64
    IF k[j]
        dst[i+63:i] := tmp_dst[i+63:i]
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512F, AVX512VL | Header: immintrin.h | Category: Miscellaneous

Shuffle 64-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 3
    i := j*64
    id := idx[i+1:i]*64
    IF k[j]
        dst[i+63:i] := a[id+63:id]
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512F, AVX512VL | Header: immintrin.h | Category: Miscellaneous

Shuffle 64-bit integers in "a" across lanes using the control in "imm8", and store the results in "dst".

DEFINE SELECT4(src, control) {
    CASE(control[1:0]) OF
    0: tmp[63:0] := src[63:0]
    1: tmp[63:0] := src[127:64]
    2: tmp[63:0] := src[191:128]
    3: tmp[63:0] := src[255:192]
    ESAC
    RETURN tmp[63:0]
}
dst[63:0] := SELECT4(a[255:0], imm8[1:0])
dst[127:64] := SELECT4(a[255:0], imm8[3:2])
dst[191:128] := SELECT4(a[255:0], imm8[5:4])
dst[255:192] := SELECT4(a[255:0], imm8[7:6])
dst[MAX:256] := 0

CPUID: AVX512F, AVX512VL | Header: immintrin.h | Category: Miscellaneous

Shuffle 64-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst".

FOR j := 0 to 3
    i := j*64
    id := idx[i+1:i]*64
    dst[i+63:i] := a[id+63:id]
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512F, AVX512VL | Header: immintrin.h | Category: Miscellaneous
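These qword-integer permutes mirror the pd ones element for element; the immediate form packs all four 2-bit selectors into one byte. A scalar sketch (names illustrative):

    // Immediate-controlled cross-lane qword permute: the selector for
    // element j sits in imm8 bits [2j+1:2j].
    fn permutex_epi64_256(a: [i64; 4], imm8: u8) -> [i64; 4] {
        core::array::from_fn(|j| a[((imm8 >> (2 * j)) & 0b11) as usize])
    }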
Load contiguous active 32-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

m := 0
FOR j := 0 to 7
    i := j*32
    IF k[j]
        dst[i+31:i] := a[m+31:m]
        m := m + 32
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512F, AVX512VL | Header: immintrin.h | Category: Miscellaneous

Load contiguous active 32-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

m := 0
FOR j := 0 to 7
    i := j*32
    IF k[j]
        dst[i+31:i] := a[m+31:m]
        m := m + 32
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512F, AVX512VL | Header: immintrin.h | Category: Miscellaneous

Load contiguous active 32-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

m := 0
FOR j := 0 to 3
    i := j*32
    IF k[j]
        dst[i+31:i] := a[m+31:m]
        m := m + 32
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512F, AVX512VL | Header: immintrin.h | Category: Miscellaneous

Load contiguous active 32-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

m := 0
FOR j := 0 to 3
    i := j*32
    IF k[j]
        dst[i+31:i] := a[m+31:m]
        m := m + 32
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512F, AVX512VL | Header: immintrin.h | Category: Miscellaneous

Load contiguous active 64-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

m := 0
FOR j := 0 to 3
    i := j*64
    IF k[j]
        dst[i+63:i] := a[m+63:m]
        m := m + 64
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512F, AVX512VL | Header: immintrin.h | Category: Miscellaneous

Load contiguous active 64-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

m := 0
FOR j := 0 to 3
    i := j*64
    IF k[j]
        dst[i+63:i] := a[m+63:m]
        m := m + 64
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512F, AVX512VL | Header: immintrin.h | Category: Miscellaneous

Load contiguous active 64-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

m := 0
FOR j := 0 to 1
    i := j*64
    IF k[j]
        dst[i+63:i] := a[m+63:m]
        m := m + 64
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512F, AVX512VL | Header: immintrin.h | Category: Miscellaneous

Load contiguous active 64-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

m := 0
FOR j := 0 to 1
    i := j*64
    IF k[j]
        dst[i+63:i] := a[m+63:m]
        m := m + 64
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512F, AVX512VL | Header: immintrin.h | Category: Miscellaneous
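All eight expand entries follow one pattern: "a" is read contiguously from its low end, but writes land only in destination slots whose mask bit is set. A scalar sketch of the 256-bit epi32 zeromask form (names illustrative):

    // Expand: consume `a` front to back, scattering into masked-on slots of
    // `dst`; masked-off slots are zeroed (or copied from `src` in the
    // writemask variants).
    fn maskz_expand_epi32_256(k: u8, a: [i32; 8]) -> [i32; 8] {
        let mut dst = [0i32; 8];
        let mut m = 0;
        for j in 0..8 {
            if (k >> j) & 1 != 0 {
                dst[j] = a[m];
                m += 1;
            }
        }
        dst
    }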
Shuffle 32-bit integers in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

DEFINE SELECT4(src, control) {
    CASE(control[1:0]) OF
    0: tmp[31:0] := src[31:0]
    1: tmp[31:0] := src[63:32]
    2: tmp[31:0] := src[95:64]
    3: tmp[31:0] := src[127:96]
    ESAC
    RETURN tmp[31:0]
}
tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0])
tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2])
tmp_dst[95:64] := SELECT4(a[127:0], imm8[5:4])
tmp_dst[127:96] := SELECT4(a[127:0], imm8[7:6])
tmp_dst[159:128] := SELECT4(a[255:128], imm8[1:0])
tmp_dst[191:160] := SELECT4(a[255:128], imm8[3:2])
tmp_dst[223:192] := SELECT4(a[255:128], imm8[5:4])
tmp_dst[255:224] := SELECT4(a[255:128], imm8[7:6])
FOR j := 0 to 7
    i := j*32
    IF k[j]
        dst[i+31:i] := tmp_dst[i+31:i]
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512F, AVX512VL | Header: immintrin.h | Category: Miscellaneous

Shuffle 32-bit integers in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

DEFINE SELECT4(src, control) {
    CASE(control[1:0]) OF
    0: tmp[31:0] := src[31:0]
    1: tmp[31:0] := src[63:32]
    2: tmp[31:0] := src[95:64]
    3: tmp[31:0] := src[127:96]
    ESAC
    RETURN tmp[31:0]
}
tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0])
tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2])
tmp_dst[95:64] := SELECT4(a[127:0], imm8[5:4])
tmp_dst[127:96] := SELECT4(a[127:0], imm8[7:6])
tmp_dst[159:128] := SELECT4(a[255:128], imm8[1:0])
tmp_dst[191:160] := SELECT4(a[255:128], imm8[3:2])
tmp_dst[223:192] := SELECT4(a[255:128], imm8[5:4])
tmp_dst[255:224] := SELECT4(a[255:128], imm8[7:6])
FOR j := 0 to 7
    i := j*32
    IF k[j]
        dst[i+31:i] := tmp_dst[i+31:i]
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512F, AVX512VL | Header: immintrin.h | Category: Miscellaneous

Shuffle 32-bit integers in "a" using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

DEFINE SELECT4(src, control) {
    CASE(control[1:0]) OF
    0: tmp[31:0] := src[31:0]
    1: tmp[31:0] := src[63:32]
    2: tmp[31:0] := src[95:64]
    3: tmp[31:0] := src[127:96]
    ESAC
    RETURN tmp[31:0]
}
tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0])
tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2])
tmp_dst[95:64] := SELECT4(a[127:0], imm8[5:4])
tmp_dst[127:96] := SELECT4(a[127:0], imm8[7:6])
FOR j := 0 to 3
    i := j*32
    IF k[j]
        dst[i+31:i] := tmp_dst[i+31:i]
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512F, AVX512VL | Header: immintrin.h | Category: Miscellaneous

Shuffle 32-bit integers in "a" using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

DEFINE SELECT4(src, control) {
    CASE(control[1:0]) OF
    0: tmp[31:0] := src[31:0]
    1: tmp[31:0] := src[63:32]
    2: tmp[31:0] := src[95:64]
    3: tmp[31:0] := src[127:96]
    ESAC
    RETURN tmp[31:0]
}
tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0])
tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2])
tmp_dst[95:64] := SELECT4(a[127:0], imm8[5:4])
tmp_dst[127:96] := SELECT4(a[127:0], imm8[7:6])
FOR j := 0 to 3
    i := j*32
    IF k[j]
        dst[i+31:i] := tmp_dst[i+31:i]
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512F, AVX512VL | Header: immintrin.h | Category: Miscellaneous
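These integer shuffles are the epi32 twins of the SELECT4-based ps shuffles earlier in this hunk; the unmasked 128-bit core reduces to (names illustrative):

    // SELECT4 over integer dwords: element j of `dst` is a[imm8[2j+1:2j]].
    fn shuffle_epi32_128(a: [i32; 4], imm8: u8) -> [i32; 4] {
        core::array::from_fn(|j| a[((imm8 >> (2 * j)) & 0b11) as usize])
    }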
Unpack and interleave 32-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

DEFINE INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]) {
    dst[31:0] := src1[95:64]
    dst[63:32] := src2[95:64]
    dst[95:64] := src1[127:96]
    dst[127:96] := src2[127:96]
    RETURN dst[127:0]
}
tmp_dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0])
tmp_dst[255:128] := INTERLEAVE_HIGH_DWORDS(a[255:128], b[255:128])
FOR j := 0 to 7
    i := j*32
    IF k[j]
        dst[i+31:i] := tmp_dst[i+31:i]
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512F, AVX512VL | Header: immintrin.h | Category: Miscellaneous

Unpack and interleave 32-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

DEFINE INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]) {
    dst[31:0] := src1[95:64]
    dst[63:32] := src2[95:64]
    dst[95:64] := src1[127:96]
    dst[127:96] := src2[127:96]
    RETURN dst[127:0]
}
tmp_dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0])
tmp_dst[255:128] := INTERLEAVE_HIGH_DWORDS(a[255:128], b[255:128])
FOR j := 0 to 7
    i := j*32
    IF k[j]
        dst[i+31:i] := tmp_dst[i+31:i]
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512F, AVX512VL | Header: immintrin.h | Category: Miscellaneous

Unpack and interleave 32-bit integers from the high half of "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

DEFINE INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]) {
    dst[31:0] := src1[95:64]
    dst[63:32] := src2[95:64]
    dst[95:64] := src1[127:96]
    dst[127:96] := src2[127:96]
    RETURN dst[127:0]
}
tmp_dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0])
FOR j := 0 to 3
    i := j*32
    IF k[j]
        dst[i+31:i] := tmp_dst[i+31:i]
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512F, AVX512VL | Header: immintrin.h | Category: Miscellaneous

Unpack and interleave 32-bit integers from the high half of "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

DEFINE INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]) {
    dst[31:0] := src1[95:64]
    dst[63:32] := src2[95:64]
    dst[95:64] := src1[127:96]
    dst[127:96] := src2[127:96]
    RETURN dst[127:0]
}
tmp_dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0])
FOR j := 0 to 3
    i := j*32
    IF k[j]
        dst[i+31:i] := tmp_dst[i+31:i]
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512F, AVX512VL | Header: immintrin.h | Category: Miscellaneous
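INTERLEAVE_HIGH_DWORDS is a fixed permutation, so this whole family is the same four-element pattern applied per 128-bit lane and then masked. A scalar sketch of one lane (names illustrative):

    // High-dword interleave of one 128-bit lane: take elements 2 and 3 of
    // each source, alternating src1/src2.
    fn interleave_high_dwords(src1: [i32; 4], src2: [i32; 4]) -> [i32; 4] {
        [src1[2], src2[2], src1[3], src2[3]]
    }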
Unpack and interleave 64-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) {
    dst[63:0] := src1[127:64]
    dst[127:64] := src2[127:64]
    RETURN dst[127:0]
}
tmp_dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0])
tmp_dst[255:128] := INTERLEAVE_HIGH_QWORDS(a[255:128], b[255:128])
FOR j := 0 to 3
    i := j*64
    IF k[j]
        dst[i+63:i] := tmp_dst[i+63:i]
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512F, AVX512VL | Header: immintrin.h | Category: Miscellaneous

Unpack and interleave 64-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) {
    dst[63:0] := src1[127:64]
    dst[127:64] := src2[127:64]
    RETURN dst[127:0]
}
tmp_dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0])
tmp_dst[255:128] := INTERLEAVE_HIGH_QWORDS(a[255:128], b[255:128])
FOR j := 0 to 3
    i := j*64
    IF k[j]
        dst[i+63:i] := tmp_dst[i+63:i]
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512F, AVX512VL | Header: immintrin.h | Category: Miscellaneous

Unpack and interleave 64-bit integers from the high half of "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) {
    dst[63:0] := src1[127:64]
    dst[127:64] := src2[127:64]
    RETURN dst[127:0]
}
tmp_dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0])
FOR j := 0 to 1
    i := j*64
    IF k[j]
        dst[i+63:i] := tmp_dst[i+63:i]
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512F, AVX512VL | Header: immintrin.h | Category: Miscellaneous

Unpack and interleave 64-bit integers from the high half of "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) {
    dst[63:0] := src1[127:64]
    dst[127:64] := src2[127:64]
    RETURN dst[127:0]
}
tmp_dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0])
FOR j := 0 to 1
    i := j*64
    IF k[j]
        dst[i+63:i] := tmp_dst[i+63:i]
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512F, AVX512VL | Header: immintrin.h | Category: Miscellaneous
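The qword version interleaves just the top half of each lane (names illustrative):

    // High-qword interleave of one 128-bit lane.
    fn interleave_high_qwords(src1: [i64; 2], src2: [i64; 2]) -> [i64; 2] {
        [src1[1], src2[1]]
    }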
- - - - - - Unpack and interleave 32-bit integers from the low half of each 128-bit lane in - "a" and "b", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - - DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) { - dst[31:0] := src1[31:0] - dst[63:32] := src2[31:0] - dst[95:64] := src1[63:32] - dst[127:96] := src2[63:32] - RETURN dst[127:0] - } - tmp_dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0]) - tmp_dst[255:128] := INTERLEAVE_DWORDS(a[255:128], b[255:128]) - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := tmp_dst[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Unpack and interleave 32-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) { + dst[31:0] := src1[31:0] + dst[63:32] := src2[31:0] + dst[95:64] := src1[63:32] + dst[127:96] := src2[63:32] + RETURN dst[127:0] +} +tmp_dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_DWORDS(a[255:128], b[255:128]) +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Unpack and interleave 32-bit integers from the low half of each 128-bit lane in - "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out - when the corresponding mask bit is not set). - - DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) { - dst[31:0] := src1[31:0] - dst[63:32] := src2[31:0] - dst[95:64] := src1[63:32] - dst[127:96] := src2[63:32] - RETURN dst[127:0] - } - tmp_dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0]) - tmp_dst[255:128] := INTERLEAVE_DWORDS(a[255:128], b[255:128]) - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := tmp_dst[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Unpack and interleave 32-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) { + dst[31:0] := src1[31:0] + dst[63:32] := src2[31:0] + dst[95:64] := src1[63:32] + dst[127:96] := src2[63:32] + RETURN dst[127:0] +} +tmp_dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_DWORDS(a[255:128], b[255:128]) +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - Unpack and interleave 32-bit integers from the low half of "a" and "b", and - store the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) { - dst[31:0] := src1[31:0] - dst[63:32] := src2[31:0] - dst[95:64] := src1[63:32] - dst[127:96] := src2[63:32] - RETURN dst[127:0] - } - tmp_dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0]) - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := tmp_dst[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Unpack and interleave 32-bit integers from the low half of "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) { + dst[31:0] := src1[31:0] + dst[63:32] := src2[31:0] + dst[95:64] := src1[63:32] + dst[127:96] := src2[63:32] + RETURN dst[127:0] +} +tmp_dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0]) +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Unpack and interleave 32-bit integers from the low half of "a" and "b", and - store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) { - dst[31:0] := src1[31:0] - dst[63:32] := src2[31:0] - dst[95:64] := src1[63:32] - dst[127:96] := src2[63:32] - RETURN dst[127:0] - } - tmp_dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0]) - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := tmp_dst[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Unpack and interleave 32-bit integers from the low half of "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) { + dst[31:0] := src1[31:0] + dst[63:32] := src2[31:0] + dst[95:64] := src1[63:32] + dst[127:96] := src2[63:32] + RETURN dst[127:0] +} +tmp_dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0]) +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
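The four hunks above rewrap the masked dword-unpack entries (presumably the _mm_/_mm256_mask[z]_unpacklo_epi32 family; the intrinsic names themselves are not preserved in this extract). A minimal scalar sketch of the pseudocode in Rust, for orientation only and not the stdarch implementation:

// INTERLEAVE_DWORDS from the pseudocode: interleave the two low dwords of
// each source within one 128-bit lane.
fn interleave_dwords(src1: [u32; 4], src2: [u32; 4]) -> [u32; 4] {
    [src1[0], src2[0], src1[1], src2[1]]
}

// 256-bit writemask form: each 128-bit lane is interleaved independently,
// then mask bit j picks the interleaved dword or the passthrough src.
fn mask_unpacklo_epi32_256(src: [u32; 8], k: u8, a: [u32; 8], b: [u32; 8]) -> [u32; 8] {
    let lo = interleave_dwords([a[0], a[1], a[2], a[3]], [b[0], b[1], b[2], b[3]]);
    let hi = interleave_dwords([a[4], a[5], a[6], a[7]], [b[4], b[5], b[6], b[7]]);
    let mut dst = [0u32; 8];
    for j in 0..8 {
        let tmp = if j < 4 { lo[j] } else { hi[j - 4] };
        dst[j] = if k & (1 << j) != 0 { tmp } else { src[j] }; // maskz: 0 instead of src[j]
    }
    dst
}

The zeromask entries differ only in the ELSE arm, and the 128-bit entries keep a single lane and the low four mask bits.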
- - - - - - Unpack and interleave 64-bit integers from the low half of each 128-bit lane in - "a" and "b", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - - DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) { - dst[63:0] := src1[63:0] - dst[127:64] := src2[63:0] - RETURN dst[127:0] - } - tmp_dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0]) - tmp_dst[255:128] := INTERLEAVE_QWORDS(a[255:128], b[255:128]) - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := tmp_dst[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Unpack and interleave 64-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) { + dst[63:0] := src1[63:0] + dst[127:64] := src2[63:0] + RETURN dst[127:0] +} +tmp_dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_QWORDS(a[255:128], b[255:128]) +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Unpack and interleave 64-bit integers from the low half of each 128-bit lane in - "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out - when the corresponding mask bit is not set). - - DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) { - dst[63:0] := src1[63:0] - dst[127:64] := src2[63:0] - RETURN dst[127:0] - } - tmp_dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0]) - tmp_dst[255:128] := INTERLEAVE_QWORDS(a[255:128], b[255:128]) - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := tmp_dst[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Unpack and interleave 64-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) { + dst[63:0] := src1[63:0] + dst[127:64] := src2[63:0] + RETURN dst[127:0] +} +tmp_dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_QWORDS(a[255:128], b[255:128]) +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - Unpack and interleave 64-bit integers from the low half of "a" and "b", and - store the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) { - dst[63:0] := src1[63:0] - dst[127:64] := src2[63:0] - RETURN dst[127:0] - } - tmp_dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0]) - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := tmp_dst[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Unpack and interleave 64-bit integers from the low half of "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) { + dst[63:0] := src1[63:0] + dst[127:64] := src2[63:0] + RETURN dst[127:0] +} +tmp_dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0]) +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Unpack and interleave 64-bit integers from the low half of "a" and "b", and - store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) { - dst[63:0] := src1[63:0] - dst[127:64] := src2[63:0] - RETURN dst[127:0] - } - tmp_dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0]) - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := tmp_dst[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Unpack and interleave 64-bit integers from the low half of "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) { + dst[63:0] := src1[63:0] + dst[127:64] := src2[63:0] + RETURN dst[127:0] +} +tmp_dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0]) +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
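The qword entries are the same pattern with INTERLEAVE_QWORDS; sketching only the 128-bit writemask form (the 256-bit form repeats it per 128-bit lane), again as a rough model rather than the real implementation:

fn mask_unpacklo_epi64_128(src: [u64; 2], k: u8, a: [u64; 2], b: [u64; 2]) -> [u64; 2] {
    let tmp = [a[0], b[0]]; // INTERLEAVE_QWORDS: dst = [src1.lo, src2.lo]
    let mut dst = [0u64; 2];
    for j in 0..2 {
        dst[j] = if k & (1 << j) != 0 { tmp[j] } else { src[j] }; // maskz: 0
    }
    dst
}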
- - - - - - Round packed double-precision (64-bit) floating-point elements in "a" to the - number of fraction bits specified by "imm8", and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). [round_imm_note] - - DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) { - m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) - IF IsInf(tmp[63:0]) - tmp[63:0] := src1[63:0] - FI - RETURN tmp[63:0] - } - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := RoundScaleFP64(a[i+63:i], imm8[7:0]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Round packed double-precision (64-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note] + +DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) { + m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) + IF IsInf(tmp[63:0]) + tmp[63:0] := src1[63:0] + FI + RETURN tmp[63:0] +} +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := RoundScaleFP64(a[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Round packed double-precision (64-bit) floating-point elements in "a" to the - number of fraction bits specified by "imm8", and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - [round_imm_note] - - DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) { - m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) - IF IsInf(tmp[63:0]) - tmp[63:0] := src1[63:0] - FI - RETURN tmp[63:0] - } - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := RoundScaleFP64(a[i+63:i], imm8[7:0]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Round packed double-precision (64-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note] + +DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) { + m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) + IF IsInf(tmp[63:0]) + tmp[63:0] := src1[63:0] + FI + RETURN tmp[63:0] +} +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := RoundScaleFP64(a[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - Round packed double-precision (64-bit) floating-point elements in "a" to the - number of fraction bits specified by "imm8", and store the results in "dst". - [round_imm_note] - - DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) { - m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) - IF IsInf(tmp[63:0]) - tmp[63:0] := src1[63:0] - FI - RETURN tmp[63:0] - } - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := RoundScaleFP64(a[i+63:i], imm8[7:0]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + Round packed double-precision (64-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst". [round_imm_note] + +DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) { + m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) + IF IsInf(tmp[63:0]) + tmp[63:0] := src1[63:0] + FI + RETURN tmp[63:0] +} +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := RoundScaleFP64(a[i+63:i], imm8[7:0]) +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - Round packed double-precision (64-bit) floating-point elements in "a" to the - number of fraction bits specified by "imm8", and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). [round_imm_note] - - DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) { - m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) - IF IsInf(tmp[63:0]) - tmp[63:0] := src1[63:0] - FI - RETURN tmp[63:0] - } - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := RoundScaleFP64(a[i+63:i], imm8[7:0]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Round packed double-precision (64-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note] + +DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) { + m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) + IF IsInf(tmp[63:0]) + tmp[63:0] := src1[63:0] + FI + RETURN tmp[63:0] +} +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := RoundScaleFP64(a[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Round packed double-precision (64-bit) floating-point elements in "a" to the - number of fraction bits specified by "imm8", and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - [round_imm_note] - - DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) { - m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) - IF IsInf(tmp[63:0]) - tmp[63:0] := src1[63:0] - FI - RETURN tmp[63:0] - } - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := RoundScaleFP64(a[i+63:i], imm8[7:0]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Round packed double-precision (64-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note] + +DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) { + m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) + IF IsInf(tmp[63:0]) + tmp[63:0] := src1[63:0] + FI + RETURN tmp[63:0] +} +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := RoundScaleFP64(a[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - Round packed double-precision (64-bit) floating-point elements in "a" to the - number of fraction bits specified by "imm8", and store the results in "dst". - [round_imm_note] - - DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) { - m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) - IF IsInf(tmp[63:0]) - tmp[63:0] := src1[63:0] - FI - RETURN tmp[63:0] - } - FOR j := 0 to 1 - i := j*64 - dst[i+63:i] := RoundScaleFP64(a[i+63:i], imm8[7:0]) - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + Round packed double-precision (64-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst". [round_imm_note] + +DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) { + m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) + IF IsInf(tmp[63:0]) + tmp[63:0] := src1[63:0] + FI + RETURN tmp[63:0] +} +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := RoundScaleFP64(a[i+63:i], imm8[7:0]) +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
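RoundScaleFP64 rounds to a fixed number of fraction bits: imm8[7:4] gives the bit count M and imm8[3:0] the rounding mode. A scalar Rust model of the pseudocode, assuming round-to-nearest-even (imm8[3:0] = 0); the other rounding encodings and SAE are deliberately not modelled:

fn round_scale_f64(x: f64, imm8: u8) -> f64 {
    let m = (imm8 >> 4) as i32; // fraction bits after the binary point to preserve
    debug_assert_eq!(imm8 & 0x0f, 0, "sketch models only round-to-nearest-even");
    let tmp = 2f64.powi(-m) * (2f64.powi(m) * x).round_ties_even();
    // Per the pseudocode, an Inf produced by the scaling falls back to the input.
    if tmp.is_infinite() { x } else { tmp }
}

For example, imm8 = 0x10 (M = 1) rounds to halves: round_scale_f64(2.27, 0x10) == 2.5.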
- - - - - - Round packed single-precision (32-bit) floating-point elements in "a" to the - number of fraction bits specified by "imm8", and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). [round_imm_note] - - DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) { - m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) - IF IsInf(tmp[31:0]) - tmp[31:0] := src1[31:0] - FI - RETURN tmp[31:0] - } - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := RoundScaleFP32(a[i+31:i], imm8[7:0]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Round packed single-precision (32-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note] + +DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) { + m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) + IF IsInf(tmp[31:0]) + tmp[31:0] := src1[31:0] + FI + RETURN tmp[31:0] +} +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := RoundScaleFP32(a[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Round packed single-precision (32-bit) floating-point elements in "a" to the - number of fraction bits specified by "imm8", and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - [round_imm_note] - - DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) { - m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) - IF IsInf(tmp[31:0]) - tmp[31:0] := src1[31:0] - FI - RETURN tmp[31:0] - } - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := RoundScaleFP32(a[i+31:i], imm8[7:0]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Round packed single-precision (32-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note] + +DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) { + m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) + IF IsInf(tmp[31:0]) + tmp[31:0] := src1[31:0] + FI + RETURN tmp[31:0] +} +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := RoundScaleFP32(a[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - Round packed single-precision (32-bit) floating-point elements in "a" to the - number of fraction bits specified by "imm8", and store the results in "dst". - [round_imm_note] - - DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) { - m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) - IF IsInf(tmp[31:0]) - tmp[31:0] := src1[31:0] - FI - RETURN tmp[31:0] - } - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := RoundScaleFP32(a[i+31:i], imm8[7:0]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + Round packed single-precision (32-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst". [round_imm_note] + +DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) { + m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) + IF IsInf(tmp[31:0]) + tmp[31:0] := src1[31:0] + FI + RETURN tmp[31:0] +} +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := RoundScaleFP32(a[i+31:i], imm8[7:0]) +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - Round packed single-precision (32-bit) floating-point elements in "a" to the - number of fraction bits specified by "imm8", and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). [round_imm_note] - - DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) { - m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) - IF IsInf(tmp[31:0]) - tmp[31:0] := src1[31:0] - FI - RETURN tmp[31:0] - } - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := RoundScaleFP32(a[i+31:i], imm8[7:0]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Round packed single-precision (32-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note] + +DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) { + m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) + IF IsInf(tmp[31:0]) + tmp[31:0] := src1[31:0] + FI + RETURN tmp[31:0] +} +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := RoundScaleFP32(a[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Round packed single-precision (32-bit) floating-point elements in "a" to the - number of fraction bits specified by "imm8", and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - [round_imm_note] - - DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) { - m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) - IF IsInf(tmp[31:0]) - tmp[31:0] := src1[31:0] - FI - RETURN tmp[31:0] - } - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := RoundScaleFP32(a[i+31:i], imm8[7:0]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Round packed single-precision (32-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note] + +DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) { + m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) + IF IsInf(tmp[31:0]) + tmp[31:0] := src1[31:0] + FI + RETURN tmp[31:0] +} +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := RoundScaleFP32(a[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - Round packed single-precision (32-bit) floating-point elements in "a" to the - number of fraction bits specified by "imm8", and store the results in "dst". - [round_imm_note] - - DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) { - m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) - IF IsInf(tmp[31:0]) - tmp[31:0] := src1[31:0] - FI - RETURN tmp[31:0] - } - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := RoundScaleFP32(a[i+31:i], imm8[7:0]) - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + Round packed single-precision (32-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst". [round_imm_note] + +DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) { + m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) + IF IsInf(tmp[31:0]) + tmp[31:0] := src1[31:0] + FI + RETURN tmp[31:0] +} +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := RoundScaleFP32(a[i+31:i], imm8[7:0]) +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
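The f32 entries wrap the same helper in the usual per-element mask loop; one sketch for the 256-bit writemask form, under the same round-to-nearest assumption (the maskz variants put 0.0 in the else arm):

fn mask_round_scale_ps_256(src: [f32; 8], k: u8, a: [f32; 8], imm8: u8) -> [f32; 8] {
    let m = (imm8 >> 4) as i32;
    let mut dst = [0f32; 8];
    for j in 0..8 {
        dst[j] = if k & (1 << j) != 0 {
            let tmp = 2f32.powi(-m) * (2f32.powi(m) * a[j]).round_ties_even();
            if tmp.is_infinite() { a[j] } else { tmp }
        } else {
            src[j] // zeromask variants: 0.0
        };
    }
    dst
}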
- - - - - - Scale the packed double-precision (64-bit) floating-point elements in "a" using - values from "b", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - DEFINE SCALE(src1, src2) { - IF (src2 == NaN) - IF (src2 == SNaN) + + + + + + Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + DEFINE SCALE(src1, src2) { + IF (src2 == NaN) + IF (src2 == SNaN) RETURN QNAN(src2) - FI - ELSE IF (src1 == NaN) - IF (src1 == SNaN) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) RETURN QNAN(src1) - FI - IF (src2 != INF) + FI + IF (src2 != INF) RETURN QNAN(src1) - FI - ELSE - tmp_src2 := src2 - tmp_src1 := src1 - IF (IS_DENORMAL(src2) AND MXCSR.DAZ) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (IS_DENORMAL(src2) AND MXCSR.DAZ) tmp_src2 := 0 - FI - IF (IS_DENORMAL(src1) AND MXCSR.DAZ) + FI + IF (IS_DENORMAL(src1) AND MXCSR.DAZ) tmp_src1 := 0 - FI - FI - dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0])) - RETURN dst[63:0] - } - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := SCALE(a[i+63:0], b[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + FI + FI + dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0])) + RETURN dst[63:0] +} +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := SCALE(a[i+63:0], b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 +
+
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
- - - - - Scale the packed double-precision (64-bit) floating-point elements in "a" using - values from "b", and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - DEFINE SCALE(src1, src2) { - IF (src2 == NaN) - IF (src2 == SNaN) + + + + + Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + DEFINE SCALE(src1, src2) { + IF (src2 == NaN) + IF (src2 == SNaN) RETURN QNAN(src2) - FI - ELSE IF (src1 == NaN) - IF (src1 == SNaN) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) RETURN QNAN(src1) - FI - IF (src2 != INF) + FI + IF (src2 != INF) RETURN QNAN(src1) - FI - ELSE - tmp_src2 := src2 - tmp_src1 := src1 - IF (IS_DENORMAL(src2) AND MXCSR.DAZ) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (IS_DENORMAL(src2) AND MXCSR.DAZ) tmp_src2 := 0 - FI - IF (IS_DENORMAL(src1) AND MXCSR.DAZ) + FI + IF (IS_DENORMAL(src1) AND MXCSR.DAZ) tmp_src1 := 0 - FI - FI - dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0])) - RETURN dst[63:0] - } - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := SCALE(a[i+63:0], b[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + FI + FI + dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0])) + RETURN dst[63:0] +} +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := SCALE(a[i+63:0], b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 +
+
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
- - - - Scale the packed double-precision (64-bit) floating-point elements in "a" using - values from "b", and store the results in "dst". - DEFINE SCALE(src1, src2) { - IF (src2 == NaN) - IF (src2 == SNaN) + + + + Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", and store the results in "dst". + DEFINE SCALE(src1, src2) { + IF (src2 == NaN) + IF (src2 == SNaN) RETURN QNAN(src2) - FI - ELSE IF (src1 == NaN) - IF (src1 == SNaN) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) RETURN QNAN(src1) - FI - IF (src2 != INF) + FI + IF (src2 != INF) RETURN QNAN(src1) - FI - ELSE - tmp_src2 := src2 - tmp_src1 := src1 - IF (IS_DENORMAL(src2) AND MXCSR.DAZ) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (IS_DENORMAL(src2) AND MXCSR.DAZ) tmp_src2 := 0 - FI - IF (IS_DENORMAL(src1) AND MXCSR.DAZ) + FI + IF (IS_DENORMAL(src1) AND MXCSR.DAZ) tmp_src1 := 0 - FI - FI - dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0])) - RETURN dst[63:0] - } - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := SCALE(a[i+63:0], b[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + FI + FI + dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0])) + RETURN dst[63:0] +} +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := SCALE(a[i+63:0], b[i+63:i]) +ENDFOR +dst[MAX:256] := 0 +
+
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
- - - - - - Scale the packed double-precision (64-bit) floating-point elements in "a" using - values from "b", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - DEFINE SCALE(src1, src2) { - IF (src2 == NaN) - IF (src2 == SNaN) + + + + + + Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + DEFINE SCALE(src1, src2) { + IF (src2 == NaN) + IF (src2 == SNaN) RETURN QNAN(src2) - FI - ELSE IF (src1 == NaN) - IF (src1 == SNaN) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) RETURN QNAN(src1) - FI - IF (src2 != INF) + FI + IF (src2 != INF) RETURN QNAN(src1) - FI - ELSE - tmp_src2 := src2 - tmp_src1 := src1 - IF (IS_DENORMAL(src2) AND MXCSR.DAZ) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (IS_DENORMAL(src2) AND MXCSR.DAZ) tmp_src2 := 0 - FI - IF (IS_DENORMAL(src1) AND MXCSR.DAZ) + FI + IF (IS_DENORMAL(src1) AND MXCSR.DAZ) tmp_src1 := 0 - FI - FI - dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0])) - RETURN dst[63:0] - } - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := SCALE(a[i+63:0], b[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + FI + FI + dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0])) + RETURN dst[63:0] +} +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := SCALE(a[i+63:0], b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 +
+
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
- - - - - Scale the packed double-precision (64-bit) floating-point elements in "a" using - values from "b", and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - DEFINE SCALE(src1, src2) { - IF (src2 == NaN) - IF (src2 == SNaN) + + + + + Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + DEFINE SCALE(src1, src2) { + IF (src2 == NaN) + IF (src2 == SNaN) RETURN QNAN(src2) - FI - ELSE IF (src1 == NaN) - IF (src1 == SNaN) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) RETURN QNAN(src1) - FI - IF (src2 != INF) + FI + IF (src2 != INF) RETURN QNAN(src1) - FI - ELSE - tmp_src2 := src2 - tmp_src1 := src1 - IF (IS_DENORMAL(src2) AND MXCSR.DAZ) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (IS_DENORMAL(src2) AND MXCSR.DAZ) tmp_src2 := 0 - FI - IF (IS_DENORMAL(src1) AND MXCSR.DAZ) + FI + IF (IS_DENORMAL(src1) AND MXCSR.DAZ) tmp_src1 := 0 - FI - FI - dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0])) - RETURN dst[63:0] - } - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := SCALE(a[i+63:0], b[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + FI + FI + dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0])) + RETURN dst[63:0] +} +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := SCALE(a[i+63:0], b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 +
+
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
- - - - Scale the packed double-precision (64-bit) floating-point elements in "a" using - values from "b", and store the results in "dst". - DEFINE SCALE(src1, src2) { - IF (src2 == NaN) - IF (src2 == SNaN) + + + + Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", and store the results in "dst". + DEFINE SCALE(src1, src2) { + IF (src2 == NaN) + IF (src2 == SNaN) RETURN QNAN(src2) - FI - ELSE IF (src1 == NaN) - IF (src1 == SNaN) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) RETURN QNAN(src1) - FI - IF (src2 != INF) + FI + IF (src2 != INF) RETURN QNAN(src1) - FI - ELSE - tmp_src2 := src2 - tmp_src1 := src1 - IF (IS_DENORMAL(src2) AND MXCSR.DAZ) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (IS_DENORMAL(src2) AND MXCSR.DAZ) tmp_src2 := 0 - FI - IF (IS_DENORMAL(src1) AND MXCSR.DAZ) + FI + IF (IS_DENORMAL(src1) AND MXCSR.DAZ) tmp_src1 := 0 - FI - FI - dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0])) - RETURN dst[63:0] - } - FOR j := 0 to 1 - i := j*64 - dst[i+63:i] := SCALE(a[i+63:0], b[i+63:i]) - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + FI + FI + dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0])) + RETURN dst[63:0] +} +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := SCALE(a[i+63:0], b[i+63:i]) +ENDFOR +dst[MAX:128] := 0 +
+
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
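One nit carried over from upstream in all of the SCALE entries: the loop body reads SCALE(a[i+63:0], b[i+63:i]), where the a[i+63:0] lower bound is almost certainly a typo for a[i+63:i], since every element is 64 bits wide. Ignoring the SNaN-quieting and MXCSR.DAZ branches, SCALE reduces to src1 * 2^floor(src2); a hedged scalar sketch:

fn scalef_f64(src1: f64, src2: f64) -> f64 {
    if src1.is_nan() || src2.is_nan() {
        // NaN operands propagate, src2 taking priority; the hardware
        // additionally quiets signaling NaNs, which is elided here.
        return if src2.is_nan() { src2 } else { src1 };
    }
    // 2^floor(src2) overflows to Inf for large exponents, matching the
    // unbounded POW/FLOOR in the pseudocode.
    src1 * 2f64.powf(src2.floor())
}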
- - - - - - Scale the packed single-precision (32-bit) floating-point elements in "a" using - values from "b", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - DEFINE SCALE(src1, src2) { - IF (src2 == NaN) - IF (src2 == SNaN) + + + + + + Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + DEFINE SCALE(src1, src2) { + IF (src2 == NaN) + IF (src2 == SNaN) RETURN QNAN(src2) - FI - ELSE IF (src1 == NaN) - IF (src1 == SNaN) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) RETURN QNAN(src1) - FI - IF (src2 != INF) + FI + IF (src2 != INF) RETURN QNAN(src1) - FI - ELSE - tmp_src2 := src2 - tmp_src1 := src1 - IF (IS_DENORMAL(src2) AND MXCSR.DAZ) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (IS_DENORMAL(src2) AND MXCSR.DAZ) tmp_src2 := 0 - FI - IF (IS_DENORMAL(src1) AND MXCSR.DAZ) + FI + IF (IS_DENORMAL(src1) AND MXCSR.DAZ) tmp_src1 := 0 - FI - FI - dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0])) - RETURN dst[31:0] - } - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := SCALE(a[i+31:0], b[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + FI + FI + dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0])) + RETURN dst[31:0] +} +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := SCALE(a[i+31:0], b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 +
+
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
- - - - - Scale the packed single-precision (32-bit) floating-point elements in "a" using - values from "b", and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - DEFINE SCALE(src1, src2) { - IF (src2 == NaN) - IF (src2 == SNaN) + + + + + Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + DEFINE SCALE(src1, src2) { + IF (src2 == NaN) + IF (src2 == SNaN) RETURN QNAN(src2) - FI - ELSE IF (src1 == NaN) - IF (src1 == SNaN) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) RETURN QNAN(src1) - FI - IF (src2 != INF) + FI + IF (src2 != INF) RETURN QNAN(src1) - FI - ELSE - tmp_src2 := src2 - tmp_src1 := src1 - IF (IS_DENORMAL(src2) AND MXCSR.DAZ) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (IS_DENORMAL(src2) AND MXCSR.DAZ) tmp_src2 := 0 - FI - IF (IS_DENORMAL(src1) AND MXCSR.DAZ) + FI + IF (IS_DENORMAL(src1) AND MXCSR.DAZ) tmp_src1 := 0 - FI - FI - dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0])) - RETURN dst[31:0] - } - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := SCALE(a[i+31:0], b[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + FI + FI + dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0])) + RETURN dst[31:0] +} +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := SCALE(a[i+31:0], b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 +
+
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
- - - - Scale the packed single-precision (32-bit) floating-point elements in "a" using - values from "b", and store the results in "dst". - DEFINE SCALE(src1, src2) { - IF (src2 == NaN) - IF (src2 == SNaN) + + + + Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", and store the results in "dst". + DEFINE SCALE(src1, src2) { + IF (src2 == NaN) + IF (src2 == SNaN) RETURN QNAN(src2) - FI - ELSE IF (src1 == NaN) - IF (src1 == SNaN) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) RETURN QNAN(src1) - FI - IF (src2 != INF) + FI + IF (src2 != INF) RETURN QNAN(src1) - FI - ELSE - tmp_src2 := src2 - tmp_src1 := src1 - IF (IS_DENORMAL(src2) AND MXCSR.DAZ) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (IS_DENORMAL(src2) AND MXCSR.DAZ) tmp_src2 := 0 - FI - IF (IS_DENORMAL(src1) AND MXCSR.DAZ) + FI + IF (IS_DENORMAL(src1) AND MXCSR.DAZ) tmp_src1 := 0 - FI - FI - dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0])) - RETURN dst[31:0] - } - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := SCALE(a[i+31:0], b[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + FI + FI + dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0])) + RETURN dst[31:0] +} +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := SCALE(a[i+31:0], b[i+31:i]) +ENDFOR +dst[MAX:256] := 0 +
+
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
- - - - - - Scale the packed single-precision (32-bit) floating-point elements in "a" using - values from "b", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - DEFINE SCALE(src1, src2) { - IF (src2 == NaN) - IF (src2 == SNaN) + + + + + + Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + DEFINE SCALE(src1, src2) { + IF (src2 == NaN) + IF (src2 == SNaN) RETURN QNAN(src2) - FI - ELSE IF (src1 == NaN) - IF (src1 == SNaN) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) RETURN QNAN(src1) - FI - IF (src2 != INF) + FI + IF (src2 != INF) RETURN QNAN(src1) - FI - ELSE - tmp_src2 := src2 - tmp_src1 := src1 - IF (IS_DENORMAL(src2) AND MXCSR.DAZ) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (IS_DENORMAL(src2) AND MXCSR.DAZ) tmp_src2 := 0 - FI - IF (IS_DENORMAL(src1) AND MXCSR.DAZ) + FI + IF (IS_DENORMAL(src1) AND MXCSR.DAZ) tmp_src1 := 0 - FI - FI - dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0])) - RETURN dst[31:0] - } - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := SCALE(a[i+31:0], b[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + FI + FI + dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0])) + RETURN dst[31:0] +} +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := SCALE(a[i+31:0], b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 +
+
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
- - - - - Scale the packed single-precision (32-bit) floating-point elements in "a" using - values from "b", and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - DEFINE SCALE(src1, src2) { - IF (src2 == NaN) - IF (src2 == SNaN) + + + + + Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + DEFINE SCALE(src1, src2) { + IF (src2 == NaN) + IF (src2 == SNaN) RETURN QNAN(src2) - FI - ELSE IF (src1 == NaN) - IF (src1 == SNaN) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) RETURN QNAN(src1) - FI - IF (src2 != INF) + FI + IF (src2 != INF) RETURN QNAN(src1) - FI - ELSE - tmp_src2 := src2 - tmp_src1 := src1 - IF (IS_DENORMAL(src2) AND MXCSR.DAZ) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (IS_DENORMAL(src2) AND MXCSR.DAZ) tmp_src2 := 0 - FI - IF (IS_DENORMAL(src1) AND MXCSR.DAZ) + FI + IF (IS_DENORMAL(src1) AND MXCSR.DAZ) tmp_src1 := 0 - FI - FI - dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0])) - RETURN dst[31:0] - } - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := SCALE(a[i+31:0], b[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + FI + FI + dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0])) + RETURN dst[31:0] +} +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := SCALE(a[i+31:0], b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 +
+
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
- - - - Scale the packed single-precision (32-bit) floating-point elements in "a" using - values from "b", and store the results in "dst". - DEFINE SCALE(src1, src2) { - IF (src2 == NaN) - IF (src2 == SNaN) + + + + Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", and store the results in "dst". + DEFINE SCALE(src1, src2) { + IF (src2 == NaN) + IF (src2 == SNaN) RETURN QNAN(src2) - FI - ELSE IF (src1 == NaN) - IF (src1 == SNaN) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) RETURN QNAN(src1) - FI - IF (src2 != INF) + FI + IF (src2 != INF) RETURN QNAN(src1) - FI - ELSE - tmp_src2 := src2 - tmp_src1 := src1 - IF (IS_DENORMAL(src2) AND MXCSR.DAZ) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (IS_DENORMAL(src2) AND MXCSR.DAZ) tmp_src2 := 0 - FI - IF (IS_DENORMAL(src1) AND MXCSR.DAZ) + FI + IF (IS_DENORMAL(src1) AND MXCSR.DAZ) tmp_src1 := 0 - FI - FI - dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0])) - RETURN dst[31:0] - } - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := SCALE(a[i+31:0], b[i+31:i]) - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + FI + FI + dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0])) + RETURN dst[31:0] +} +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := SCALE(a[i+31:0], b[i+31:i]) +ENDFOR +dst[MAX:128] := 0 +
+
+ AVX512F
+ AVX512VL
immintrin.h
+ Miscellaneous
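The f32 SCALE entries follow with the same per-element merge as everywhere above; a sketch of the 256-bit writemask loop, with the NaN and denormal handling elided as in the f64 sketch:

fn mask_scalef_ps_256(src: [f32; 8], k: u8, a: [f32; 8], b: [f32; 8]) -> [f32; 8] {
    let mut dst = [0f32; 8];
    for j in 0..8 {
        dst[j] = if k & (1 << j) != 0 {
            a[j] * 2f32.powf(b[j].floor()) // NaN/DAZ cases elided
        } else {
            src[j] // maskz variants: 0.0
        };
    }
    dst
}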
- - - - - - - Shuffle 128-bits (composed of 4 single-precision (32-bit) floating-point - elements) selected by "imm8" from "a" and "b", and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - tmp_dst.m128[0] := a.m128[imm8[0]] - tmp_dst.m128[1] := b.m128[imm8[1]] - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := tmp_dst[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + + Shuffle 128-bits (composed of 4 single-precision (32-bit) floating-point elements) selected by "imm8" from "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp_dst.m128[0] := a.m128[imm8[0]] +tmp_dst.m128[1] := b.m128[imm8[1]] +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - Shuffle 128-bits (composed of 4 single-precision (32-bit) floating-point - elements) selected by "imm8" from "a" and "b", and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - tmp_dst.m128[0] := a.m128[imm8[0]] - tmp_dst.m128[1] := b.m128[imm8[1]] - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := tmp_dst[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Shuffle 128-bits (composed of 4 single-precision (32-bit) floating-point elements) selected by "imm8" from "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp_dst.m128[0] := a.m128[imm8[0]] +tmp_dst.m128[1] := b.m128[imm8[1]] +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
-
-
-
-
- Shuffle 128-bits (composed of 4 single-precision (32-bit) floating-point
- elements) selected by "imm8" from "a" and "b", and store the results in "dst".
-
- dst.m128[0] := a.m128[imm8[0]]
- dst.m128[1] := b.m128[imm8[1]]
- dst[MAX:256] := 0
-
-
- AVX512F
- AVX512VL
immintrin.h
- Miscellaneous + + + + + Shuffle 128-bits (composed of 4 single-precision (32-bit) floating-point elements) selected by "imm8" from "a" and "b", and store the results in "dst". + +dst.m128[0] := a.m128[imm8[0]] +dst.m128[1] := b.m128[imm8[1]] +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
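These x4/x2 shuffles work at 128-bit-lane granularity: imm8[0] picks which lane of "a" becomes the low result lane, imm8[1] which lane of "b" becomes the high one. A sketch of the f32 form (presumably the _mm256_shuffle_f32x4 family); the f64x2, i32x4 and i64x2 entries that follow repeat the same selection over other element types, so one model covers them all:

fn shuffle_f32x4_256(a: [f32; 8], b: [f32; 8], imm8: u8) -> [f32; 8] {
    // v.m128[bit] from the pseudocode: lane 0 is elements 0..4, lane 1 is 4..8.
    let lane = |v: [f32; 8], bit: u8| {
        let base = 4 * bit as usize;
        [v[base], v[base + 1], v[base + 2], v[base + 3]]
    };
    let lo = lane(a, imm8 & 1);
    let hi = lane(b, (imm8 >> 1) & 1);
    [lo[0], lo[1], lo[2], lo[3], hi[0], hi[1], hi[2], hi[3]]
}

The masked variants then run the familiar per-element merge over tmp_dst.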
- - - - - - - Shuffle 128-bits (composed of 2 double-precision (64-bit) floating-point - elements) selected by "imm8" from "a" and "b", and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - tmp_dst.m128[0] := a.m128[imm8[0]] - tmp_dst.m128[1] := b.m128[imm8[1]] - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := tmp_dst[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + + Shuffle 128-bits (composed of 2 double-precision (64-bit) floating-point elements) selected by "imm8" from "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp_dst.m128[0] := a.m128[imm8[0]] +tmp_dst.m128[1] := b.m128[imm8[1]] +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - Shuffle 128-bits (composed of 2 double-precision (64-bit) floating-point - elements) selected by "imm8" from "a" and "b", and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - tmp_dst.m128[0] := a.m128[imm8[0]] - tmp_dst.m128[1] := b.m128[imm8[1]] - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := tmp_dst[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Shuffle 128-bits (composed of 2 double-precision (64-bit) floating-point elements) selected by "imm8" from "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp_dst.m128[0] := a.m128[imm8[0]] +tmp_dst.m128[1] := b.m128[imm8[1]] +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
-
-
-
-
- Shuffle 128-bits (composed of 2 double-precision (64-bit) floating-point
- elements) selected by "imm8" from "a" and "b", and store the results in "dst".
-
- dst.m128[0] := a.m128[imm8[0]]
- dst.m128[1] := b.m128[imm8[1]]
- dst[MAX:256] := 0
-
-
- AVX512F
- AVX512VL
immintrin.h
- Miscellaneous + + + + + Shuffle 128-bits (composed of 2 double-precision (64-bit) floating-point elements) selected by "imm8" from "a" and "b", and store the results in "dst". + +dst.m128[0] := a.m128[imm8[0]] +dst.m128[1] := b.m128[imm8[1]] +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
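Since only the two low imm8 bits participate in any of these lane shuffles, a tiny hypothetical helper (purely illustrative, not part of any API) makes call sites self-documenting:

// Encode the lane selectors: bit 0 = lane of "a", bit 1 = lane of "b".
const fn shuf_x4_imm8(a_lane: u8, b_lane: u8) -> u8 {
    (a_lane & 1) | ((b_lane & 1) << 1)
}
// e.g. shuf_x4_imm8(1, 0) == 0b01: high lane of "a", low lane of "b".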
- - - - - - - Shuffle 128-bits (composed of 4 32-bit integers) selected by "imm8" from "a" - and "b", and store the results in "dst" using writemask "k" (elements are copied from - "src" when the corresponding mask bit is not set). - - tmp_dst.m128[0] := a.m128[imm8[0]] - tmp_dst.m128[1] := b.m128[imm8[1]] - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := tmp_dst[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + + Shuffle 128-bits (composed of 4 32-bit integers) selected by "imm8" from "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp_dst.m128[0] := a.m128[imm8[0]] +tmp_dst.m128[1] := b.m128[imm8[1]] +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - Shuffle 128-bits (composed of 4 32-bit integers) selected by "imm8" from "a" - and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when - the corresponding mask bit is not set). - - tmp_dst.m128[0] := a.m128[imm8[0]] - tmp_dst.m128[1] := b.m128[imm8[1]] - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := tmp_dst[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Shuffle 128-bits (composed of 4 32-bit integers) selected by "imm8" from "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp_dst.m128[0] := a.m128[imm8[0]] +tmp_dst.m128[1] := b.m128[imm8[1]] +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
-
-
-
-
- Shuffle 128-bits (composed of 4 32-bit integers) selected by "imm8" from "a"
- and "b", and store the results in "dst".
-
- dst.m128[0] := a.m128[imm8[0]]
- dst.m128[1] := b.m128[imm8[1]]
- dst[MAX:256] := 0
-
-
- AVX512F
- AVX512VL
immintrin.h
- Miscellaneous + + + + + Shuffle 128-bits (composed of 4 32-bit integers) selected by "imm8" from "a" and "b", and store the results in "dst". + +dst.m128[0] := a.m128[imm8[0]] +dst.m128[1] := b.m128[imm8[1]] +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - - Shuffle 128-bits (composed of 2 64-bit integers) selected by "imm8" from "a" - and "b", and store the results in "dst" using writemask "k" (elements are copied from - "src" when the corresponding mask bit is not set). - - tmp_dst.m128[0] := a.m128[imm8[0]] - tmp_dst.m128[1] := b.m128[imm8[1]] - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := tmp_dst[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + + Shuffle 128-bits (composed of 2 64-bit integers) selected by "imm8" from "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp_dst.m128[0] := a.m128[imm8[0]] +tmp_dst.m128[1] := b.m128[imm8[1]] +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - Shuffle 128-bits (composed of 2 64-bit integers) selected by "imm8" from "a" - and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when - the corresponding mask bit is not set). - - tmp_dst.m128[0] := a.m128[imm8[0]] - tmp_dst.m128[1] := b.m128[imm8[1]] - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := tmp_dst[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Shuffle 128-bits (composed of 2 64-bit integers) selected by "imm8" from "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp_dst.m128[0] := a.m128[imm8[0]] +tmp_dst.m128[1] := b.m128[imm8[1]] +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
-
-
-
-
- Shuffle 128-bits (composed of 2 64-bit integers) selected by "imm8" from "a"
- and "b", and store the results in "dst".
-
- dst.m128[0] := a.m128[imm8[0]]
- dst.m128[1] := b.m128[imm8[1]]
- dst[MAX:256] := 0
-
-
- AVX512F
- AVX512VL
immintrin.h
- Miscellaneous + + + + + Shuffle 128-bits (composed of 2 64-bit integers) selected by "imm8" from "a" and "b", and store the results in "dst". + +dst.m128[0] := a.m128[imm8[0]] +dst.m128[1] := b.m128[imm8[1]] +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Miscellaneous
Shuffle double-precision (64-bit) floating-point elements within 128-bit lanes using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

tmp_dst[63:0] := (imm8[0] == 0) ? a[63:0] : a[127:64]
tmp_dst[127:64] := (imm8[1] == 0) ? b[63:0] : b[127:64]
tmp_dst[191:128] := (imm8[2] == 0) ? a[191:128] : a[255:192]
tmp_dst[255:192] := (imm8[3] == 0) ? b[191:128] : b[255:192]
FOR j := 0 to 3
    i := j*64
    IF k[j]
        dst[i+63:i] := tmp_dst[i+63:i]
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Miscellaneous.

Shuffle double-precision (64-bit) floating-point elements within 128-bit lanes using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

tmp_dst[63:0] := (imm8[0] == 0) ? a[63:0] : a[127:64]
tmp_dst[127:64] := (imm8[1] == 0) ? b[63:0] : b[127:64]
tmp_dst[191:128] := (imm8[2] == 0) ? a[191:128] : a[255:192]
tmp_dst[255:192] := (imm8[3] == 0) ? b[191:128] : b[255:192]
FOR j := 0 to 3
    i := j*64
    IF k[j]
        dst[i+63:i] := tmp_dst[i+63:i]
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Miscellaneous.

Shuffle double-precision (64-bit) floating-point elements using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

tmp_dst[63:0] := (imm8[0] == 0) ? a[63:0] : a[127:64]
tmp_dst[127:64] := (imm8[1] == 0) ? b[63:0] : b[127:64]
FOR j := 0 to 1
    i := j*64
    IF k[j]
        dst[i+63:i] := tmp_dst[i+63:i]
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Miscellaneous.

Shuffle double-precision (64-bit) floating-point elements using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

tmp_dst[63:0] := (imm8[0] == 0) ? a[63:0] : a[127:64]
tmp_dst[127:64] := (imm8[1] == 0) ? b[63:0] : b[127:64]
FOR j := 0 to 1
    i := j*64
    IF k[j]
        dst[i+63:i] := tmp_dst[i+63:i]
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Miscellaneous.
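
To make the masked shuffle semantics concrete, here is a minimal scalar model of the 256-bit writemask form above (which appears to correspond to Intel's _mm256_mask_shuffle_pd). The helper name and the [f64; 4] representation are illustrative assumptions, not the stdarch implementation:

// Scalar sketch of the 256-bit masked SHUFPD pseudocode above.
fn mask_shuffle_pd_256(src: [f64; 4], k: u8, a: [f64; 4], b: [f64; 4], imm8: u8) -> [f64; 4] {
    // Within each 128-bit lane, even results pick from `a`, odd results from `b`.
    let tmp = [
        if imm8 & 0b0001 == 0 { a[0] } else { a[1] },
        if imm8 & 0b0010 == 0 { b[0] } else { b[1] },
        if imm8 & 0b0100 == 0 { a[2] } else { a[3] },
        if imm8 & 0b1000 == 0 { b[2] } else { b[3] },
    ];
    // Writemask: keep the shuffled element where k[j] is set, else copy from src.
    core::array::from_fn(|j| if k & (1 << j) != 0 { tmp[j] } else { src[j] })
}

The zeromask (maskz) forms differ only in the fallback arm, which yields 0.0 instead of src[j].
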
Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

DEFINE SELECT4(src, control) {
    CASE(control[1:0]) OF
    0: tmp[31:0] := src[31:0]
    1: tmp[31:0] := src[63:32]
    2: tmp[31:0] := src[95:64]
    3: tmp[31:0] := src[127:96]
    ESAC
    RETURN tmp[31:0]
}
tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0])
tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2])
tmp_dst[95:64] := SELECT4(b[127:0], imm8[5:4])
tmp_dst[127:96] := SELECT4(b[127:0], imm8[7:6])
tmp_dst[159:128] := SELECT4(a[255:128], imm8[1:0])
tmp_dst[191:160] := SELECT4(a[255:128], imm8[3:2])
tmp_dst[223:192] := SELECT4(b[255:128], imm8[5:4])
tmp_dst[255:224] := SELECT4(b[255:128], imm8[7:6])
FOR j := 0 to 7
    i := j*32
    IF k[j]
        dst[i+31:i] := tmp_dst[i+31:i]
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Miscellaneous.

Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

DEFINE SELECT4(src, control) as above
tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0])
tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2])
tmp_dst[95:64] := SELECT4(b[127:0], imm8[5:4])
tmp_dst[127:96] := SELECT4(b[127:0], imm8[7:6])
tmp_dst[159:128] := SELECT4(a[255:128], imm8[1:0])
tmp_dst[191:160] := SELECT4(a[255:128], imm8[3:2])
tmp_dst[223:192] := SELECT4(b[255:128], imm8[5:4])
tmp_dst[255:224] := SELECT4(b[255:128], imm8[7:6])
FOR j := 0 to 7
    i := j*32
    IF k[j]
        dst[i+31:i] := tmp_dst[i+31:i]
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Miscellaneous.

Shuffle single-precision (32-bit) floating-point elements in "a" using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

DEFINE SELECT4(src, control) as above
tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0])
tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2])
tmp_dst[95:64] := SELECT4(b[127:0], imm8[5:4])
tmp_dst[127:96] := SELECT4(b[127:0], imm8[7:6])
FOR j := 0 to 3
    i := j*32
    IF k[j]
        dst[i+31:i] := tmp_dst[i+31:i]
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Miscellaneous.

Shuffle single-precision (32-bit) floating-point elements in "a" using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

DEFINE SELECT4(src, control) as above
tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0])
tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2])
tmp_dst[95:64] := SELECT4(b[127:0], imm8[5:4])
tmp_dst[127:96] := SELECT4(b[127:0], imm8[7:6])
FOR j := 0 to 3
    i := j*32
    IF k[j]
        dst[i+31:i] := tmp_dst[i+31:i]
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Miscellaneous.
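
A scalar model of SELECT4 clarifies how imm8 is consumed two bits at a time; the sketch below mirrors the 256-bit writemask form (helper names and array layout are assumptions for illustration, not the stdarch implementation):

// Scalar sketch of SELECT4 and the 256-bit masked SHUFPS pseudocode above.
fn select4(src: &[f32], control: u8) -> f32 {
    src[(control & 0b11) as usize] // control[1:0] picks one of four elements
}

fn mask_shuffle_ps_256(src: [f32; 8], k: u8, a: [f32; 8], b: [f32; 8], imm8: u8) -> [f32; 8] {
    let mut tmp = [0.0f32; 8];
    for lane in [0usize, 4] {
        // Per 128-bit lane: two picks from `a`, then two picks from `b`.
        tmp[lane] = select4(&a[lane..lane + 4], imm8);
        tmp[lane + 1] = select4(&a[lane..lane + 4], imm8 >> 2);
        tmp[lane + 2] = select4(&b[lane..lane + 4], imm8 >> 4);
        tmp[lane + 3] = select4(&b[lane..lane + 4], imm8 >> 6);
    }
    core::array::from_fn(|j| if k & (1 << j) != 0 { tmp[j] } else { src[j] })
}
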
Unpack and interleave double-precision (64-bit) floating-point elements from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) {
    dst[63:0] := src1[127:64]
    dst[127:64] := src2[127:64]
    RETURN dst[127:0]
}
tmp_dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0])
tmp_dst[255:128] := INTERLEAVE_HIGH_QWORDS(a[255:128], b[255:128])
FOR j := 0 to 3
    i := j*64
    IF k[j]
        dst[i+63:i] := tmp_dst[i+63:i]
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Miscellaneous.

Unpack and interleave double-precision (64-bit) floating-point elements from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) as above
tmp_dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0])
tmp_dst[255:128] := INTERLEAVE_HIGH_QWORDS(a[255:128], b[255:128])
FOR j := 0 to 3
    i := j*64
    IF k[j]
        dst[i+63:i] := tmp_dst[i+63:i]
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Miscellaneous.

Unpack and interleave double-precision (64-bit) floating-point elements from the high half of "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) as above
tmp_dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0])
FOR j := 0 to 1
    i := j*64
    IF k[j]
        dst[i+63:i] := tmp_dst[i+63:i]
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Miscellaneous.

Unpack and interleave double-precision (64-bit) floating-point elements from the high half of "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) as above
tmp_dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0])
FOR j := 0 to 1
    i := j*64
    IF k[j]
        dst[i+63:i] := tmp_dst[i+63:i]
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Miscellaneous.
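
The interleave is easiest to see with explicit element indices. Below is a minimal scalar sketch of the 256-bit zeromask form above (hypothetical helper, arrays standing in for __m256d):

// INTERLEAVE_HIGH_QWORDS per 128-bit lane: high qword of `a`, then of `b`.
fn maskz_unpackhi_pd_256(k: u8, a: [f64; 4], b: [f64; 4]) -> [f64; 4] {
    let tmp = [a[1], b[1], a[3], b[3]];
    // Zeromask: lanes with a clear mask bit become 0.0 rather than src.
    core::array::from_fn(|j| if k & (1 << j) != 0 { tmp[j] } else { 0.0 })
}
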
Unpack and interleave single-precision (32-bit) floating-point elements from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

DEFINE INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]) {
    dst[31:0] := src1[95:64]
    dst[63:32] := src2[95:64]
    dst[95:64] := src1[127:96]
    dst[127:96] := src2[127:96]
    RETURN dst[127:0]
}
tmp_dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0])
tmp_dst[255:128] := INTERLEAVE_HIGH_DWORDS(a[255:128], b[255:128])
FOR j := 0 to 7
    i := j*32
    IF k[j]
        dst[i+31:i] := tmp_dst[i+31:i]
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Miscellaneous.

Unpack and interleave single-precision (32-bit) floating-point elements from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

DEFINE INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]) as above
tmp_dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0])
tmp_dst[255:128] := INTERLEAVE_HIGH_DWORDS(a[255:128], b[255:128])
FOR j := 0 to 7
    i := j*32
    IF k[j]
        dst[i+31:i] := tmp_dst[i+31:i]
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Miscellaneous.

Unpack and interleave single-precision (32-bit) floating-point elements from the high half of "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

DEFINE INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]) as above
tmp_dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0])
FOR j := 0 to 3
    i := j*32
    IF k[j]
        dst[i+31:i] := tmp_dst[i+31:i]
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Miscellaneous.

Unpack and interleave single-precision (32-bit) floating-point elements from the high half of "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

DEFINE INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]) as above
tmp_dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0])
FOR j := 0 to 3
    i := j*32
    IF k[j]
        dst[i+31:i] := tmp_dst[i+31:i]
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Miscellaneous.
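
The single-precision variant interleaves the two high dwords of each source. A scalar sketch of the 128-bit writemask form above (illustrative helper, not the stdarch implementation):

fn mask_unpackhi_ps_128(src: [f32; 4], k: u8, a: [f32; 4], b: [f32; 4]) -> [f32; 4] {
    // INTERLEAVE_HIGH_DWORDS: [a2, b2, a3, b3].
    let tmp = [a[2], b[2], a[3], b[3]];
    core::array::from_fn(|j| if k & (1 << j) != 0 { tmp[j] } else { src[j] })
}
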
Unpack and interleave double-precision (64-bit) floating-point elements from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) {
    dst[63:0] := src1[63:0]
    dst[127:64] := src2[63:0]
    RETURN dst[127:0]
}
tmp_dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0])
tmp_dst[255:128] := INTERLEAVE_QWORDS(a[255:128], b[255:128])
FOR j := 0 to 3
    i := j*64
    IF k[j]
        dst[i+63:i] := tmp_dst[i+63:i]
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Miscellaneous.

Unpack and interleave double-precision (64-bit) floating-point elements from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) as above
tmp_dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0])
tmp_dst[255:128] := INTERLEAVE_QWORDS(a[255:128], b[255:128])
FOR j := 0 to 3
    i := j*64
    IF k[j]
        dst[i+63:i] := tmp_dst[i+63:i]
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Miscellaneous.

Unpack and interleave double-precision (64-bit) floating-point elements from the low half of "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) as above
tmp_dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0])
FOR j := 0 to 1
    i := j*64
    IF k[j]
        dst[i+63:i] := tmp_dst[i+63:i]
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Miscellaneous.

Unpack and interleave double-precision (64-bit) floating-point elements from the low half of "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) as above
tmp_dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0])
FOR j := 0 to 1
    i := j*64
    IF k[j]
        dst[i+63:i] := tmp_dst[i+63:i]
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Miscellaneous.
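
The low-half unpack mirrors the high-half one, reading the low qwords instead. A scalar sketch of the 256-bit writemask form above (helper name and layout assumed for illustration):

fn mask_unpacklo_pd_256(src: [f64; 4], k: u8, a: [f64; 4], b: [f64; 4]) -> [f64; 4] {
    // INTERLEAVE_QWORDS per 128-bit lane: low qword of `a`, then of `b`.
    let tmp = [a[0], b[0], a[2], b[2]];
    core::array::from_fn(|j| if k & (1 << j) != 0 { tmp[j] } else { src[j] })
}
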
Unpack and interleave single-precision (32-bit) floating-point elements from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) {
    dst[31:0] := src1[31:0]
    dst[63:32] := src2[31:0]
    dst[95:64] := src1[63:32]
    dst[127:96] := src2[63:32]
    RETURN dst[127:0]
}
tmp_dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0])
tmp_dst[255:128] := INTERLEAVE_DWORDS(a[255:128], b[255:128])
FOR j := 0 to 7
    i := j*32
    IF k[j]
        dst[i+31:i] := tmp_dst[i+31:i]
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Miscellaneous.

Unpack and interleave single-precision (32-bit) floating-point elements from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) as above
tmp_dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0])
tmp_dst[255:128] := INTERLEAVE_DWORDS(a[255:128], b[255:128])
FOR j := 0 to 7
    i := j*32
    IF k[j]
        dst[i+31:i] := tmp_dst[i+31:i]
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Miscellaneous.

Unpack and interleave single-precision (32-bit) floating-point elements from the low half of "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) as above
tmp_dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0])
FOR j := 0 to 3
    i := j*32
    IF k[j]
        dst[i+31:i] := tmp_dst[i+31:i]
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Miscellaneous.

Unpack and interleave single-precision (32-bit) floating-point elements from the low half of "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) as above
tmp_dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0])
FOR j := 0 to 3
    i := j*32
    IF k[j]
        dst[i+31:i] := tmp_dst[i+31:i]
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Miscellaneous.
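
And the dword version, sketched for the 256-bit zeromask form above (illustrative helper, not the stdarch implementation):

fn maskz_unpacklo_ps_256(k: u8, a: [f32; 8], b: [f32; 8]) -> [f32; 8] {
    // INTERLEAVE_DWORDS per 128-bit lane: [a0, b0, a1, b1].
    let tmp = [a[0], b[0], a[1], b[1], a[4], b[4], a[5], b[5]];
    core::array::from_fn(|j| if k & (1 << j) != 0 { tmp[j] } else { 0.0 })
}
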
Compare packed double-precision (64-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".

CASE (imm8[4:0]) OF
0: OP := _CMP_EQ_OQ
1: OP := _CMP_LT_OS
2: OP := _CMP_LE_OS
3: OP := _CMP_UNORD_Q
4: OP := _CMP_NEQ_UQ
5: OP := _CMP_NLT_US
6: OP := _CMP_NLE_US
7: OP := _CMP_ORD_Q
8: OP := _CMP_EQ_UQ
9: OP := _CMP_NGE_US
10: OP := _CMP_NGT_US
11: OP := _CMP_FALSE_OQ
12: OP := _CMP_NEQ_OQ
13: OP := _CMP_GE_OS
14: OP := _CMP_GT_OS
15: OP := _CMP_TRUE_UQ
16: OP := _CMP_EQ_OS
17: OP := _CMP_LT_OQ
18: OP := _CMP_LE_OQ
19: OP := _CMP_UNORD_S
20: OP := _CMP_NEQ_US
21: OP := _CMP_NLT_UQ
22: OP := _CMP_NLE_UQ
23: OP := _CMP_ORD_S
24: OP := _CMP_EQ_US
25: OP := _CMP_NGE_UQ
26: OP := _CMP_NGT_UQ
27: OP := _CMP_FALSE_OS
28: OP := _CMP_NEQ_OS
29: OP := _CMP_GE_OQ
30: OP := _CMP_GT_OQ
31: OP := _CMP_TRUE_US
ESAC
FOR j := 0 to 3
    i := j*64
    k[j] := (a[i+63:i] OP b[i+63:i]) ? 1 : 0
ENDFOR
k[MAX:4] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Compare.

Compare packed double-precision (64-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

OP := predicate selected by imm8[4:0] from the same 32-entry _CMP_* table as above
FOR j := 0 to 3
    i := j*64
    IF k1[j]
        k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:4] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Compare.

Compare packed double-precision (64-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".

OP := predicate selected by imm8[4:0] from the same 32-entry _CMP_* table as above
FOR j := 0 to 1
    i := j*64
    k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0
ENDFOR
k[MAX:2] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Compare.

Compare packed double-precision (64-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

OP := predicate selected by imm8[4:0] from the same 32-entry _CMP_* table as above
FOR j := 0 to 1
    i := j*64
    IF k1[j]
        k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:2] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Compare.
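
A scalar sketch of the 4-element compare-to-mask helps decode the predicate table; it covers a few representative predicates (names mirror the _CMP_* constants above). Rust float comparisons are quiet, so the quiet/signaling split (Q vs S suffixes) is not modeled, and the helper name is an assumption, not the stdarch implementation:

fn cmp_pd_mask_256(a: [f64; 4], b: [f64; 4], imm8: u8) -> u8 {
    let mut k = 0u8;
    for j in 0..4 {
        let matched = match imm8 & 0x1f {
            0x00 => a[j] == b[j],                   // _CMP_EQ_OQ: false if unordered
            0x01 => a[j] < b[j],                    // _CMP_LT_OS
            0x03 => a[j].is_nan() || b[j].is_nan(), // _CMP_UNORD_Q
            0x04 => a[j] != b[j],                   // _CMP_NEQ_UQ: true if unordered
            0x0b => false,                          // _CMP_FALSE_OQ
            0x0f => true,                           // _CMP_TRUE_UQ
            _ => unimplemented!("remaining _CMP_* predicates follow the same pattern"),
        };
        if matched {
            k |= 1 << j;
        }
    }
    k
}
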
Compare packed single-precision (32-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".

OP := predicate selected by imm8[4:0] from the same 32-entry _CMP_* table as above
FOR j := 0 to 7
    i := j*32
    k[j] := (a[i+31:i] OP b[i+31:i]) ? 1 : 0
ENDFOR
k[MAX:8] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Compare.

Compare packed single-precision (32-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

OP := predicate selected by imm8[4:0] from the same 32-entry _CMP_* table as above
FOR j := 0 to 7
    i := j*32
    IF k1[j]
        k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:8] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Compare.

Compare packed single-precision (32-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".

OP := predicate selected by imm8[4:0] from the same 32-entry _CMP_* table as above
FOR j := 0 to 3
    i := j*32
    k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0
ENDFOR
k[MAX:4] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Compare.

Compare packed single-precision (32-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

OP := predicate selected by imm8[4:0] from the same 32-entry _CMP_* table as above
FOR j := 0 to 3
    i := j*32
    IF k1[j]
        k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:4] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Compare.
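
The k1-masked compare forms reduce to ANDing the unmasked result with k1, because a masked-off lane and a failed comparison both produce a 0 bit. Reusing the hypothetical cmp_pd_mask_256 sketch above:

fn mask_cmp_pd_mask_256(k1: u8, a: [f64; 4], b: [f64; 4], imm8: u8) -> u8 {
    // Zeromask applied to the output bits: only lanes enabled in k1 can be set.
    k1 & cmp_pd_mask_256(a, b, imm8)
}

The single-precision forms are identical apart from the element type and the mask width: 8 bits for a 256-bit vector, 4 bits for a 128-bit one.
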
Compare packed signed 32-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".

CASE (imm8[2:0]) OF
0: OP := _MM_CMPINT_EQ
1: OP := _MM_CMPINT_LT
2: OP := _MM_CMPINT_LE
3: OP := _MM_CMPINT_FALSE
4: OP := _MM_CMPINT_NE
5: OP := _MM_CMPINT_NLT
6: OP := _MM_CMPINT_NLE
7: OP := _MM_CMPINT_TRUE
ESAC
FOR j := 0 to 7
    i := j*32
    k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0
ENDFOR
k[MAX:8] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Compare.

Compare packed signed 32-bit integers in "a" and "b" for equality, and store the results in mask vector "k".

FOR j := 0 to 7
    i := j*32
    k[j] := ( a[i+31:i] == b[i+31:i] ) ? 1 : 0
ENDFOR
k[MAX:8] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Compare.

Compare packed signed 32-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k".

FOR j := 0 to 7
    i := j*32
    k[j] := ( a[i+31:i] >= b[i+31:i] ) ? 1 : 0
ENDFOR
k[MAX:8] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Compare.

Compare packed signed 32-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k".

FOR j := 0 to 7
    i := j*32
    k[j] := ( a[i+31:i] > b[i+31:i] ) ? 1 : 0
ENDFOR
k[MAX:8] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Compare.

Compare packed signed 32-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k".

FOR j := 0 to 7
    i := j*32
    k[j] := ( a[i+31:i] <= b[i+31:i] ) ? 1 : 0
ENDFOR
k[MAX:8] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Compare.

Compare packed signed 32-bit integers in "a" and "b" for less-than, and store the results in mask vector "k".

FOR j := 0 to 7
    i := j*32
    k[j] := ( a[i+31:i] < b[i+31:i] ) ? 1 : 0
ENDFOR
k[MAX:8] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Compare.

Compare packed signed 32-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k".

FOR j := 0 to 7
    i := j*32
    k[j] := ( a[i+31:i] != b[i+31:i] ) ? 1 : 0
ENDFOR
k[MAX:8] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Compare.
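
A scalar model of the integer compare-to-mask; the match arms follow the _MM_CMPINT_* table above, and the fixed-predicate entries (cmpeq, cmpge, cmpgt, cmple, cmplt, cmpneq) behave like this helper with imm8 fixed to 0, 5, 6, 2, 1 and 4 respectively. The helper name is assumed for illustration:

fn cmp_epi32_mask_256(a: [i32; 8], b: [i32; 8], imm8: u8) -> u8 {
    let mut k = 0u8;
    for j in 0..8 {
        let matched = match imm8 & 0b111 {
            0 => a[j] == b[j], // _MM_CMPINT_EQ
            1 => a[j] < b[j],  // _MM_CMPINT_LT
            2 => a[j] <= b[j], // _MM_CMPINT_LE
            3 => false,        // _MM_CMPINT_FALSE
            4 => a[j] != b[j], // _MM_CMPINT_NE
            5 => a[j] >= b[j], // _MM_CMPINT_NLT, i.e. greater-or-equal
            6 => a[j] > b[j],  // _MM_CMPINT_NLE, i.e. greater-than
            _ => true,         // _MM_CMPINT_TRUE
        };
        if matched {
            k |= 1 << j;
        }
    }
    k
}
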
Compare packed signed 32-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

OP := predicate selected by imm8[2:0] from the same _MM_CMPINT_* table as above
FOR j := 0 to 7
    i := j*32
    IF k1[j]
        k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:8] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Compare.

Compare packed signed 32-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := j*32
    IF k1[j]
        k[j] := ( a[i+31:i] == b[i+31:i] ) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:8] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Compare.

Compare packed signed 32-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := j*32
    IF k1[j]
        k[j] := ( a[i+31:i] >= b[i+31:i] ) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:8] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Compare.

Compare packed signed 32-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := j*32
    IF k1[j]
        k[j] := ( a[i+31:i] > b[i+31:i] ) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:8] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Compare.

Compare packed signed 32-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := j*32
    IF k1[j]
        k[j] := ( a[i+31:i] <= b[i+31:i] ) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:8] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Compare.

Compare packed signed 32-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := j*32
    IF k1[j]
        k[j] := ( a[i+31:i] < b[i+31:i] ) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:8] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Compare.

Compare packed signed 32-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := j*32
    IF k1[j]
        k[j] := ( a[i+31:i] != b[i+31:i] ) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:8] := 0

CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Compare.
- - - - - Compare packed signed 32-bit integers in "a" and "b" based on the comparison - operand specified by "imm8", and store the results in mask vector "k". - CASE (imm8[2:0]) OF - 0: OP := _MM_CMPINT_EQ - 1: OP := _MM_CMPINT_LT - 2: OP := _MM_CMPINT_LE - 3: OP := _MM_CMPINT_FALSE - 4: OP := _MM_CMPINT_NE - 5: OP := _MM_CMPINT_NLT - 6: OP := _MM_CMPINT_NLE - 7: OP := _MM_CMPINT_TRUE - ESAC - FOR j := 0 to 3 - i := j*32 - k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0 - ENDFOR - k[MAX:4] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed signed 32-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + CASE (imm8[2:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 3 + i := j*32 + k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - Compare packed signed 32-bit integers in "a" and "b" for equality, and store - the results in mask vector "k". - - FOR j := 0 to 3 - i := j*32 - k[j] := ( a[i+31:i] == b[i+31:i] ) ? 1 : 0 - ENDFOR - k[MAX:4] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + Compare packed signed 32-bit integers in "a" and "b" for equality, and store the results in mask vector "k". + +FOR j := 0 to 3 + i := j*32 + k[j] := ( a[i+31:i] == b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - Compare packed signed 32-bit integers in "a" and "b" for greater-than-or-equal, - and store the results in mask vector "k". - - FOR j := 0 to 3 - i := j*32 - k[j] := ( a[i+31:i] >= b[i+31:i] ) ? 1 : 0 - ENDFOR - k[MAX:4] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + Compare packed signed 32-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 3 + i := j*32 + k[j] := ( a[i+31:i] >= b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - Compare packed signed 32-bit integers in "a" and "b" for greater-than, and - store the results in mask vector "k". - - FOR j := 0 to 3 - i := j*32 - k[j] := ( a[i+31:i] > b[i+31:i] ) ? 1 : 0 - ENDFOR - k[MAX:4] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + Compare packed signed 32-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k". + +FOR j := 0 to 3 + i := j*32 + k[j] := ( a[i+31:i] > b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - Compare packed signed 32-bit integers in "a" and "b" for less-than-or-equal, - and store the results in mask vector "k". - - FOR j := 0 to 3 - i := j*32 - k[j] := ( a[i+31:i] <= b[i+31:i] ) ? 1 : 0 - ENDFOR - k[MAX:4] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + Compare packed signed 32-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 3 + i := j*32 + k[j] := ( a[i+31:i] <= b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - Compare packed signed 32-bit integers in "a" and "b" for less-than, and store - the results in mask vector "k". - - FOR j := 0 to 3 - i := j*32 - k[j] := ( a[i+31:i] < b[i+31:i] ) ? 1 : 0 - ENDFOR - k[MAX:4] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + Compare packed signed 32-bit integers in "a" and "b" for less-than, and store the results in mask vector "k". + +FOR j := 0 to 3 + i := j*32 + k[j] := ( a[i+31:i] < b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - Compare packed signed 32-bit integers in "a" and "b" for not-equal, and store - the results in mask vector "k". - - FOR j := 0 to 3 - i := j*32 - k[j] := ( a[i+31:i] != b[i+31:i] ) ? 1 : 0 - ENDFOR - k[MAX:4] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + Compare packed signed 32-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k". + +FOR j := 0 to 3 + i := j*32 + k[j] := ( a[i+31:i] != b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - - - Compare packed signed 32-bit integers in "a" and "b" based on the comparison - operand specified by "imm8", and store the results in mask vector "k" using zeromask - "k1" (elements are zeroed out when the corresponding mask bit is not set). - CASE (imm8[2:0]) OF - 0: OP := _MM_CMPINT_EQ - 1: OP := _MM_CMPINT_LT - 2: OP := _MM_CMPINT_LE - 3: OP := _MM_CMPINT_FALSE - 4: OP := _MM_CMPINT_NE - 5: OP := _MM_CMPINT_NLT - 6: OP := _MM_CMPINT_NLE - 7: OP := _MM_CMPINT_TRUE - ESAC - FOR j := 0 to 3 - i := j*32 - IF k1[j] - k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:4] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + + + Compare packed signed 32-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + CASE (imm8[2:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 3 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed signed 32-bit integers in "a" and "b" for equality, and store - the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k1[j] - k[j] := ( a[i+31:i] == b[i+31:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:4] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed signed 32-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] == b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed signed 32-bit integers in "a" and "b" for greater-than-or-equal, - and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out - when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k1[j] - k[j] := ( a[i+31:i] >= b[i+31:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:4] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed signed 32-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] >= b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed signed 32-bit integers in "a" and "b" for greater-than, and - store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when - the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k1[j] - k[j] := ( a[i+31:i] > b[i+31:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:4] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed signed 32-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] > b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed signed 32-bit integers in "a" and "b" for less-than-or-equal, - and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out - when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k1[j] - k[j] := ( a[i+31:i] <= b[i+31:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:4] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed signed 32-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] <= b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed signed 32-bit integers in "a" and "b" for less-than, and store - the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k1[j] - k[j] := ( a[i+31:i] < b[i+31:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:4] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed signed 32-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] < b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed signed 32-bit integers in "a" and "b" for not-equal, and store - the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k1[j] - k[j] := ( a[i+31:i] != b[i+31:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:4] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed signed 32-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] != b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed signed 64-bit integers in "a" and "b" based on the comparison - operand specified by "imm8", and store the results in mask vector "k". - CASE (imm8[2:0]) OF - 0: OP := _MM_CMPINT_EQ - 1: OP := _MM_CMPINT_LT - 2: OP := _MM_CMPINT_LE - 3: OP := _MM_CMPINT_FALSE - 4: OP := _MM_CMPINT_NE - 5: OP := _MM_CMPINT_NLT - 6: OP := _MM_CMPINT_NLE - 7: OP := _MM_CMPINT_TRUE - ESAC - FOR j := 0 to 3 - i := j*64 - k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0 - ENDFOR - k[MAX:4] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed signed 64-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + CASE (imm8[2:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 3 + i := j*64 + k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - Compare packed signed 64-bit integers in "a" and "b" for equality, and store - the results in mask vector "k". - - FOR j := 0 to 3 - i := j*64 - k[j] := ( a[i+63:i] == b[i+63:i] ) ? 1 : 0 - ENDFOR - k[MAX:4] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + Compare packed signed 64-bit integers in "a" and "b" for equality, and store the results in mask vector "k". + +FOR j := 0 to 3 + i := j*64 + k[j] := ( a[i+63:i] == b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - Compare packed signed 64-bit integers in "a" and "b" for greater-than-or-equal, - and store the results in mask vector "k". - - FOR j := 0 to 3 - i := j*64 - k[j] := ( a[i+63:i] >= b[i+63:i] ) ? 1 : 0 - ENDFOR - k[MAX:4] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + Compare packed signed 64-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 3 + i := j*64 + k[j] := ( a[i+63:i] >= b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - Compare packed signed 64-bit integers in "a" and "b" for greater-than, and - store the results in mask vector "k". - - FOR j := 0 to 3 - i := j*64 - k[j] := ( a[i+63:i] > b[i+63:i] ) ? 1 : 0 - ENDFOR - k[MAX:4] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + Compare packed signed 64-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k". + +FOR j := 0 to 3 + i := j*64 + k[j] := ( a[i+63:i] > b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - Compare packed signed 64-bit integers in "a" and "b" for less-than-or-equal, - and store the results in mask vector "k". - - FOR j := 0 to 3 - i := j*64 - k[j] := ( a[i+63:i] <= b[i+63:i] ) ? 1 : 0 - ENDFOR - k[MAX:4] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + Compare packed signed 64-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 3 + i := j*64 + k[j] := ( a[i+63:i] <= b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - Compare packed signed 64-bit integers in "a" and "b" for less-than, and store - the results in mask vector "k". - - FOR j := 0 to 3 - i := j*64 - k[j] := ( a[i+63:i] < b[i+63:i] ) ? 1 : 0 - ENDFOR - k[MAX:4] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + Compare packed signed 64-bit integers in "a" and "b" for less-than, and store the results in mask vector "k". + +FOR j := 0 to 3 + i := j*64 + k[j] := ( a[i+63:i] < b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - Compare packed signed 64-bit integers in "a" and "b" for not-equal, and store - the results in mask vector "k". - - FOR j := 0 to 3 - i := j*64 - k[j] := ( a[i+63:i] != b[i+63:i] ) ? 1 : 0 - ENDFOR - k[MAX:4] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + Compare packed signed 64-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k". + +FOR j := 0 to 3 + i := j*64 + k[j] := ( a[i+63:i] != b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - - - Compare packed signed 64-bit integers in "a" and "b" based on the comparison - operand specified by "imm8", and store the results in mask vector "k" using zeromask - "k1" (elements are zeroed out when the corresponding mask bit is not set). - CASE (imm8[2:0]) OF - 0: OP := _MM_CMPINT_EQ - 1: OP := _MM_CMPINT_LT - 2: OP := _MM_CMPINT_LE - 3: OP := _MM_CMPINT_FALSE - 4: OP := _MM_CMPINT_NE - 5: OP := _MM_CMPINT_NLT - 6: OP := _MM_CMPINT_NLE - 7: OP := _MM_CMPINT_TRUE - ESAC - FOR j := 0 to 3 - i := j*64 - IF k1[j] - k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:4] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + + + Compare packed signed 64-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + CASE (imm8[2:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 3 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed signed 64-bit integers in "a" and "b" for equality, and store - the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k1[j] - k[j] := ( a[i+63:i] == b[i+63:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:4] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed signed 64-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] == b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed signed 64-bit integers in "a" and "b" for greater-than-or-equal, - and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out - when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k1[j] - k[j] := ( a[i+63:i] >= b[i+63:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:4] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed signed 64-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] >= b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed signed 64-bit integers in "a" and "b" for greater-than, and - store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when - the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k1[j] - k[j] := ( a[i+63:i] > b[i+63:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:4] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed signed 64-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] > b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed signed 64-bit integers in "a" and "b" for less-than-or-equal, - and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out - when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k1[j] - k[j] := ( a[i+63:i] <= b[i+63:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:4] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed signed 64-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] <= b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed signed 64-bit integers in "a" and "b" for less-than, and store - the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k1[j] - k[j] := ( a[i+63:i] < b[i+63:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:4] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed signed 64-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] < b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed signed 64-bit integers in "a" and "b" for not-equal, and store - the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k1[j] - k[j] := ( a[i+63:i] != b[i+63:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:4] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed signed 64-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] != b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed signed 64-bit integers in "a" and "b" based on the comparison - operand specified by "imm8", and store the results in mask vector "k". - CASE (imm8[2:0]) OF - 0: OP := _MM_CMPINT_EQ - 1: OP := _MM_CMPINT_LT - 2: OP := _MM_CMPINT_LE - 3: OP := _MM_CMPINT_FALSE - 4: OP := _MM_CMPINT_NE - 5: OP := _MM_CMPINT_NLT - 6: OP := _MM_CMPINT_NLE - 7: OP := _MM_CMPINT_TRUE - ESAC - FOR j := 0 to 1 - i := j*64 - k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0 - ENDFOR - k[MAX:2] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed signed 64-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + CASE (imm8[2:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 1 + i := j*64 + k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:2] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - Compare packed signed 64-bit integers in "a" and "b" for equality, and store - the results in mask vector "k". - - FOR j := 0 to 1 - i := j*64 - k[j] := ( a[i+63:i] == b[i+63:i] ) ? 1 : 0 - ENDFOR - k[MAX:2] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + Compare packed signed 64-bit integers in "a" and "b" for equality, and store the results in mask vector "k". + +FOR j := 0 to 1 + i := j*64 + k[j] := ( a[i+63:i] == b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:2] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - Compare packed signed 64-bit integers in "a" and "b" for greater-than-or-equal, - and store the results in mask vector "k". - - FOR j := 0 to 1 - i := j*64 - k[j] := ( a[i+63:i] >= b[i+63:i] ) ? 1 : 0 - ENDFOR - k[MAX:2] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + Compare packed signed 64-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 1 + i := j*64 + k[j] := ( a[i+63:i] >= b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:2] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - Compare packed signed 64-bit integers in "a" and "b" for greater-than, and - store the results in mask vector "k". - - FOR j := 0 to 1 - i := j*64 - k[j] := ( a[i+63:i] > b[i+63:i] ) ? 1 : 0 - ENDFOR - k[MAX:2] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + Compare packed signed 64-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k". + +FOR j := 0 to 1 + i := j*64 + k[j] := ( a[i+63:i] > b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:2] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - Compare packed signed 64-bit integers in "a" and "b" for less-than-or-equal, - and store the results in mask vector "k". - - FOR j := 0 to 1 - i := j*64 - k[j] := ( a[i+63:i] <= b[i+63:i] ) ? 1 : 0 - ENDFOR - k[MAX:2] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + Compare packed signed 64-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 1 + i := j*64 + k[j] := ( a[i+63:i] <= b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:2] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - Compare packed signed 64-bit integers in "a" and "b" for less-than, and store - the results in mask vector "k". - - FOR j := 0 to 1 - i := j*64 - k[j] := ( a[i+63:i] < b[i+63:i] ) ? 1 : 0 - ENDFOR - k[MAX:2] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + Compare packed signed 64-bit integers in "a" and "b" for less-than, and store the results in mask vector "k". + +FOR j := 0 to 1 + i := j*64 + k[j] := ( a[i+63:i] < b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:2] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - Compare packed signed 64-bit integers in "a" and "b" for not-equal, and store - the results in mask vector "k". - - FOR j := 0 to 1 - i := j*64 - k[j] := ( a[i+63:i] != b[i+63:i] ) ? 1 : 0 - ENDFOR - k[MAX:2] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + Compare packed signed 64-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k". + +FOR j := 0 to 1 + i := j*64 + k[j] := ( a[i+63:i] != b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:2] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - - - Compare packed signed 64-bit integers in "a" and "b" based on the comparison - operand specified by "imm8", and store the results in mask vector "k" using zeromask - "k1" (elements are zeroed out when the corresponding mask bit is not set). - CASE (imm8[2:0]) OF - 0: OP := _MM_CMPINT_EQ - 1: OP := _MM_CMPINT_LT - 2: OP := _MM_CMPINT_LE - 3: OP := _MM_CMPINT_FALSE - 4: OP := _MM_CMPINT_NE - 5: OP := _MM_CMPINT_NLT - 6: OP := _MM_CMPINT_NLE - 7: OP := _MM_CMPINT_TRUE - ESAC - FOR j := 0 to 1 - i := j*64 - IF k1[j] - k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:2] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + + + Compare packed signed 64-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + CASE (imm8[2:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 1 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:2] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed signed 64-bit integers in "a" and "b" for equality, and store - the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k1[j] - k[j] := ( a[i+63:i] == b[i+63:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:2] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed signed 64-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] == b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:2] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed signed 64-bit integers in "a" and "b" for greater-than-or-equal, - and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out - when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k1[j] - k[j] := ( a[i+63:i] >= b[i+63:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:2] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed signed 64-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] >= b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:2] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed signed 64-bit integers in "a" and "b" for greater-than, and - store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when - the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k1[j] - k[j] := ( a[i+63:i] > b[i+63:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:2] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed signed 64-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] > b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:2] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed signed 64-bit integers in "a" and "b" for less-than-or-equal, - and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out - when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k1[j] - k[j] := ( a[i+63:i] <= b[i+63:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:2] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed signed 64-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] <= b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:2] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed signed 64-bit integers in "a" and "b" for less-than, and store - the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k1[j] - k[j] := ( a[i+63:i] < b[i+63:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:2] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed signed 64-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] < b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:2] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed signed 64-bit integers in "a" and "b" for not-equal, and store - the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k1[j] - k[j] := ( a[i+63:i] != b[i+63:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:2] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed signed 64-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] != b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:2] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed unsigned 32-bit integers in "a" and "b" based on the comparison - operand specified by "imm8", and store the results in mask vector "k". - CASE (imm8[2:0]) OF - 0: OP := _MM_CMPINT_EQ - 1: OP := _MM_CMPINT_LT - 2: OP := _MM_CMPINT_LE - 3: OP := _MM_CMPINT_FALSE - 4: OP := _MM_CMPINT_NE - 5: OP := _MM_CMPINT_NLT - 6: OP := _MM_CMPINT_NLE - 7: OP := _MM_CMPINT_TRUE - ESAC - FOR j := 0 to 7 - i := j*32 - k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0 - ENDFOR - k[MAX:8] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed unsigned 32-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + CASE (imm8[2:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 7 + i := j*32 + k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - Compare packed unsigned 32-bit integers in "a" and "b" for equality, and store - the results in mask vector "k". - - FOR j := 0 to 7 - i := j*32 - k[j] := ( a[i+31:i] == b[i+31:i] ) ? 1 : 0 - ENDFOR - k[MAX:8] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + Compare packed unsigned 32-bit integers in "a" and "b" for equality, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*32 + k[j] := ( a[i+31:i] == b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - Compare packed unsigned 32-bit integers in "a" and "b" for - greater-than-or-equal, and store the results in mask vector "k". - - FOR j := 0 to 7 - i := j*32 - k[j] := ( a[i+31:i] >= b[i+31:i] ) ? 1 : 0 - ENDFOR - k[MAX:8] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + Compare packed unsigned 32-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*32 + k[j] := ( a[i+31:i] >= b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - Compare packed unsigned 32-bit integers in "a" and "b" for greater-than, and - store the results in mask vector "k". - - FOR j := 0 to 7 - i := j*32 - k[j] := ( a[i+31:i] > b[i+31:i] ) ? 1 : 0 - ENDFOR - k[MAX:8] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + Compare packed unsigned 32-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*32 + k[j] := ( a[i+31:i] > b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - Compare packed unsigned 32-bit integers in "a" and "b" for less-than-or-equal, - and store the results in mask vector "k". - - FOR j := 0 to 7 - i := j*32 - k[j] := ( a[i+31:i] <= b[i+31:i] ) ? 1 : 0 - ENDFOR - k[MAX:8] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + Compare packed unsigned 32-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*32 + k[j] := ( a[i+31:i] <= b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - Compare packed unsigned 32-bit integers in "a" and "b" for less-than, and store - the results in mask vector "k". - - FOR j := 0 to 7 - i := j*32 - k[j] := ( a[i+31:i] < b[i+31:i] ) ? 1 : 0 - ENDFOR - k[MAX:8] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + Compare packed unsigned 32-bit integers in "a" and "b" for less-than, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*32 + k[j] := ( a[i+31:i] < b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - Compare packed unsigned 32-bit integers in "a" and "b" for not-equal, and store - the results in mask vector "k". - - FOR j := 0 to 7 - i := j*32 - k[j] := ( a[i+31:i] != b[i+31:i] ) ? 1 : 0 - ENDFOR - k[MAX:8] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + Compare packed unsigned 32-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*32 + k[j] := ( a[i+31:i] != b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - - - Compare packed unsigned 32-bit integers in "a" and "b" based on the comparison - operand specified by "imm8", and store the results in mask vector "k" using zeromask - "k1" (elements are zeroed out when the corresponding mask bit is not set). - CASE (imm8[2:0]) OF - 0: OP := _MM_CMPINT_EQ - 1: OP := _MM_CMPINT_LT - 2: OP := _MM_CMPINT_LE - 3: OP := _MM_CMPINT_FALSE - 4: OP := _MM_CMPINT_NE - 5: OP := _MM_CMPINT_NLT - 6: OP := _MM_CMPINT_NLE - 7: OP := _MM_CMPINT_TRUE - ESAC - FOR j := 0 to 7 - i := j*32 - IF k1[j] - k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:8] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + + + Compare packed unsigned 32-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + CASE (imm8[2:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 7 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed unsigned 32-bit integers in "a" and "b" for equality, and store - the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k1[j] - k[j] := ( a[i+31:i] == b[i+31:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:8] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed unsigned 32-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] == b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed unsigned 32-bit integers in "a" and "b" for - greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k1[j] - k[j] := ( a[i+31:i] >= b[i+31:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:8] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed unsigned 32-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] >= b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed unsigned 32-bit integers in "a" and "b" for greater-than, and - store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when - the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k1[j] - k[j] := ( a[i+31:i] > b[i+31:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:8] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed unsigned 32-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] > b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed unsigned 32-bit integers in "a" and "b" for less-than-or-equal, - and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out - when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k1[j] - k[j] := ( a[i+31:i] <= b[i+31:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:8] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed unsigned 32-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] <= b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed unsigned 32-bit integers in "a" and "b" for less-than, and store - the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k1[j] - k[j] := ( a[i+31:i] < b[i+31:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:8] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed unsigned 32-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] < b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed unsigned 32-bit integers in "a" and "b" for not-equal, and store - the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k1[j] - k[j] := ( a[i+31:i] != b[i+31:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:8] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed unsigned 32-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] != b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed unsigned 32-bit integers in "a" and "b" based on the comparison - operand specified by "imm8", and store the results in mask vector "k". - CASE (imm8[2:0]) OF - 0: OP := _MM_CMPINT_EQ - 1: OP := _MM_CMPINT_LT - 2: OP := _MM_CMPINT_LE - 3: OP := _MM_CMPINT_FALSE - 4: OP := _MM_CMPINT_NE - 5: OP := _MM_CMPINT_NLT - 6: OP := _MM_CMPINT_NLE - 7: OP := _MM_CMPINT_TRUE - ESAC - FOR j := 0 to 3 - i := j*32 - k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0 - ENDFOR - k[MAX:4] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed unsigned 32-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + CASE (imm8[2:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 3 + i := j*32 + k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - Compare packed unsigned 32-bit integers in "a" and "b" for equality, and store - the results in mask vector "k". - - FOR j := 0 to 3 - i := j*32 - k[j] := ( a[i+31:i] == b[i+31:i] ) ? 1 : 0 - ENDFOR - k[MAX:4] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + Compare packed unsigned 32-bit integers in "a" and "b" for equality, and store the results in mask vector "k". + +FOR j := 0 to 3 + i := j*32 + k[j] := ( a[i+31:i] == b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - Compare packed unsigned 32-bit integers in "a" and "b" for - greater-than-or-equal, and store the results in mask vector "k". - - FOR j := 0 to 3 - i := j*32 - k[j] := ( a[i+31:i] >= b[i+31:i] ) ? 1 : 0 - ENDFOR - k[MAX:4] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + Compare packed unsigned 32-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 3 + i := j*32 + k[j] := ( a[i+31:i] >= b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - Compare packed unsigned 32-bit integers in "a" and "b" for greater-than, and - store the results in mask vector "k". - - FOR j := 0 to 3 - i := j*32 - k[j] := ( a[i+31:i] > b[i+31:i] ) ? 1 : 0 - ENDFOR - k[MAX:4] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + Compare packed unsigned 32-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k". + +FOR j := 0 to 3 + i := j*32 + k[j] := ( a[i+31:i] > b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - Compare packed unsigned 32-bit integers in "a" and "b" for less-than-or-equal, - and store the results in mask vector "k". - - FOR j := 0 to 3 - i := j*32 - k[j] := ( a[i+31:i] <= b[i+31:i] ) ? 1 : 0 - ENDFOR - k[MAX:4] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + Compare packed unsigned 32-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 3 + i := j*32 + k[j] := ( a[i+31:i] <= b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - Compare packed unsigned 32-bit integers in "a" and "b" for less-than, and store - the results in mask vector "k". - - FOR j := 0 to 3 - i := j*32 - k[j] := ( a[i+31:i] < b[i+31:i] ) ? 1 : 0 - ENDFOR - k[MAX:4] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + Compare packed unsigned 32-bit integers in "a" and "b" for less-than, and store the results in mask vector "k". + +FOR j := 0 to 3 + i := j*32 + k[j] := ( a[i+31:i] < b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - Compare packed unsigned 32-bit integers in "a" and "b" for not-equal, and store - the results in mask vector "k". - - FOR j := 0 to 3 - i := j*32 - k[j] := ( a[i+31:i] != b[i+31:i] ) ? 1 : 0 - ENDFOR - k[MAX:4] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + Compare packed unsigned 32-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k". + +FOR j := 0 to 3 + i := j*32 + k[j] := ( a[i+31:i] != b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:4] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - - - Compare packed unsigned 32-bit integers in "a" and "b" based on the comparison - operand specified by "imm8", and store the results in mask vector "k" using zeromask - "k1" (elements are zeroed out when the corresponding mask bit is not set). - CASE (imm8[2:0]) OF - 0: OP := _MM_CMPINT_EQ - 1: OP := _MM_CMPINT_LT - 2: OP := _MM_CMPINT_LE - 3: OP := _MM_CMPINT_FALSE - 4: OP := _MM_CMPINT_NE - 5: OP := _MM_CMPINT_NLT - 6: OP := _MM_CMPINT_NLE - 7: OP := _MM_CMPINT_TRUE - ESAC - FOR j := 0 to 3 - i := j*32 - IF k1[j] - k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:4] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + + + Compare packed unsigned 32-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + CASE (imm8[2:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 3 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed unsigned 32-bit integers in "a" and "b" for equality, and store - the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k1[j] - k[j] := ( a[i+31:i] == b[i+31:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:4] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed unsigned 32-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] == b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed unsigned 32-bit integers in "a" and "b" for - greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k1[j] - k[j] := ( a[i+31:i] >= b[i+31:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:4] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed unsigned 32-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] >= b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed unsigned 32-bit integers in "a" and "b" for greater-than, and - store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when - the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k1[j] - k[j] := ( a[i+31:i] > b[i+31:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:4] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed unsigned 32-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] > b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed unsigned 32-bit integers in "a" and "b" for less-than-or-equal, - and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out - when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k1[j] - k[j] := ( a[i+31:i] <= b[i+31:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:4] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed unsigned 32-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] <= b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed unsigned 32-bit integers in "a" and "b" for less-than, and store - the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k1[j] - k[j] := ( a[i+31:i] < b[i+31:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:4] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed unsigned 32-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] < b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed unsigned 32-bit integers in "a" and "b" for not-equal, and store - the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k1[j] - k[j] := ( a[i+31:i] != b[i+31:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:4] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed unsigned 32-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] != b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:4] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Compare
Compare packed unsigned 64-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".
CASE (imm8[2:0]) OF
0: OP := _MM_CMPINT_EQ
1: OP := _MM_CMPINT_LT
2: OP := _MM_CMPINT_LE
3: OP := _MM_CMPINT_FALSE
4: OP := _MM_CMPINT_NE
5: OP := _MM_CMPINT_NLT
6: OP := _MM_CMPINT_NLE
7: OP := _MM_CMPINT_TRUE
ESAC
FOR j := 0 to 3
  i := j*64
  k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0
ENDFOR
k[MAX:4] := 0
AVX512F AVX512VL immintrin.h Compare

Compare packed unsigned 64-bit integers in "a" and "b" for equality, and store the results in mask vector "k".
FOR j := 0 to 3
  i := j*64
  k[j] := ( a[i+63:i] == b[i+63:i] ) ? 1 : 0
ENDFOR
k[MAX:4] := 0
AVX512F AVX512VL immintrin.h Compare

Compare packed unsigned 64-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k".
FOR j := 0 to 3
  i := j*64
  k[j] := ( a[i+63:i] >= b[i+63:i] ) ? 1 : 0
ENDFOR
k[MAX:4] := 0
AVX512F AVX512VL immintrin.h Compare

Compare packed unsigned 64-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k".
FOR j := 0 to 3
  i := j*64
  k[j] := ( a[i+63:i] > b[i+63:i] ) ? 1 : 0
ENDFOR
k[MAX:4] := 0
AVX512F AVX512VL immintrin.h Compare

Compare packed unsigned 64-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k".
FOR j := 0 to 3
  i := j*64
  k[j] := ( a[i+63:i] <= b[i+63:i] ) ? 1 : 0
ENDFOR
k[MAX:4] := 0
AVX512F AVX512VL immintrin.h Compare

Compare packed unsigned 64-bit integers in "a" and "b" for less-than, and store the results in mask vector "k".
FOR j := 0 to 3
  i := j*64
  k[j] := ( a[i+63:i] < b[i+63:i] ) ? 1 : 0
ENDFOR
k[MAX:4] := 0
AVX512F AVX512VL immintrin.h Compare

Compare packed unsigned 64-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k".
FOR j := 0 to 3
  i := j*64
  k[j] := ( a[i+63:i] != b[i+63:i] ) ? 1 : 0
ENDFOR
k[MAX:4] := 0
AVX512F AVX512VL immintrin.h Compare
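The imm8 form above folds the fixed predicates (plus FALSE and TRUE) into a single intrinsic. Assuming the 4-lane entry corresponds to `_mm256_cmp_epu64_mask`, a C sketch showing that `_MM_CMPINT_LE` reproduces the dedicated less-than-or-equal entry:

/* Build with: cc -O2 -mavx512f -mavx512vl cmp_imm_demo.c */
#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m256i a = _mm256_setr_epi64x(10, 20, 30, 40);
    __m256i b = _mm256_setr_epi64x(10, 99, 30, 1);
    /* imm8 selects the predicate; it must be a compile-time constant. */
    __mmask8 le  = _mm256_cmp_epu64_mask(a, b, _MM_CMPINT_LE);
    /* The fixed-predicate form is equivalent. */
    __mmask8 le2 = _mm256_cmple_epu64_mask(a, b);
    printf("%#x %#x\n", le, le2);  /* both 0b0111 */
    return 0;
}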
Compare packed unsigned 64-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).
CASE (imm8[2:0]) OF
0: OP := _MM_CMPINT_EQ
1: OP := _MM_CMPINT_LT
2: OP := _MM_CMPINT_LE
3: OP := _MM_CMPINT_FALSE
4: OP := _MM_CMPINT_NE
5: OP := _MM_CMPINT_NLT
6: OP := _MM_CMPINT_NLE
7: OP := _MM_CMPINT_TRUE
ESAC
FOR j := 0 to 3
  i := j*64
  IF k1[j]
    k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0
  ELSE
    k[j] := 0
  FI
ENDFOR
k[MAX:4] := 0
AVX512F AVX512VL immintrin.h Compare

Compare packed unsigned 64-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).
FOR j := 0 to 3
  i := j*64
  IF k1[j]
    k[j] := ( a[i+63:i] == b[i+63:i] ) ? 1 : 0
  ELSE
    k[j] := 0
  FI
ENDFOR
k[MAX:4] := 0
AVX512F AVX512VL immintrin.h Compare

Compare packed unsigned 64-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).
FOR j := 0 to 3
  i := j*64
  IF k1[j]
    k[j] := ( a[i+63:i] >= b[i+63:i] ) ? 1 : 0
  ELSE
    k[j] := 0
  FI
ENDFOR
k[MAX:4] := 0
AVX512F AVX512VL immintrin.h Compare

Compare packed unsigned 64-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).
FOR j := 0 to 3
  i := j*64
  IF k1[j]
    k[j] := ( a[i+63:i] > b[i+63:i] ) ? 1 : 0
  ELSE
    k[j] := 0
  FI
ENDFOR
k[MAX:4] := 0
AVX512F AVX512VL immintrin.h Compare

Compare packed unsigned 64-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).
FOR j := 0 to 3
  i := j*64
  IF k1[j]
    k[j] := ( a[i+63:i] <= b[i+63:i] ) ? 1 : 0
  ELSE
    k[j] := 0
  FI
ENDFOR
k[MAX:4] := 0
AVX512F AVX512VL immintrin.h Compare

Compare packed unsigned 64-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).
FOR j := 0 to 3
  i := j*64
  IF k1[j]
    k[j] := ( a[i+63:i] < b[i+63:i] ) ? 1 : 0
  ELSE
    k[j] := 0
  FI
ENDFOR
k[MAX:4] := 0
AVX512F AVX512VL immintrin.h Compare

Compare packed unsigned 64-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).
FOR j := 0 to 3
  i := j*64
  IF k1[j]
    k[j] := ( a[i+63:i] != b[i+63:i] ) ? 1 : 0
  ELSE
    k[j] := 0
  FI
ENDFOR
k[MAX:4] := 0
AVX512F AVX512VL immintrin.h Compare
Compare packed unsigned 64-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".
CASE (imm8[2:0]) OF
0: OP := _MM_CMPINT_EQ
1: OP := _MM_CMPINT_LT
2: OP := _MM_CMPINT_LE
3: OP := _MM_CMPINT_FALSE
4: OP := _MM_CMPINT_NE
5: OP := _MM_CMPINT_NLT
6: OP := _MM_CMPINT_NLE
7: OP := _MM_CMPINT_TRUE
ESAC
FOR j := 0 to 1
  i := j*64
  k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0
ENDFOR
k[MAX:2] := 0
AVX512F AVX512VL immintrin.h Compare

Compare packed unsigned 64-bit integers in "a" and "b" for equality, and store the results in mask vector "k".
FOR j := 0 to 1
  i := j*64
  k[j] := ( a[i+63:i] == b[i+63:i] ) ? 1 : 0
ENDFOR
k[MAX:2] := 0
AVX512F AVX512VL immintrin.h Compare

Compare packed unsigned 64-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k".
FOR j := 0 to 1
  i := j*64
  k[j] := ( a[i+63:i] >= b[i+63:i] ) ? 1 : 0
ENDFOR
k[MAX:2] := 0
AVX512F AVX512VL immintrin.h Compare

Compare packed unsigned 64-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k".
FOR j := 0 to 1
  i := j*64
  k[j] := ( a[i+63:i] > b[i+63:i] ) ? 1 : 0
ENDFOR
k[MAX:2] := 0
AVX512F AVX512VL immintrin.h Compare

Compare packed unsigned 64-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k".
FOR j := 0 to 1
  i := j*64
  k[j] := ( a[i+63:i] <= b[i+63:i] ) ? 1 : 0
ENDFOR
k[MAX:2] := 0
AVX512F AVX512VL immintrin.h Compare

Compare packed unsigned 64-bit integers in "a" and "b" for less-than, and store the results in mask vector "k".
FOR j := 0 to 1
  i := j*64
  k[j] := ( a[i+63:i] < b[i+63:i] ) ? 1 : 0
ENDFOR
k[MAX:2] := 0
AVX512F AVX512VL immintrin.h Compare

Compare packed unsigned 64-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k".
FOR j := 0 to 1
  i := j*64
  k[j] := ( a[i+63:i] != b[i+63:i] ) ? 1 : 0
ENDFOR
k[MAX:2] := 0
AVX512F AVX512VL immintrin.h Compare
Compare packed unsigned 64-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).
CASE (imm8[2:0]) OF
0: OP := _MM_CMPINT_EQ
1: OP := _MM_CMPINT_LT
2: OP := _MM_CMPINT_LE
3: OP := _MM_CMPINT_FALSE
4: OP := _MM_CMPINT_NE
5: OP := _MM_CMPINT_NLT
6: OP := _MM_CMPINT_NLE
7: OP := _MM_CMPINT_TRUE
ESAC
FOR j := 0 to 1
  i := j*64
  IF k1[j]
    k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0
  ELSE
    k[j] := 0
  FI
ENDFOR
k[MAX:2] := 0
AVX512F AVX512VL immintrin.h Compare

Compare packed unsigned 64-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).
FOR j := 0 to 1
  i := j*64
  IF k1[j]
    k[j] := ( a[i+63:i] == b[i+63:i] ) ? 1 : 0
  ELSE
    k[j] := 0
  FI
ENDFOR
k[MAX:2] := 0
AVX512F AVX512VL immintrin.h Compare

Compare packed unsigned 64-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).
FOR j := 0 to 1
  i := j*64
  IF k1[j]
    k[j] := ( a[i+63:i] >= b[i+63:i] ) ? 1 : 0
  ELSE
    k[j] := 0
  FI
ENDFOR
k[MAX:2] := 0
AVX512F AVX512VL immintrin.h Compare

Compare packed unsigned 64-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).
FOR j := 0 to 1
  i := j*64
  IF k1[j]
    k[j] := ( a[i+63:i] > b[i+63:i] ) ? 1 : 0
  ELSE
    k[j] := 0
  FI
ENDFOR
k[MAX:2] := 0
AVX512F AVX512VL immintrin.h Compare

Compare packed unsigned 64-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).
FOR j := 0 to 1
  i := j*64
  IF k1[j]
    k[j] := ( a[i+63:i] <= b[i+63:i] ) ? 1 : 0
  ELSE
    k[j] := 0
  FI
ENDFOR
k[MAX:2] := 0
AVX512F AVX512VL immintrin.h Compare

Compare packed unsigned 64-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).
FOR j := 0 to 1
  i := j*64
  IF k1[j]
    k[j] := ( a[i+63:i] < b[i+63:i] ) ? 1 : 0
  ELSE
    k[j] := 0
  FI
ENDFOR
k[MAX:2] := 0
AVX512F AVX512VL immintrin.h Compare

Compare packed unsigned 64-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).
FOR j := 0 to 1
  i := j*64
  IF k1[j]
    k[j] := ( a[i+63:i] != b[i+63:i] ) ? 1 : 0
  ELSE
    k[j] := 0
  FI
ENDFOR
k[MAX:2] := 0
AVX512F AVX512VL immintrin.h Compare
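As the pseudocode above makes explicit, every zeromasked compare is simply the unmasked result ANDed with "k1". A sketch of that identity, assuming the 2-lane equality entries map to `_mm_mask_cmpeq_epu64_mask` and `_mm_cmpeq_epu64_mask`:

/* Build with: cc -O2 -mavx512f -mavx512vl mask_identity.c */
#include <immintrin.h>
#include <assert.h>

int main(void) {
    __m128i a = _mm_set_epi64x(7, 5);  /* lane0 = 5, lane1 = 7 */
    __m128i b = _mm_set_epi64x(6, 5);  /* lane0 = 5, lane1 = 6 */
    __mmask8 k1 = 0x2;
    /* Zeromask form == k1 AND unmasked form, lane for lane. */
    assert(_mm_mask_cmpeq_epu64_mask(k1, a, b) ==
           (__mmask8)(k1 & _mm_cmpeq_epu64_mask(a, b)));
    return 0;
}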
Compute the bitwise AND of packed 32-bit integers in "a" and "b", producing intermediate 32-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is non-zero.
FOR j := 0 to 7
  i := j*32
  IF k1[j]
    k[j] := ((a[i+31:i] AND b[i+31:i]) != 0) ? 1 : 0
  ELSE
    k[j] := 0
  FI
ENDFOR
k[MAX:8] := 0
AVX512F AVX512VL immintrin.h Compare

Compute the bitwise AND of packed 32-bit integers in "a" and "b", producing intermediate 32-bit values, and set the corresponding bit in result mask "k" if the intermediate value is non-zero.
FOR j := 0 to 7
  i := j*32
  k[j] := ((a[i+31:i] AND b[i+31:i]) != 0) ? 1 : 0
ENDFOR
k[MAX:8] := 0
AVX512F AVX512VL immintrin.h Compare

Compute the bitwise AND of packed 32-bit integers in "a" and "b", producing intermediate 32-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is non-zero.
FOR j := 0 to 3
  i := j*32
  IF k1[j]
    k[j] := ((a[i+31:i] AND b[i+31:i]) != 0) ? 1 : 0
  ELSE
    k[j] := 0
  FI
ENDFOR
k[MAX:4] := 0
AVX512F AVX512VL immintrin.h Compare

Compute the bitwise AND of packed 32-bit integers in "a" and "b", producing intermediate 32-bit values, and set the corresponding bit in result mask "k" if the intermediate value is non-zero.
FOR j := 0 to 3
  i := j*32
  k[j] := ((a[i+31:i] AND b[i+31:i]) != 0) ? 1 : 0
ENDFOR
k[MAX:4] := 0
AVX512F AVX512VL immintrin.h Compare

Compute the bitwise AND of packed 64-bit integers in "a" and "b", producing intermediate 64-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is non-zero.
FOR j := 0 to 3
  i := j*64
  IF k1[j]
    k[j] := ((a[i+63:i] AND b[i+63:i]) != 0) ? 1 : 0
  ELSE
    k[j] := 0
  FI
ENDFOR
k[MAX:4] := 0
AVX512F AVX512VL immintrin.h Compare

Compute the bitwise AND of packed 64-bit integers in "a" and "b", producing intermediate 64-bit values, and set the corresponding bit in result mask "k" if the intermediate value is non-zero.
FOR j := 0 to 3
  i := j*64
  k[j] := ((a[i+63:i] AND b[i+63:i]) != 0) ? 1 : 0
ENDFOR
k[MAX:4] := 0
AVX512F AVX512VL immintrin.h Compare

Compute the bitwise AND of packed 64-bit integers in "a" and "b", producing intermediate 64-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is non-zero.
FOR j := 0 to 1
  i := j*64
  IF k1[j]
    k[j] := ((a[i+63:i] AND b[i+63:i]) != 0) ? 1 : 0
  ELSE
    k[j] := 0
  FI
ENDFOR
k[MAX:2] := 0
AVX512F AVX512VL immintrin.h Compare

Compute the bitwise AND of packed 64-bit integers in "a" and "b", producing intermediate 64-bit values, and set the corresponding bit in result mask "k" if the intermediate value is non-zero.
FOR j := 0 to 1
  i := j*64
  k[j] := ((a[i+63:i] AND b[i+63:i]) != 0) ? 1 : 0
ENDFOR
k[MAX:2] := 0
AVX512F AVX512VL immintrin.h Compare
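These test entries set a mask bit per lane whose bitwise AND is non-zero, which is useful for "do these lanes share any bits?" checks without a separate compare. A sketch assuming the 8-lane form is `_mm256_test_epi32_mask`:

/* Build with: cc -O2 -mavx512f -mavx512vl test_demo.c */
#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m256i a = _mm256_setr_epi32(1, 2, 4, 8, 0, 3, 0, 1);
    __m256i b = _mm256_setr_epi32(1, 1, 4, 7, 9, 1, 0, 2);
    /* Bit j of k is set when (a[j] & b[j]) != 0: lanes 0, 2 and 5 here. */
    __mmask8 k = _mm256_test_epi32_mask(a, b);
    printf("%#x\n", k);  /* 0x25 */
    return 0;
}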
Compute the bitwise NAND of packed 32-bit integers in "a" and "b", producing intermediate 32-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is zero.
FOR j := 0 to 7
  i := j*32
  IF k1[j]
    k[j] := ((a[i+31:i] AND b[i+31:i]) == 0) ? 1 : 0
  ELSE
    k[j] := 0
  FI
ENDFOR
k[MAX:8] := 0
AVX512F AVX512VL immintrin.h Compare

Compute the bitwise NAND of packed 32-bit integers in "a" and "b", producing intermediate 32-bit values, and set the corresponding bit in result mask "k" if the intermediate value is zero.
FOR j := 0 to 7
  i := j*32
  k[j] := ((a[i+31:i] AND b[i+31:i]) == 0) ? 1 : 0
ENDFOR
k[MAX:8] := 0
AVX512F AVX512VL immintrin.h Compare

Compute the bitwise NAND of packed 32-bit integers in "a" and "b", producing intermediate 32-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is zero.
FOR j := 0 to 3
  i := j*32
  IF k1[j]
    k[j] := ((a[i+31:i] AND b[i+31:i]) == 0) ? 1 : 0
  ELSE
    k[j] := 0
  FI
ENDFOR
k[MAX:4] := 0
AVX512F AVX512VL immintrin.h Compare

Compute the bitwise NAND of packed 32-bit integers in "a" and "b", producing intermediate 32-bit values, and set the corresponding bit in result mask "k" if the intermediate value is zero.
FOR j := 0 to 3
  i := j*32
  k[j] := ((a[i+31:i] AND b[i+31:i]) == 0) ? 1 : 0
ENDFOR
k[MAX:4] := 0
AVX512F AVX512VL immintrin.h Compare

Compute the bitwise NAND of packed 64-bit integers in "a" and "b", producing intermediate 64-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is zero.
FOR j := 0 to 3
  i := j*64
  IF k1[j]
    k[j] := ((a[i+63:i] AND b[i+63:i]) == 0) ? 1 : 0
  ELSE
    k[j] := 0
  FI
ENDFOR
k[MAX:4] := 0
AVX512F AVX512VL immintrin.h Compare

Compute the bitwise NAND of packed 64-bit integers in "a" and "b", producing intermediate 64-bit values, and set the corresponding bit in result mask "k" if the intermediate value is zero.
FOR j := 0 to 3
  i := j*64
  k[j] := ((a[i+63:i] AND b[i+63:i]) == 0) ? 1 : 0
ENDFOR
k[MAX:4] := 0
AVX512F AVX512VL immintrin.h Compare

Compute the bitwise NAND of packed 64-bit integers in "a" and "b", producing intermediate 64-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is zero.
FOR j := 0 to 1
  i := j*64
  IF k1[j]
    k[j] := ((a[i+63:i] AND b[i+63:i]) == 0) ? 1 : 0
  ELSE
    k[j] := 0
  FI
ENDFOR
k[MAX:2] := 0
AVX512F AVX512VL immintrin.h Compare

Compute the bitwise NAND of packed 64-bit integers in "a" and "b", producing intermediate 64-bit values, and set the corresponding bit in result mask "k" if the intermediate value is zero.
FOR j := 0 to 1
  i := j*64
  k[j] := ((a[i+63:i] AND b[i+63:i]) == 0) ? 1 : 0
ENDFOR
k[MAX:2] := 0
AVX512F AVX512VL immintrin.h Compare
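testn is the lane-wise complement of test (AND equal to zero instead of non-zero). Because all eight lanes of a __m256i fill a __mmask8 exactly, the two masks are bitwise complements; a sketch assuming `_mm256_testn_epi32_mask`:

/* Build with: cc -O2 -mavx512f -mavx512vl testn_demo.c */
#include <immintrin.h>
#include <assert.h>

int main(void) {
    __m256i a = _mm256_setr_epi32(1, 2, 4, 8, 0, 3, 0, 1);
    __m256i b = _mm256_setr_epi32(1, 1, 4, 7, 9, 1, 0, 2);
    /* Bit j is set when (a[j] & b[j]) == 0, i.e. exactly where test is 0. */
    assert(_mm256_testn_epi32_mask(a, b) ==
           (__mmask8)~_mm256_test_epi32_mask(a, b));
    return 0;
}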
Contiguously store the active double-precision (64-bit) floating-point elements in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".
size := 64
m := base_addr
FOR j := 0 to 3
  i := j*64
  IF k[j]
    MEM[m+size-1:m] := a[i+63:i]
    m := m + size
  FI
ENDFOR
AVX512F AVX512VL immintrin.h Store

Contiguously store the active double-precision (64-bit) floating-point elements in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".
size := 64
m := base_addr
FOR j := 0 to 1
  i := j*64
  IF k[j]
    MEM[m+size-1:m] := a[i+63:i]
    m := m + size
  FI
ENDFOR
AVX512F AVX512VL immintrin.h Store

Contiguously store the active single-precision (32-bit) floating-point elements in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".
size := 32
m := base_addr
FOR j := 0 to 7
  i := j*32
  IF k[j]
    MEM[m+size-1:m] := a[i+31:i]
    m := m + size
  FI
ENDFOR
AVX512F AVX512VL immintrin.h Store

Contiguously store the active single-precision (32-bit) floating-point elements in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".
size := 32
m := base_addr
FOR j := 0 to 3
  i := j*32
  IF k[j]
    MEM[m+size-1:m] := a[i+31:i]
    m := m + size
  FI
ENDFOR
AVX512F AVX512VL immintrin.h Store
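Compress-store packs only the active lanes, in lane order, into a contiguous unaligned run starting at "base_addr"; bytes past the packed run are not written. A sketch assuming the 8-lane single-precision entry is `_mm256_mask_compressstoreu_ps` (`out` is an illustrative buffer):

/* Build with: cc -O2 -mavx512f -mavx512vl compress_demo.c */
#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m256 a = _mm256_setr_ps(0, 1, 2, 3, 4, 5, 6, 7);
    float out[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
    /* Mask 0x26 = lanes 1, 2 and 5; they are packed to out[0..2]
       and the tail of the buffer is left untouched. */
    _mm256_mask_compressstoreu_ps(out, 0x26, a);
    printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);  /* 1 2 5 -1 */
    return 0;
}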
Store packed double-precision (64-bit) floating-point elements from "a" into memory using writemask "k".
"mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated.
FOR j := 0 to 3
  i := j*64
  IF k[j]
    MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i]
  FI
ENDFOR
AVX512F AVX512VL immintrin.h Store

Store packed double-precision (64-bit) floating-point elements from "a" into memory using writemask "k".
"mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.
FOR j := 0 to 1
  i := j*64
  IF k[j]
    MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i]
  FI
ENDFOR
AVX512F AVX512VL immintrin.h Store

Store packed single-precision (32-bit) floating-point elements from "a" into memory using writemask "k".
"mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated.
FOR j := 0 to 7
  i := j*32
  IF k[j]
    MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i]
  FI
ENDFOR
AVX512F AVX512VL immintrin.h Store

Store packed single-precision (32-bit) floating-point elements from "a" into memory using writemask "k".
"mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.
FOR j := 0 to 3
  i := j*32
  IF k[j]
    MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i]
  FI
ENDFOR
AVX512F AVX512VL immintrin.h Store

Store packed 32-bit integers from "a" into memory using writemask "k".
"mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated.
FOR j := 0 to 7
  i := j*32
  IF k[j]
    MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i]
  FI
ENDFOR
AVX512F AVX512VL immintrin.h Store

Store packed 32-bit integers from "a" into memory using writemask "k".
"mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.
FOR j := 0 to 3
  i := j*32
  IF k[j]
    MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i]
  FI
ENDFOR
AVX512F AVX512VL immintrin.h Store

Store packed 64-bit integers from "a" into memory using writemask "k".
"mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated.
FOR j := 0 to 3
  i := j*64
  IF k[j]
    MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i]
  FI
ENDFOR
AVX512F AVX512VL immintrin.h Store

Store packed 64-bit integers from "a" into memory using writemask "k".
"mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.
FOR j := 0 to 1
  i := j*64
  IF k[j]
    MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i]
  FI
ENDFOR
AVX512F AVX512VL immintrin.h Store
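The aligned masked stores write only the selected elements in place (inactive lanes keep their previous memory contents) and fault if "mem_addr" misses the stated boundary. A sketch assuming the 8-lane integer entry is `_mm256_mask_store_epi32`; `_Alignas(32)` supplies the required 32-byte alignment:

/* Build with: cc -O2 -mavx512f -mavx512vl store_demo.c */
#include <immintrin.h>
#include <stdio.h>

int main(void) {
    _Alignas(32) int dst[8] = {0};
    __m256i a = _mm256_set1_epi32(7);
    /* Mask 0x0F: only lanes 0..3 are written; lanes 4..7 of dst
       keep their old values (no store, no fault). */
    _mm256_mask_store_epi32(dst, 0x0F, a);
    printf("%d %d\n", dst[3], dst[4]);  /* 7 0 */
    return 0;
}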
Store packed 32-bit integers from "a" into memory using writemask "k".
"mem_addr" does not need to be aligned on any particular boundary.
FOR j := 0 to 7
  i := j*32
  IF k[j]
    MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i]
  FI
ENDFOR
AVX512F AVX512VL immintrin.h Store

Store packed 32-bit integers from "a" into memory using writemask "k".
"mem_addr" does not need to be aligned on any particular boundary.
FOR j := 0 to 3
  i := j*32
  IF k[j]
    MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i]
  FI
ENDFOR
AVX512F AVX512VL immintrin.h Store

Store packed 64-bit integers from "a" into memory using writemask "k".
"mem_addr" does not need to be aligned on any particular boundary.
FOR j := 0 to 3
  i := j*64
  IF k[j]
    MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i]
  FI
ENDFOR
AVX512F AVX512VL immintrin.h Store

Store packed 64-bit integers from "a" into memory using writemask "k".
"mem_addr" does not need to be aligned on any particular boundary.
FOR j := 0 to 1
  i := j*64
  IF k[j]
    MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i]
  FI
ENDFOR
AVX512F AVX512VL immintrin.h Store

Store packed double-precision (64-bit) floating-point elements from "a" into memory using writemask "k".
"mem_addr" does not need to be aligned on any particular boundary.
FOR j := 0 to 3
  i := j*64
  IF k[j]
    MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i]
  FI
ENDFOR
AVX512F AVX512VL immintrin.h Store

Store packed double-precision (64-bit) floating-point elements from "a" into memory using writemask "k".
"mem_addr" does not need to be aligned on any particular boundary.
FOR j := 0 to 1
  i := j*64
  IF k[j]
    MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i]
  FI
ENDFOR
AVX512F AVX512VL immintrin.h Store

Store packed single-precision (32-bit) floating-point elements from "a" into memory using writemask "k".
"mem_addr" does not need to be aligned on any particular boundary.
FOR j := 0 to 7
  i := j*32
  IF k[j]
    MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i]
  FI
ENDFOR
AVX512F AVX512VL immintrin.h Store

Store packed single-precision (32-bit) floating-point elements from "a" into memory using writemask "k".
"mem_addr" does not need to be aligned on any particular boundary.
FOR j := 0 to 3
  i := j*32
  IF k[j]
    MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i]
  FI
ENDFOR
AVX512F AVX512VL immintrin.h Store
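The storeu forms drop the alignment requirement, so a masked element can land at any byte offset. A sketch assuming the 2-lane double entry is `_mm_mask_storeu_pd` (`buf` is an illustrative byte buffer):

/* Build with: cc -O2 -mavx512f -mavx512vl storeu_demo.c */
#include <immintrin.h>
#include <string.h>
#include <stdio.h>

int main(void) {
    unsigned char buf[64] = {0};
    __m128d a = _mm_set_pd(2.5, 1.5);   /* lane0 = 1.5, lane1 = 2.5 */
    /* Mask 0x1: only lane 0 is written, at a deliberately
       misaligned offset of 3 bytes. */
    _mm_mask_storeu_pd(buf + 3, 0x1, a);
    double d;
    memcpy(&d, buf + 3, sizeof d);
    printf("%g\n", d);  /* 1.5 */
    return 0;
}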
Contiguously store the active 32-bit integers in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".
size := 32
m := base_addr
FOR j := 0 to 7
  i := j*32
  IF k[j]
    MEM[m+size-1:m] := a[i+31:i]
    m := m + size
  FI
ENDFOR
AVX512F AVX512VL immintrin.h Store

Contiguously store the active 32-bit integers in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".
size := 32
m := base_addr
FOR j := 0 to 3
  i := j*32
  IF k[j]
    MEM[m+size-1:m] := a[i+31:i]
    m := m + size
  FI
ENDFOR
AVX512F AVX512VL immintrin.h Store

Contiguously store the active 64-bit integers in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".
size := 64
m := base_addr
FOR j := 0 to 3
  i := j*64
  IF k[j]
    MEM[m+size-1:m] := a[i+63:i]
    m := m + size
  FI
ENDFOR
AVX512F AVX512VL immintrin.h Store

Contiguously store the active 64-bit integers in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".
size := 64
m := base_addr
FOR j := 0 to 1
  i := j*64
  IF k[j]
    MEM[m+size-1:m] := a[i+63:i]
    m := m + size
  FI
ENDFOR
AVX512F AVX512VL immintrin.h Store
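For the integer compress-stores, the number of elements actually written equals the number of set mask bits, which is how callers typically advance their output cursor. A sketch assuming the 8-lane entry is `_mm256_mask_compressstoreu_epi32` and using the GCC/Clang `__builtin_popcount`:

/* Build with: cc -O2 -mavx512f -mavx512vl compress_int_demo.c */
#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m256i a = _mm256_setr_epi32(10, 11, 12, 13, 14, 15, 16, 17);
    int out[8];
    __mmask8 k = 0xD1;  /* lanes 0, 4, 6, 7 */
    _mm256_mask_compressstoreu_epi32(out, k, a);
    /* popcount(k) elements were written: 10, 14, 16, 17. */
    int n = __builtin_popcount(k);
    printf("%d written, last = %d\n", n, out[n - 1]);  /* 4 written, last = 17 */
    return 0;
}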
- - - - - - Scatter 32-bit integers from "a" into memory using 32-bit indices. 32-bit - elements are stored at addresses starting at "base_addr" and offset by each 32-bit - element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be - 1, 2, 4 or 8. - - FOR j := 0 to 7 - i := j*32 - m := j*32 - addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 - MEM[addr+31:addr] := a[i+31:i] - ENDFOR - - - AVX512F - AVX512VL -
immintrin.h
- Store + + + + + + Scatter 32-bit integers from "a" into memory using 32-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 7 + i := j*32 + m := j*32 + addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 + MEM[addr+31:addr] := a[i+31:i] +ENDFOR + + + AVX512F + AVX512VL +
immintrin.h
+ Store
- - - - - - - Scatter 32-bit integers from "a" into memory using 32-bit indices. 32-bit - elements are stored at addresses starting at "base_addr" and offset by each 32-bit - element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" - (elements are not stored when the corresponding mask bit is not set). "scale" should be - 1, 2, 4 or 8. - - FOR j := 0 to 7 - i := j*32 - m := j*32 - IF k[j] - addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 - MEM[addr+31:addr] := a[i+31:i] - FI - ENDFOR - - - AVX512F - AVX512VL -
immintrin.h
- Store + + + + + + + Scatter 32-bit integers from "a" into memory using 32-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 7 + i := j*32 + m := j*32 + IF k[j] + addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 + MEM[addr+31:addr] := a[i+31:i] + FI +ENDFOR + + + AVX512F + AVX512VL +
immintrin.h
+ Store
- - - - - - Scatter 32-bit integers from "a" into memory using 32-bit indices. 32-bit - elements are stored at addresses starting at "base_addr" and offset by each 32-bit - element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be - 1, 2, 4 or 8. - - FOR j := 0 to 3 - i := j*32 - m := j*32 - addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 - MEM[addr+31:addr] := a[i+31:i] - ENDFOR - - - AVX512F - AVX512VL -
immintrin.h
- Store + + + + + + Scatter 32-bit integers from "a" into memory using 32-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 3 + i := j*32 + m := j*32 + addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 + MEM[addr+31:addr] := a[i+31:i] +ENDFOR + + + AVX512F + AVX512VL +
immintrin.h
+ Store
- - - - - - - Scatter 32-bit integers from "a" into memory using 32-bit indices. 32-bit - elements are stored at addresses starting at "base_addr" and offset by each 32-bit - element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" - (elements are not stored when the corresponding mask bit is not set). "scale" should be - 1, 2, 4 or 8. - - FOR j := 0 to 3 - i := j*32 - m := j*32 - IF k[j] - addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 - MEM[addr+31:addr] := a[i+31:i] - FI - ENDFOR - - - AVX512F - AVX512VL -
immintrin.h
- Store + + + + + + + Scatter 32-bit integers from "a" into memory using 32-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 3 + i := j*32 + m := j*32 + IF k[j] + addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 + MEM[addr+31:addr] := a[i+31:i] + FI +ENDFOR + + + AVX512F + AVX512VL +
immintrin.h
+ Store
- - - - - - Scatter 64-bit integers from "a" into memory using 32-bit indices. 64-bit - elements are stored at addresses starting at "base_addr" and offset by each 32-bit - element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be - 1, 2, 4 or 8. - - FOR j := 0 to 3 - i := j*64 - m := j*32 - addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 - MEM[addr+63:addr] := a[i+63:i] - ENDFOR - - - AVX512F - AVX512VL -
immintrin.h
Store
Scatter 64-bit integers from "a" into memory using 32-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.
FOR j := 0 to 3
    i := j*64
    m := j*32
    addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
    MEM[addr+63:addr] := a[i+63:i]
ENDFOR
AVX512F, AVX512VL
immintrin.h

Store
Scatter 64-bit integers from "a" into memory using 32-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.
FOR j := 0 to 3
    i := j*64
    m := j*32
    IF k[j]
        addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
        MEM[addr+63:addr] := a[i+63:i]
    FI
ENDFOR
AVX512F, AVX512VL
immintrin.h

Store
Scatter 64-bit integers from "a" into memory using 32-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.
FOR j := 0 to 1
    i := j*64
    m := j*32
    addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
    MEM[addr+63:addr] := a[i+63:i]
ENDFOR
AVX512F, AVX512VL
immintrin.h

Store
Scatter 64-bit integers from "a" into memory using 32-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.
FOR j := 0 to 1
    i := j*64
    m := j*32
    IF k[j]
        addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
        MEM[addr+63:addr] := a[i+63:i]
    FI
ENDFOR
AVX512F, AVX512VL
immintrin.h
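
Note that the scatter pseudocode is bit-addressed: MEM[addr+63:addr] selects bits, which is why the byte offset vindex * scale is multiplied by 8. A minimal scalar model of the masked form above, working in bytes; the function name and slice-based addressing are illustrative, not the stdarch API:

// Scalar model of the masked 32-bit-index scatter of 64-bit elements.
// Negative offsets are omitted for brevity in this model.
fn mask_i32scatter_epi64_model(base: &mut [u8], k: u8, vindex: [i32; 4], a: [i64; 4], scale: i64) {
    assert!(matches!(scale, 1 | 2 | 4 | 8), "scale should be 1, 2, 4 or 8");
    for j in 0..4 {
        // elements are not stored when the corresponding mask bit is not set
        if (k >> j) & 1 == 0 {
            continue;
        }
        // addr := base_addr + SignExtend64(vindex[j]) * ZeroExtend64(scale)
        let addr = (vindex[j] as i64 * scale) as usize;
        base[addr..addr + 8].copy_from_slice(&a[j].to_le_bytes());
    }
}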
Store
Scatter 32-bit integers from "a" into memory using 64-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.
FOR j := 0 to 3
    i := j*32
    m := j*64
    addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
    MEM[addr+31:addr] := a[i+31:i]
ENDFOR
AVX512F, AVX512VL
immintrin.h

Store
Scatter 32-bit integers from "a" into memory using 64-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.
FOR j := 0 to 3
    i := j*32
    m := j*64
    IF k[j]
        addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
        MEM[addr+31:addr] := a[i+31:i]
    FI
ENDFOR
AVX512F, AVX512VL
immintrin.h

Store
Scatter 32-bit integers from "a" into memory using 64-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.
FOR j := 0 to 1
    i := j*32
    m := j*64
    addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
    MEM[addr+31:addr] := a[i+31:i]
ENDFOR
AVX512F, AVX512VL
immintrin.h

Store
Scatter 32-bit integers from "a" into memory using 64-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.
FOR j := 0 to 1
    i := j*32
    m := j*64
    IF k[j]
        addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
        MEM[addr+31:addr] := a[i+31:i]
    FI
ENDFOR
AVX512F, AVX512VL
immintrin.h
Store
Scatter 64-bit integers from "a" into memory using 64-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.
FOR j := 0 to 3
    i := j*64
    m := j*64
    addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
    MEM[addr+63:addr] := a[i+63:i]
ENDFOR
AVX512F, AVX512VL
immintrin.h

Store
Scatter 64-bit integers from "a" into memory using 64-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.
FOR j := 0 to 3
    i := j*64
    m := j*64
    IF k[j]
        addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
        MEM[addr+63:addr] := a[i+63:i]
    FI
ENDFOR
AVX512F, AVX512VL
immintrin.h

Store
Scatter 64-bit integers from "a" into memory using 64-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.
FOR j := 0 to 1
    i := j*64
    m := j*64
    addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
    MEM[addr+63:addr] := a[i+63:i]
ENDFOR
AVX512F, AVX512VL
immintrin.h

Store
Scatter 64-bit integers from "a" into memory using 64-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.
FOR j := 0 to 1
    i := j*64
    m := j*64
    IF k[j]
        addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
        MEM[addr+63:addr] := a[i+63:i]
    FI
ENDFOR
AVX512F, AVX512VL
immintrin.h
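
The 64-bit-index forms need no sign extension, since the index lane is already 64 bits wide. A hypothetical Rust usage sketch, assuming these AVX512VL wrappers land in core::arch with the same shape as the existing AVX512F scatters (a *mut u8 base, an index vector, a data vector, and a const SCALE of 1, 2, 4 or 8):

#![feature(stdarch_x86_avx512)]
use core::arch::x86_64::*;

// Hypothetical usage, not the committed stdarch API.
#[target_feature(enable = "avx512f,avx512vl")]
unsafe fn scatter_4x_i64(dst: &mut [i64], idx: __m256i, vals: __m256i) {
    // SCALE = 8 turns each 64-bit index into an element index into `dst`.
    _mm256_i64scatter_epi64::<8>(dst.as_mut_ptr().cast::<u8>(), idx, vals);
}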
Store
Scatter double-precision (64-bit) floating-point elements from "a" into memory using 32-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.
FOR j := 0 to 3
    i := j*64
    m := j*32
    addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
    MEM[addr+63:addr] := a[i+63:i]
ENDFOR
AVX512F, AVX512VL
immintrin.h

Store
Scatter double-precision (64-bit) floating-point elements from "a" into memory using 32-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.
FOR j := 0 to 3
    i := j*64
    m := j*32
    IF k[j]
        addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
        MEM[addr+63:addr] := a[i+63:i]
    FI
ENDFOR
AVX512F, AVX512VL
immintrin.h

Store
Scatter double-precision (64-bit) floating-point elements from "a" into memory using 32-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.
FOR j := 0 to 1
    i := j*64
    m := j*32
    addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
    MEM[addr+63:addr] := a[i+63:i]
ENDFOR
AVX512F, AVX512VL
immintrin.h

Store
Scatter double-precision (64-bit) floating-point elements from "a" into memory using 32-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.
FOR j := 0 to 1
    i := j*64
    m := j*32
    IF k[j]
        addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
        MEM[addr+63:addr] := a[i+63:i]
    FI
ENDFOR
AVX512F, AVX512VL
immintrin.h
Store
Scatter single-precision (32-bit) floating-point elements from "a" into memory using 32-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.
FOR j := 0 to 7
    i := j*32
    m := j*32
    addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
    MEM[addr+31:addr] := a[i+31:i]
ENDFOR
AVX512F, AVX512VL
immintrin.h

Store
Scatter single-precision (32-bit) floating-point elements from "a" into memory using 32-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.
FOR j := 0 to 7
    i := j*32
    m := j*32
    IF k[j]
        addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
        MEM[addr+31:addr] := a[i+31:i]
    FI
ENDFOR
AVX512F, AVX512VL
immintrin.h

Store
Scatter single-precision (32-bit) floating-point elements from "a" into memory using 32-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.
FOR j := 0 to 3
    i := j*32
    m := j*32
    addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
    MEM[addr+31:addr] := a[i+31:i]
ENDFOR
AVX512F, AVX512VL
immintrin.h

Store
Scatter single-precision (32-bit) floating-point elements from "a" into memory using 32-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.
FOR j := 0 to 3
    i := j*32
    m := j*32
    IF k[j]
        addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
        MEM[addr+31:addr] := a[i+31:i]
    FI
ENDFOR
AVX512F, AVX512VL
immintrin.h
Store
Scatter double-precision (64-bit) floating-point elements from "a" into memory using 64-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.
FOR j := 0 to 3
    i := j*64
    m := j*64
    addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
    MEM[addr+63:addr] := a[i+63:i]
ENDFOR
AVX512F, AVX512VL
immintrin.h

Store
Scatter double-precision (64-bit) floating-point elements from "a" into memory using 64-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.
FOR j := 0 to 3
    i := j*64
    m := j*64
    IF k[j]
        addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
        MEM[addr+63:addr] := a[i+63:i]
    FI
ENDFOR
AVX512F, AVX512VL
immintrin.h

Store
Scatter double-precision (64-bit) floating-point elements from "a" into memory using 64-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.
FOR j := 0 to 1
    i := j*64
    m := j*64
    addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
    MEM[addr+63:addr] := a[i+63:i]
ENDFOR
AVX512F, AVX512VL
immintrin.h

Store
Scatter double-precision (64-bit) floating-point elements from "a" into memory using 64-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.
FOR j := 0 to 1
    i := j*64
    m := j*64
    IF k[j]
        addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
        MEM[addr+63:addr] := a[i+63:i]
    FI
ENDFOR
AVX512F, AVX512VL
immintrin.h
Store
Scatter single-precision (32-bit) floating-point elements from "a" into memory using 64-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.
FOR j := 0 to 3
    i := j*32
    m := j*64
    addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
    MEM[addr+31:addr] := a[i+31:i]
ENDFOR
AVX512F, AVX512VL
immintrin.h

Store
Scatter single-precision (32-bit) floating-point elements from "a" into memory using 64-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.
FOR j := 0 to 3
    i := j*32
    m := j*64
    IF k[j]
        addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
        MEM[addr+31:addr] := a[i+31:i]
    FI
ENDFOR
AVX512F, AVX512VL
immintrin.h

Store
Scatter single-precision (32-bit) floating-point elements from "a" into memory using 64-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.
FOR j := 0 to 1
    i := j*32
    m := j*64
    addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
    MEM[addr+31:addr] := a[i+31:i]
ENDFOR
AVX512F, AVX512VL
immintrin.h

Store
Scatter single-precision (32-bit) floating-point elements from "a" into memory using 64-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.
FOR j := 0 to 1
    i := j*32
    m := j*64
    IF k[j]
        addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
        MEM[addr+31:addr] := a[i+31:i]
    FI
ENDFOR
AVX512F, AVX512VL
immintrin.h
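
A scalar model of the 128-bit form above; it also shows how "scale" turns the 64-bit indices into element indices (scale = 4 for f32) or raw byte offsets (scale = 1). Names are illustrative:

fn i64scatter_ps_model(dst: &mut [u8], vindex: [i64; 2], a: [f32; 2], scale: i64) {
    for j in 0..2 {
        // scale = 4 makes vindex an element index; scale = 1 a byte offset
        let addr = (vindex[j] * scale) as usize;
        dst[addr..addr + 4].copy_from_slice(&a[j].to_le_bytes());
    }
}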
Store
Store 256-bits (composed of 4 packed 64-bit integers) from "a" into memory. "mem_addr" does not need to be aligned on any particular boundary.
MEM[mem_addr+255:mem_addr] := a[255:0]
AVX512F, AVX512VL
immintrin.h

Store
Store 256-bits (composed of 8 packed 32-bit integers) from "a" into memory. "mem_addr" does not need to be aligned on any particular boundary.
MEM[mem_addr+255:mem_addr] := a[255:0]
AVX512F, AVX512VL
immintrin.h

Store
Store 128-bits (composed of 2 packed 64-bit integers) from "a" into memory. "mem_addr" does not need to be aligned on any particular boundary.
MEM[mem_addr+127:mem_addr] := a[127:0]
AVX512F, AVX512VL
immintrin.h

Store
Store 128-bits (composed of 4 packed 32-bit integers) from "a" into memory. "mem_addr" does not need to be aligned on any particular boundary.
MEM[mem_addr+127:mem_addr] := a[127:0]
AVX512F, AVX512VL
immintrin.h
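
A minimal model of the unaligned variants, using only core::ptr; write_unaligned mirrors the "does not need to be aligned" wording:

use core::ptr;

// Models MEM[mem_addr+255:mem_addr] := a[255:0] with no alignment demand.
unsafe fn storeu_256_model(mem_addr: *mut u8, a: [u64; 4]) {
    ptr::write_unaligned(mem_addr.cast::<[u64; 4]>(), a);
}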
Store
Store 256-bits (composed of 4 packed 64-bit integers) from "a" into memory. "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated.
MEM[mem_addr+255:mem_addr] := a[255:0]
AVX512F, AVX512VL
immintrin.h

Store
Store 256-bits (composed of 8 packed 32-bit integers) from "a" into memory. "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated.
MEM[mem_addr+255:mem_addr] := a[255:0]
AVX512F, AVX512VL
immintrin.h

Store
Store 128-bits (composed of 2 packed 64-bit integers) from "a" into memory. "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.
MEM[mem_addr+127:mem_addr] := a[127:0]
AVX512F, AVX512VL
immintrin.h

Store
Store 128-bits (composed of 4 packed 32-bit integers) from "a" into memory. "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.
MEM[mem_addr+127:mem_addr] := a[127:0]
AVX512F, AVX512VL
immintrin.h
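
For the aligned variants, one way to satisfy the 32-byte (or 16-byte) requirement in Rust is an over-aligned wrapper type; a sketch, with the type name and assertion purely illustrative:

#[repr(C, align(32))]
struct Aligned32([u64; 4]);

fn store_256_model(mem_addr: &mut Aligned32, a: [u64; 4]) {
    // A &mut Aligned32 is 32-byte aligned by construction, so the aligned
    // store cannot fault; the assertion only documents the requirement.
    debug_assert_eq!(mem_addr as *mut Aligned32 as usize % 32, 0);
    mem_addr.0 = a;
}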
+ Store
- - - - - Convert packed signed 32-bit integers in "a" to packed double-precision - (64-bit) floating-point elements, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - m := j*64 - IF k[j] - dst[m+63:m] := Convert_Int32_To_FP64(a[i+31:i]) - ELSE - dst[m+63:m] := src[m+63:m] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed signed 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + m := j*64 + IF k[j] + dst[m+63:m] := Convert_Int32_To_FP64(a[i+31:i]) + ELSE + dst[m+63:m] := src[m+63:m] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed signed 32-bit integers in "a" to packed double-precision - (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - m := j*64 - IF k[j] - dst[m+63:m] := Convert_Int32_To_FP64(a[i+31:i]) - ELSE - dst[m+63:m] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + Convert packed signed 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + m := j*64 + IF k[j] + dst[m+63:m] := Convert_Int32_To_FP64(a[i+31:i]) + ELSE + dst[m+63:m] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed signed 32-bit integers in "a" to packed double-precision - (64-bit) floating-point elements, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*32 - m := j*64 - IF k[j] - dst[m+63:m] := Convert_Int32_To_FP64(a[i+31:i]) - ELSE - dst[m+63:m] := src[m+63:m] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed signed 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*32 + m := j*64 + IF k[j] + dst[m+63:m] := Convert_Int32_To_FP64(a[i+31:i]) + ELSE + dst[m+63:m] := src[m+63:m] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed signed 32-bit integers in "a" to packed double-precision - (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*32 - m := j*64 - IF k[j] - dst[m+63:m] := Convert_Int32_To_FP64(a[i+31:i]) - ELSE - dst[m+63:m] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + Convert packed signed 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*32 + m := j*64 + IF k[j] + dst[m+63:m] := Convert_Int32_To_FP64(a[i+31:i]) + ELSE + dst[m+63:m] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
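
A scalar model of the writemask/zeromask pair above (256-bit form); the two differ only in what masked-off lanes receive, src element versus zero:

fn mask_cvtepi32_pd_model(src: [f64; 4], k: u8, a: [i32; 4]) -> [f64; 4] {
    core::array::from_fn(|j| if (k >> j) & 1 != 0 { a[j] as f64 } else { src[j] })
}

fn maskz_cvtepi32_pd_model(k: u8, a: [i32; 4]) -> [f64; 4] {
    core::array::from_fn(|j| if (k >> j) & 1 != 0 { a[j] as f64 } else { 0.0 })
}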
Convert
Convert packed signed 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
FOR j := 0 to 7
    i := j*32
    IF k[j]
        dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i])
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:256] := 0
AVX512F, AVX512VL
immintrin.h

Convert
Convert packed signed 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
FOR j := 0 to 7
    i := 32*j
    IF k[j]
        dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i])
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0
AVX512F, AVX512VL
immintrin.h

Convert
Convert packed signed 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
FOR j := 0 to 3
    i := j*32
    IF k[j]
        dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i])
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:128] := 0
AVX512F, AVX512VL
immintrin.h

Convert
Convert packed signed 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
FOR j := 0 to 3
    i := 32*j
    IF k[j]
        dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i])
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0
AVX512F, AVX512VL
immintrin.h
Convert
Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
FOR j := 0 to 3
    i := j*32
    l := j*64
    IF k[j]
        dst[i+31:i] := Convert_FP64_To_Int32(a[l+63:l])
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:128] := 0
AVX512F, AVX512VL
immintrin.h

Convert
Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
FOR j := 0 to 3
    i := 32*j
    l := 64*j
    IF k[j]
        dst[i+31:i] := Convert_FP64_To_Int32(a[l+63:l])
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0
AVX512F, AVX512VL
immintrin.h

Convert
Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
FOR j := 0 to 1
    i := j*32
    l := j*64
    IF k[j]
        dst[i+31:i] := Convert_FP64_To_Int32(a[l+63:l])
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:64] := 0
AVX512F, AVX512VL
immintrin.h

Convert
Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
FOR j := 0 to 1
    i := 32*j
    l := 64*j
    IF k[j]
        dst[i+31:i] := Convert_FP64_To_Int32(a[l+63:l])
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:64] := 0
AVX512F, AVX512VL
immintrin.h
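
Convert_FP64_To_Int32 follows the current MXCSR rounding mode (round-to-nearest-even by default), not truncation. A scalar model of the writemask form, leaving out-of-range inputs unmodelled:

fn mask_cvtpd_epi32_model(src: [i32; 4], k: u8, a: [f64; 4]) -> [i32; 4] {
    core::array::from_fn(|j| {
        if (k >> j) & 1 != 0 {
            // rounds like the default MXCSR mode, unlike `as` (truncation)
            a[j].round_ties_even() as i32
        } else {
            src[j]
        }
    })
}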
Convert
Convert packed double-precision (64-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
FOR j := 0 to 3
    i := 32*j
    l := 64*j
    IF k[j]
        dst[i+31:i] := Convert_FP64_To_FP32(a[l+63:l])
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:128] := 0
AVX512F, AVX512VL
immintrin.h

Convert
Convert packed double-precision (64-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
FOR j := 0 to 3
    i := j*32
    l := j*64
    IF k[j]
        dst[i+31:i] := Convert_FP64_To_FP32(a[l+63:l])
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0
AVX512F, AVX512VL
immintrin.h

Convert
Convert packed double-precision (64-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
FOR j := 0 to 1
    i := 32*j
    l := 64*j
    IF k[j]
        dst[i+31:i] := Convert_FP64_To_FP32(a[l+63:l])
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:64] := 0
AVX512F, AVX512VL
immintrin.h

Convert
Convert packed double-precision (64-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
FOR j := 0 to 1
    i := j*32
    l := j*64
    IF k[j]
        dst[i+31:i] := Convert_FP64_To_FP32(a[l+63:l])
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:64] := 0
AVX512F, AVX512VL
immintrin.h
Convert
Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst".
FOR j := 0 to 3
    i := 32*j
    k := 64*j
    dst[i+31:i] := Convert_FP64_To_UInt32(a[k+63:k])
ENDFOR
dst[MAX:128] := 0
AVX512F, AVX512VL
immintrin.h

Convert
Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
FOR j := 0 to 3
    i := j*32
    l := j*64
    IF k[j]
        dst[i+31:i] := Convert_FP64_To_UInt32(a[l+63:l])
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:128] := 0
AVX512F, AVX512VL
immintrin.h

Convert
Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
FOR j := 0 to 3
    i := 32*j
    l := 64*j
    IF k[j]
        dst[i+31:i] := Convert_FP64_To_UInt32(a[l+63:l])
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0
AVX512F, AVX512VL
immintrin.h

Convert
Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst".
FOR j := 0 to 1
    i := 32*j
    k := 64*j
    dst[i+31:i] := Convert_FP64_To_UInt32(a[k+63:k])
ENDFOR
dst[MAX:64] := 0
AVX512F, AVX512VL
immintrin.h

Convert
Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
FOR j := 0 to 1
    i := j*32
    l := j*64
    IF k[j]
        dst[i+31:i] := Convert_FP64_To_UInt32(a[l+63:l])
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:64] := 0
AVX512F, AVX512VL
immintrin.h

Convert
Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
FOR j := 0 to 1
    i := 32*j
    l := 64*j
    IF k[j]
        dst[i+31:i] := Convert_FP64_To_UInt32(a[l+63:l])
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:64] := 0
AVX512F, AVX512VL
immintrin.h
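
A scalar model of the unmasked FP64-to-u32 form. On hardware, a NaN or out-of-range input produces the unsigned integer-indefinite value 0xFFFFFFFF; the sketch makes that case explicit:

fn cvtpd_epu32_model(a: [f64; 4]) -> [u32; 4] {
    core::array::from_fn(|j| {
        let r = a[j].round_ties_even();
        if r >= 0.0 && r <= u32::MAX as f64 {
            r as u32
        } else {
            // unsigned "integer indefinite" result for NaN / out of range
            u32::MAX
        }
    })
}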
Convert
Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
FOR j := 0 to 7
    i := j*32
    m := j*16
    IF k[j]
        dst[i+31:i] := Convert_FP16_To_FP32(a[m+15:m])
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:256] := 0
AVX512F, AVX512VL
immintrin.h

Convert
Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
FOR j := 0 to 7
    i := j*32
    m := j*16
    IF k[j]
        dst[i+31:i] := Convert_FP16_To_FP32(a[m+15:m])
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0
AVX512F, AVX512VL
immintrin.h

Convert
Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
FOR j := 0 to 3
    i := j*32
    m := j*16
    IF k[j]
        dst[i+31:i] := Convert_FP16_To_FP32(a[m+15:m])
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:128] := 0
AVX512F, AVX512VL
immintrin.h

Convert
Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
FOR j := 0 to 3
    i := j*32
    m := j*16
    IF k[j]
        dst[i+31:i] := Convert_FP16_To_FP32(a[m+15:m])
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0
AVX512F, AVX512VL
immintrin.h
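
A scalar model of the masked FP16-to-FP32 widening (256-bit form), using the nightly f16 primitive; the widening itself is exact, so only the masking needs modelling:

#![feature(f16)]

fn mask_cvtph_ps_model(src: [f32; 8], k: u8, a: [f16; 8]) -> [f32; 8] {
    // every f16 value is exactly representable as f32
    core::array::from_fn(|j| if (k >> j) & 1 != 0 { a[j] as f32 } else { src[j] })
}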
Convert
Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
FOR j := 0 to 7
    i := j*32
    IF k[j]
        dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i])
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:256] := 0
AVX512F, AVX512VL
immintrin.h

Convert
Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
FOR j := 0 to 7
    i := 32*j
    IF k[j]
        dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i])
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0
AVX512F, AVX512VL
immintrin.h

Convert
Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
FOR j := 0 to 3
    i := j*32
    IF k[j]
        dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i])
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:128] := 0
AVX512F, AVX512VL
immintrin.h

Convert
Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
FOR j := 0 to 3
    i := 32*j
    IF k[j]
        dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i])
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0
AVX512F, AVX512VL
immintrin.h
+ Convert
+Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note]
+
+FOR j := 0 to 7
+    i := 16*j
+    l := 32*j
+    IF k[j]
+        dst[i+15:i] := Convert_FP32_To_FP16(a[l+31:l])
+    ELSE
+        dst[i+15:i] := src[i+15:i]
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note]
+
+FOR j := 0 to 7
+    i := 16*j
+    l := 32*j
+    IF k[j]
+        dst[i+15:i] := Convert_FP32_To_FP16(a[l+31:l])
+    ELSE
+        dst[i+15:i] := src[i+15:i]
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note]
+
+FOR j := 0 to 7
+    i := 16*j
+    l := 32*j
+    IF k[j]
+        dst[i+15:i] := Convert_FP32_To_FP16(a[l+31:l])
+    ELSE
+        dst[i+15:i] := 0
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert
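Since this patch introduces the nightly `f16` type, the zeromask narrowing above can be sketched in scalar Rust as below. This is illustrative only: the function name is invented, and it ignores the rounding immediate noted by [round_imm_note], using Rust's `as` cast instead, which rounds to nearest-even.

```rust
#![feature(f16)] // nightly-only, as enabled elsewhere in this patch

// Scalar model of the 8-element zeromasked FP32 -> FP16 narrowing,
// with round-to-nearest-even standing in for the rounding immediate.
fn maskz_cvtps_ph(k: u8, a: [f32; 8]) -> [f16; 8] {
    std::array::from_fn(|j| if (k >> j) & 1 == 1 { a[j] as f16 } else { 0.0 })
}
```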
+Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note]
+
+FOR j := 0 to 7
+    i := 16*j
+    l := 32*j
+    IF k[j]
+        dst[i+15:i] := Convert_FP32_To_FP16(a[l+31:l])
+    ELSE
+        dst[i+15:i] := 0
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note]
+
+FOR j := 0 to 3
+    i := 16*j
+    l := 32*j
+    IF k[j]
+        dst[i+15:i] := Convert_FP32_To_FP16(a[l+31:l])
+    ELSE
+        dst[i+15:i] := src[i+15:i]
+    FI
+ENDFOR
+dst[MAX:64] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note]
+
+FOR j := 0 to 3
+    i := 16*j
+    l := 32*j
+    IF k[j]
+        dst[i+15:i] := Convert_FP32_To_FP16(a[l+31:l])
+    ELSE
+        dst[i+15:i] := src[i+15:i]
+    FI
+ENDFOR
+dst[MAX:64] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note]
+
+FOR j := 0 to 3
+    i := 16*j
+    l := 32*j
+    IF k[j]
+        dst[i+15:i] := Convert_FP32_To_FP16(a[l+31:l])
+    ELSE
+        dst[i+15:i] := 0
+    FI
+ENDFOR
+dst[MAX:64] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note]
+
+FOR j := 0 to 3
+    i := 16*j
+    l := 32*j
+    IF k[j]
+        dst[i+15:i] := Convert_FP32_To_FP16(a[l+31:l])
+    ELSE
+        dst[i+15:i] := 0
+    FI
+ENDFOR
+dst[MAX:64] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert
+Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst".
+
+FOR j := 0 to 7
+    i := 32*j
+    dst[i+31:i] := Convert_FP32_To_UInt32(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 7
+    i := 32*j
+    IF k[j]
+        dst[i+31:i] := Convert_FP32_To_UInt32(a[i+31:i])
+    ELSE
+        dst[i+31:i] := src[i+31:i]
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 7
+    i := 32*j
+    IF k[j]
+        dst[i+31:i] := Convert_FP32_To_UInt32(a[i+31:i])
+    ELSE
+        dst[i+31:i] := 0
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst".
+
+FOR j := 0 to 3
+    i := 32*j
+    dst[i+31:i] := Convert_FP32_To_UInt32(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 3
+    i := 32*j
+    IF k[j]
+        dst[i+31:i] := Convert_FP32_To_UInt32(a[i+31:i])
+    ELSE
+        dst[i+31:i] := src[i+31:i]
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 3
+    i := 32*j
+    IF k[j]
+        dst[i+31:i] := Convert_FP32_To_UInt32(a[i+31:i])
+    ELSE
+        dst[i+31:i] := 0
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert
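A scalar sketch of the unsigned-conversion family above follows; the name is invented for illustration. Be aware of the semantic gap: real hardware returns 0xFFFFFFFF for NaN, negative, or too-large inputs, while Rust's saturating `as` cast agrees only for positive overflow, so this model is faithful for in-range values only.

```rust
// Scalar model of the 4-element zeromasked FP32 -> u32 conversion,
// rounding to nearest-even as the default MXCSR mode would.
fn maskz_cvtps_epu32(k: u8, a: [f32; 4]) -> [u32; 4] {
    std::array::from_fn(|j| {
        if (k >> j) & 1 == 1 {
            a[j].round_ties_even() as u32
        } else {
            0
        }
    })
}
```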
+Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 3
+    i := 32*j
+    l := 64*j
+    IF k[j]
+        dst[i+31:i] := Convert_FP64_To_Int32_Truncate(a[l+63:l])
+    ELSE
+        dst[i+31:i] := src[i+31:i]
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 3
+    i := 32*j
+    l := 64*j
+    IF k[j]
+        dst[i+31:i] := Convert_FP64_To_Int32_Truncate(a[l+63:l])
+    ELSE
+        dst[i+31:i] := 0
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 1
+    i := 32*j
+    l := 64*j
+    IF k[j]
+        dst[i+31:i] := Convert_FP64_To_Int32_Truncate(a[l+63:l])
+    ELSE
+        dst[i+31:i] := src[i+31:i]
+    FI
+ENDFOR
+dst[MAX:64] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 1
+    i := 32*j
+    l := 64*j
+    IF k[j]
+        dst[i+31:i] := Convert_FP64_To_Int32_Truncate(a[l+63:l])
+    ELSE
+        dst[i+31:i] := 0
+    FI
+ENDFOR
+dst[MAX:64] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert
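Unlike the plain cvt forms earlier, the "with truncation" (cvtt) family above ignores the rounding mode and drops the fraction toward zero. Rust's float-to-int `as` cast happens to truncate the same way, so a scalar sketch is short; the name is invented, and hardware's 0x80000000 result for invalid inputs differs from the cast's saturation.

```rust
// Scalar model of the 2-element zeromasked truncating FP64 -> i32 form:
// the fractional part is discarded, matching `Convert_FP64_To_Int32_Truncate`
// for in-range inputs.
fn maskz_cvttpd_epi32(k: u8, a: [f64; 2]) -> [i32; 2] {
    std::array::from_fn(|j| if (k >> j) & 1 == 1 { a[j] as i32 } else { 0 })
}
```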
+Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst".
+
+FOR j := 0 to 3
+    i := 32*j
+    k := 64*j
+    dst[i+31:i] := Convert_FP64_To_UInt32_Truncate(a[k+63:k])
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 3
+    i := 32*j
+    l := 64*j
+    IF k[j]
+        dst[i+31:i] := Convert_FP64_To_UInt32_Truncate(a[l+63:l])
+    ELSE
+        dst[i+31:i] := src[i+31:i]
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 3
+    i := 32*j
+    l := 64*j
+    IF k[j]
+        dst[i+31:i] := Convert_FP64_To_UInt32_Truncate(a[l+63:l])
+    ELSE
+        dst[i+31:i] := 0
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst".
+
+FOR j := 0 to 1
+    i := 32*j
+    k := 64*j
+    dst[i+31:i] := Convert_FP64_To_UInt32_Truncate(a[k+63:k])
+ENDFOR
+dst[MAX:64] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 1
+    i := 32*j
+    l := 64*j
+    IF k[j]
+        dst[i+31:i] := Convert_FP64_To_UInt32_Truncate(a[l+63:l])
+    ELSE
+        dst[i+31:i] := src[i+31:i]
+    FI
+ENDFOR
+dst[MAX:64] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 1
+    i := 32*j
+    l := 64*j
+    IF k[j]
+        dst[i+31:i] := Convert_FP64_To_UInt32_Truncate(a[l+63:l])
+    ELSE
+        dst[i+31:i] := 0
+    FI
+ENDFOR
+dst[MAX:64] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert
+Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 7
+    i := 32*j
+    IF k[j]
+        dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i])
+    ELSE
+        dst[i+31:i] := src[i+31:i]
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 7
+    i := 32*j
+    IF k[j]
+        dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i])
+    ELSE
+        dst[i+31:i] := 0
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 3
+    i := 32*j
+    IF k[j]
+        dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i])
+    ELSE
+        dst[i+31:i] := src[i+31:i]
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 3
+    i := 32*j
+    IF k[j]
+        dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i])
+    ELSE
+        dst[i+31:i] := 0
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst".
+
+FOR j := 0 to 7
+    i := 32*j
+    dst[i+31:i] := Convert_FP32_To_UInt32_Truncate(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert
+Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 7
+    i := 32*j
+    IF k[j]
+        dst[i+31:i] := Convert_FP32_To_UInt32_Truncate(a[i+31:i])
+    ELSE
+        dst[i+31:i] := src[i+31:i]
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 7
+    i := 32*j
+    IF k[j]
+        dst[i+31:i] := Convert_FP32_To_UInt32_Truncate(a[i+31:i])
+    ELSE
+        dst[i+31:i] := 0
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert
+Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst".
+
+FOR j := 0 to 3
+    i := 32*j
+    dst[i+31:i] := Convert_FP32_To_UInt32_Truncate(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert
+Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 3
+    i := 32*j
+    IF k[j]
+        dst[i+31:i] := Convert_FP32_To_UInt32_Truncate(a[i+31:i])
+    ELSE
+        dst[i+31:i] := src[i+31:i]
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 3
+    i := 32*j
+    IF k[j]
+        dst[i+31:i] := Convert_FP32_To_UInt32_Truncate(a[i+31:i])
+    ELSE
+        dst[i+31:i] := 0
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert
+Convert packed unsigned 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst".
+
+FOR j := 0 to 3
+    i := j*64
+    l := j*32
+    dst[i+63:i] := Convert_Int32_To_FP64(a[l+31:l])
+ENDFOR
+dst[MAX:256] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Convert packed unsigned 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 3
+    i := j*64
+    l := j*32
+    IF k[j]
+        dst[i+63:i] := Convert_Int32_To_FP64(a[l+31:l])
+    ELSE
+        dst[i+63:i] := src[i+63:i]
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert
+Convert packed unsigned 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 3
+    i := j*64
+    l := j*32
+    IF k[j]
+        dst[i+63:i] := Convert_Int32_To_FP64(a[l+31:l])
+    ELSE
+        dst[i+63:i] := 0
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Convert packed unsigned 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst".
+
+FOR j := 0 to 1
+    i := j*64
+    l := j*32
+    dst[i+63:i] := Convert_Int32_To_FP64(a[l+31:l])
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Convert packed unsigned 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 1
+    i := j*64
+    l := j*32
+    IF k[j]
+        dst[i+63:i] := Convert_Int32_To_FP64(a[l+31:l])
+    ELSE
+        dst[i+63:i] := src[i+63:i]
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Convert packed unsigned 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 1
+    i := j*64
+    l := j*32
+    IF k[j]
+        dst[i+63:i] := Convert_Int32_To_FP64(a[l+31:l])
+    ELSE
+        dst[i+63:i] := 0
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert
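The widening direction above is the only lossless one in this section: every 32-bit integer is exactly representable in an f64 (53-bit significand), so no rounding is involved. A scalar sketch (name invented for illustration):

```rust
// Scalar model of the 2-element zeromasked u32 -> f64 widening; the
// `as` cast is exact here, so it models Convert_Int32_To_FP64 faithfully.
fn maskz_cvtepu32_pd(k: u8, a: [u32; 2]) -> [f64; 2] {
    std::array::from_fn(|j| if (k >> j) & 1 == 1 { a[j] as f64 } else { 0.0 })
}
```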
+Convert packed 32-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst".
+
+FOR j := 0 to 7
+    i := 32*j
+    k := 8*j
+    dst[k+7:k] := Truncate8(a[i+31:i])
+ENDFOR
+dst[MAX:64] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Convert packed 32-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 7
+    i := 32*j
+    l := 8*j
+    IF k[j]
+        dst[l+7:l] := Truncate8(a[i+31:i])
+    ELSE
+        dst[l+7:l] := src[l+7:l]
+    FI
+ENDFOR
+dst[MAX:64] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Store
+Convert packed 32-bit integers in "a" to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".
+
+FOR j := 0 to 7
+    i := 32*j
+    l := 8*j
+    IF k[j]
+        MEM[base_addr+l+7:base_addr+l] := Truncate8(a[i+31:i])
+    FI
+ENDFOR
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Convert packed 32-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 7
+    i := 32*j
+    l := 8*j
+    IF k[j]
+        dst[l+7:l] := Truncate8(a[i+31:i])
+    ELSE
+        dst[l+7:l] := 0
+    FI
+ENDFOR
+dst[MAX:64] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert
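For the integer down-conversion family above, `Truncate8` simply keeps the low byte, which is exactly what Rust's integer `as` cast does, and the `storeu` variant writes only the active lanes to memory. A scalar sketch of both (names invented for illustration):

```rust
// Scalar model of the 8-element epi32 -> epi8 truncation: `as i8`
// discards the upper 24 bits, matching Truncate8 exactly.
fn cvtepi32_epi8(a: [i32; 8]) -> [i8; 8] {
    std::array::from_fn(|j| a[j] as i8)
}

// Scalar model of the masked store form: only lanes whose mask bit is
// set are written; the remaining bytes of memory are left untouched.
fn mask_cvtepi32_storeu_epi8(base: &mut [i8; 8], k: u8, a: [i32; 8]) {
    for j in 0..8 {
        if (k >> j) & 1 == 1 {
            base[j] = a[j] as i8;
        }
    }
}
```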
+Convert packed 32-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst".
+
+FOR j := 0 to 3
+    i := 32*j
+    k := 8*j
+    dst[k+7:k] := Truncate8(a[i+31:i])
+ENDFOR
+dst[MAX:32] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Convert packed 32-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 3
+    i := 32*j
+    l := 8*j
+    IF k[j]
+        dst[l+7:l] := Truncate8(a[i+31:i])
+    ELSE
+        dst[l+7:l] := src[l+7:l]
+    FI
+ENDFOR
+dst[MAX:32] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Store
+Convert packed 32-bit integers in "a" to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".
+
+FOR j := 0 to 3
+    i := 32*j
+    l := 8*j
+    IF k[j]
+        MEM[base_addr+l+7:base_addr+l] := Truncate8(a[i+31:i])
+    FI
+ENDFOR
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Convert packed 32-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 3
+    i := 32*j
+    l := 8*j
+    IF k[j]
+        dst[l+7:l] := Truncate8(a[i+31:i])
+    ELSE
+        dst[l+7:l] := 0
+    FI
+ENDFOR
+dst[MAX:32] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert
+Convert packed 32-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst".
+
+FOR j := 0 to 7
+    i := 32*j
+    k := 16*j
+    dst[k+15:k] := Truncate16(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Convert packed 32-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 7
+    i := 32*j
+    l := 16*j
+    IF k[j]
+        dst[l+15:l] := Truncate16(a[i+31:i])
+    ELSE
+        dst[l+15:l] := src[l+15:l]
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Store
+Convert packed 32-bit integers in "a" to packed 16-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".
+
+FOR j := 0 to 7
+    i := 32*j
+    l := 16*j
+    IF k[j]
+        MEM[base_addr+l+15:base_addr+l] := Truncate16(a[i+31:i])
+    FI
+ENDFOR
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Convert packed 32-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 7
+    i := 32*j
+    l := 16*j
+    IF k[j]
+        dst[l+15:l] := Truncate16(a[i+31:i])
+    ELSE
+        dst[l+15:l] := 0
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert
+Convert packed 32-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst".
+
+FOR j := 0 to 3
+    i := 32*j
+    k := 16*j
+    dst[k+15:k] := Truncate16(a[i+31:i])
+ENDFOR
+dst[MAX:64] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Convert packed 32-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 3
+    i := 32*j
+    l := 16*j
+    IF k[j]
+        dst[l+15:l] := Truncate16(a[i+31:i])
+    ELSE
+        dst[l+15:l] := src[l+15:l]
+    FI
+ENDFOR
+dst[MAX:64] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Store
+Convert packed 32-bit integers in "a" to packed 16-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".
+
+FOR j := 0 to 3
+    i := 32*j
+    l := 16*j
+    IF k[j]
+        MEM[base_addr+l+15:base_addr+l] := Truncate16(a[i+31:i])
+    FI
+ENDFOR
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Convert packed 32-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 3
+    i := 32*j
+    l := 16*j
+    IF k[j]
+        dst[l+15:l] := Truncate16(a[i+31:i])
+    ELSE
+        dst[l+15:l] := 0
+    FI
+ENDFOR
+dst[MAX:64] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert
+Convert packed 64-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst".
+
+FOR j := 0 to 3
+    i := 64*j
+    k := 8*j
+    dst[k+7:k] := Truncate8(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Convert packed 64-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 3
+    i := 64*j
+    l := 8*j
+    IF k[j]
+        dst[l+7:l] := Truncate8(a[i+63:i])
+    ELSE
+        dst[l+7:l] := src[l+7:l]
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Store
+Convert packed 64-bit integers in "a" to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".
+
+FOR j := 0 to 3
+    i := 64*j
+    l := 8*j
+    IF k[j]
+        MEM[base_addr+l+7:base_addr+l] := Truncate8(a[i+63:i])
+    FI
+ENDFOR
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Convert packed 64-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 3
+    i := 64*j
+    l := 8*j
+    IF k[j]
+        dst[l+7:l] := Truncate8(a[i+63:i])
+    ELSE
+        dst[l+7:l] := 0
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert
+Convert packed 64-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst".
+
+FOR j := 0 to 1
+    i := 64*j
+    k := 8*j
+    dst[k+7:k] := Truncate8(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Convert packed 64-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 1
+    i := 64*j
+    l := 8*j
+    IF k[j]
+        dst[l+7:l] := Truncate8(a[i+63:i])
+    ELSE
+        dst[l+7:l] := src[l+7:l]
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Store
+Convert packed 64-bit integers in "a" to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".
+
+FOR j := 0 to 1
+    i := 64*j
+    l := 8*j
+    IF k[j]
+        MEM[base_addr+l+7:base_addr+l] := Truncate8(a[i+63:i])
+    FI
+ENDFOR
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Convert packed 64-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 1
+    i := 64*j
+    l := 8*j
+    IF k[j]
+        dst[l+7:l] := Truncate8(a[i+63:i])
+    ELSE
+        dst[l+7:l] := 0
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert
+Convert packed 64-bit integers in "a" to packed 32-bit integers with truncation, and store the results in "dst".
+
+FOR j := 0 to 3
+    i := 64*j
+    k := 32*j
+    dst[k+31:k] := Truncate32(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Convert packed 64-bit integers in "a" to packed 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 3
+    i := 64*j
+    l := 32*j
+    IF k[j]
+        dst[l+31:l] := Truncate32(a[i+63:i])
+    ELSE
+        dst[l+31:l] := src[l+31:l]
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Store
+Convert packed 64-bit integers in "a" to packed 32-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".
+
+FOR j := 0 to 3
+    i := 64*j
+    l := 32*j
+    IF k[j]
+        MEM[base_addr+l+31:base_addr+l] := Truncate32(a[i+63:i])
+    FI
+ENDFOR
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert

+Convert packed 64-bit integers in "a" to packed 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 3
+    i := 64*j
+    l := 32*j
+    IF k[j]
+        dst[l+31:l] := Truncate32(a[i+63:i])
+    ELSE
+        dst[l+31:l] := 0
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+AVX512F
+AVX512VL
+immintrin.h
+Convert
- - - Convert packed 64-bit integers in "a" to packed 32-bit integers with - truncation, and store the results in "dst". - - FOR j := 0 to 1 - i := 64*j - k := 32*j - dst[k+31:k] := Truncate32(a[i+63:i]) - ENDFOR - dst[MAX:64] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + Convert packed 64-bit integers in "a" to packed 32-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 1 + i := 64*j + k := 32*j + dst[k+31:k] := Truncate32(a[i+63:i]) +ENDFOR +dst[MAX:64] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed 64-bit integers in "a" to packed 32-bit integers with - truncation, and store the results in "dst" using writemask "k" (elements are copied from - "src" when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := 64*j - l := 32*j - IF k[j] - dst[l+31:l] := Truncate32(a[i+63:i]) - ELSE - dst[l+31:l] := src[l+31:l] - FI - ENDFOR - dst[MAX:64] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed 64-bit integers in "a" to packed 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 32*j + IF k[j] + dst[l+31:l] := Truncate32(a[i+63:i]) + ELSE + dst[l+31:l] := src[l+31:l] + FI +ENDFOR +dst[MAX:64] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- Store - - - - - Convert packed 64-bit integers in "a" to packed 32-bit integers with - truncation, and store the active results (those with their respective bit set in - writemask "k") to unaligned memory at "base_addr". - - FOR j := 0 to 1 - i := 64*j - l := 32*j - IF k[j] - MEM[base_addr+l+31:base_addr+l] := Truncate32(a[i+63:i]) - FI - ENDFOR - - - AVX512F - AVX512VL -
immintrin.h
- Convert + Store + + + + + Convert packed 64-bit integers in "a" to packed 32-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 1 + i := 64*j + l := 32*j + IF k[j] + MEM[base_addr+l+31:base_addr+l] := Truncate32(a[i+63:i]) + FI +ENDFOR + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed 64-bit integers in "a" to packed 32-bit integers with - truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out - when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := 64*j - l := 32*j - IF k[j] - dst[l+31:l] := Truncate32(a[i+63:i]) - ELSE - dst[l+31:l] := 0 - FI - ENDFOR - dst[MAX:64] := 0 - - - AVX512F - AVX512VL -
immintrin.h
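Putting the index arithmetic and masking together, the zero-masked 64-to-32-bit entry above can be modeled lane by lane. A hedged scalar sketch (hypothetical function name; assumes the two source lanes pack into the low half of a four-lane result, with the upper half zeroed as in "dst[MAX:64] := 0"):

// Hypothetical scalar model of the zero-masked 64->32-bit truncating
// conversion described above.
fn maskz_cvtepi64_epi32(k: u8, a: [i64; 2]) -> [i32; 4] {
    let mut dst = [0i32; 4]; // upper two lanes stay zero, like dst[MAX:64] := 0
    for j in 0..2 {
        if (k >> j) & 1 == 1 {
            dst[j] = a[j] as i32; // Truncate32
        } // else: zeromask leaves the lane at 0
    }
    dst
}

fn main() {
    let r = maskz_cvtepi64_epi32(0b01, [0x1_0000_0002, -1]);
    assert_eq!(r, [2, 0, 0, 0]);
}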
- Convert + + + + Convert packed 64-bit integers in "a" to packed 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 32*j + IF k[j] + dst[l+31:l] := Truncate32(a[i+63:i]) + ELSE + dst[l+31:l] := 0 + FI +ENDFOR +dst[MAX:64] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed 64-bit integers in "a" to packed 16-bit integers with - truncation, and store the results in "dst". - - FOR j := 0 to 3 - i := 64*j - k := 16*j - dst[k+15:k] := Truncate16(a[i+63:i]) - ENDFOR - dst[MAX:64] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + Convert packed 64-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 3 + i := 64*j + k := 16*j + dst[k+15:k] := Truncate16(a[i+63:i]) +ENDFOR +dst[MAX:64] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed 64-bit integers in "a" to packed 16-bit integers with - truncation, and store the results in "dst" using writemask "k" (elements are copied from - "src" when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := 64*j - l := 16*j - IF k[j] - dst[l+15:l] := Truncate16(a[i+63:i]) - ELSE - dst[l+15:l] := src[l+15:l] - FI - ENDFOR - dst[MAX:64] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed 64-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 16*j + IF k[j] + dst[l+15:l] := Truncate16(a[i+63:i]) + ELSE + dst[l+15:l] := src[l+15:l] + FI +ENDFOR +dst[MAX:64] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- Store - - - - - Convert packed 64-bit integers in "a" to packed 16-bit integers with - truncation, and store the active results (those with their respective bit set in - writemask "k") to unaligned memory at "base_addr". - - FOR j := 0 to 3 - i := 64*j - l := 16*j - IF k[j] - MEM[base_addr+l+15:base_addr+l] := Truncate16(a[i+63:i]) - FI - ENDFOR - - - AVX512F - AVX512VL -
immintrin.h
- Convert + Store + + + + + Convert packed 64-bit integers in "a" to packed 16-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 3 + i := 64*j + l := 16*j + IF k[j] + MEM[base_addr+l+15:base_addr+l] := Truncate16(a[i+63:i]) + FI +ENDFOR + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed 64-bit integers in "a" to packed 16-bit integers with - truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out - when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := 64*j - l := 16*j - IF k[j] - dst[l+15:l] := Truncate16(a[i+63:i]) - ELSE - dst[l+15:l] := 0 - FI - ENDFOR - dst[MAX:64] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + Convert packed 64-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 16*j + IF k[j] + dst[l+15:l] := Truncate16(a[i+63:i]) + ELSE + dst[l+15:l] := 0 + FI +ENDFOR +dst[MAX:64] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed 64-bit integers in "a" to packed 16-bit integers with - truncation, and store the results in "dst". - - FOR j := 0 to 1 - i := 64*j - k := 16*j - dst[k+15:k] := Truncate16(a[i+63:i]) - ENDFOR - dst[MAX:32] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + Convert packed 64-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 1 + i := 64*j + k := 16*j + dst[k+15:k] := Truncate16(a[i+63:i]) +ENDFOR +dst[MAX:32] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed 64-bit integers in "a" to packed 16-bit integers with - truncation, and store the results in "dst" using writemask "k" (elements are copied from - "src" when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := 64*j - l := 16*j - IF k[j] - dst[l+15:l] := Truncate16(a[i+63:i]) - ELSE - dst[l+15:l] := src[l+15:l] - FI - ENDFOR - dst[MAX:32] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed 64-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 16*j + IF k[j] + dst[l+15:l] := Truncate16(a[i+63:i]) + ELSE + dst[l+15:l] := src[l+15:l] + FI +ENDFOR +dst[MAX:32] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- Store - - - - - Convert packed 64-bit integers in "a" to packed 16-bit integers with - truncation, and store the active results (those with their respective bit set in - writemask "k") to unaligned memory at "base_addr". - - FOR j := 0 to 1 - i := 64*j - l := 16*j - IF k[j] - MEM[base_addr+l+15:base_addr+l] := Truncate16(a[i+63:i]) - FI - ENDFOR - - - AVX512F - AVX512VL -
immintrin.h
- Convert + Store + + + + + Convert packed 64-bit integers in "a" to packed 16-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 1 + i := 64*j + l := 16*j + IF k[j] + MEM[base_addr+l+15:base_addr+l] := Truncate16(a[i+63:i]) + FI +ENDFOR + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed 64-bit integers in "a" to packed 16-bit integers with - truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out - when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := 64*j - l := 16*j - IF k[j] - dst[l+15:l] := Truncate16(a[i+63:i]) - ELSE - dst[l+15:l] := 0 - FI - ENDFOR - dst[MAX:32] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + Convert packed 64-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 16*j + IF k[j] + dst[l+15:l] := Truncate16(a[i+63:i]) + ELSE + dst[l+15:l] := 0 + FI +ENDFOR +dst[MAX:32] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed signed 32-bit integers in "a" to packed 8-bit integers with - signed saturation, and store the results in "dst". - - FOR j := 0 to 7 - i := 32*j - k := 8*j - dst[k+7:k] := Saturate8(a[i+31:i]) - ENDFOR - dst[MAX:64] := 0 - - - AVX512F - AVX512VL -
immintrin.h
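`Saturate8` differs from `Truncate8` in that out-of-range values clamp to the i8 limits instead of wrapping. A small Rust model of that clamp, illustrative only:

// Scalar model of the guide's Saturate8: clamp a signed 32-bit value
// into the i8 range before narrowing.
fn saturate8(x: i32) -> i8 {
    x.clamp(i8::MIN as i32, i8::MAX as i32) as i8
}

fn main() {
    assert_eq!(saturate8(1000), 127);
    assert_eq!(saturate8(-1000), -128);
    assert_eq!(saturate8(42), 42);
}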
- Convert + + + Convert packed signed 32-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst". + +FOR j := 0 to 7 + i := 32*j + k := 8*j + dst[k+7:k] := Saturate8(a[i+31:i]) +ENDFOR +dst[MAX:64] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed signed 32-bit integers in "a" to packed 8-bit integers with - signed saturation, and store the results in "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := 32*j - l := 8*j - IF k[j] - dst[l+7:l] := Saturate8(a[i+31:i]) - ELSE - dst[l+7:l] := src[l+7:l] - FI - ENDFOR - dst[MAX:64] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed signed 32-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate8(a[i+31:i]) + ELSE + dst[l+7:l] := src[l+7:l] + FI +ENDFOR +dst[MAX:64] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- Store - - - - - Convert packed signed 32-bit integers in "a" to packed 8-bit integers with - signed saturation, and store the active results (those with their respective bit set in - writemask "k") to unaligned memory at "base_addr". - - FOR j := 0 to 7 - i := 32*j - l := 8*j - IF k[j] - MEM[base_addr+l+7:base_addr+l] := Saturate8(a[i+31:i]) - FI - ENDFOR - - - AVX512F - AVX512VL -
immintrin.h
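For the store-to-memory entries like the one above, note that each active lane writes its narrowed value at its own byte offset and inactive bytes are left untouched; the results are not compacted. A safe-slice sketch of that behavior, with a hypothetical helper standing in for the raw "base_addr" pointer form:

// Scalar model of the masked saturating store: active lanes write at
// their own offsets; inactive destination bytes are left untouched.
fn mask_store_sat_epi8(dst: &mut [i8; 8], k: u8, a: [i32; 8]) {
    for j in 0..8 {
        if (k >> j) & 1 == 1 {
            dst[j] = a[j].clamp(i8::MIN as i32, i8::MAX as i32) as i8; // Saturate8
        }
    }
}

fn main() {
    let mut buf = [9i8; 8];
    mask_store_sat_epi8(&mut buf, 0b0000_0011, [300, -300, 5, 6, 7, 8, 9, 10]);
    assert_eq!(&buf[..3], &[127, -128, 9]); // lanes 0-1 saturated, lane 2 untouched
}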
- Convert + Store + + + + + Convert packed signed 32-bit integers in "a" to packed 8-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 7 + i := 32*j + l := 8*j + IF k[j] + MEM[base_addr+l+7:base_addr+l] := Saturate8(a[i+31:i]) + FI +ENDFOR + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed signed 32-bit integers in "a" to packed 8-bit integers with - signed saturation, and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := 32*j - l := 8*j - IF k[j] - dst[l+7:l] := Saturate8(a[i+31:i]) - ELSE - dst[l+7:l] := 0 - FI - ENDFOR - dst[MAX:64] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + Convert packed signed 32-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate8(a[i+31:i]) + ELSE + dst[l+7:l] := 0 + FI +ENDFOR +dst[MAX:64] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed signed 32-bit integers in "a" to packed 8-bit integers with - signed saturation, and store the results in "dst". - - FOR j := 0 to 3 - i := 32*j - k := 8*j - dst[k+7:k] := Saturate8(a[i+31:i]) - ENDFOR - dst[MAX:32] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + Convert packed signed 32-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst". + +FOR j := 0 to 3 + i := 32*j + k := 8*j + dst[k+7:k] := Saturate8(a[i+31:i]) +ENDFOR +dst[MAX:32] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed signed 32-bit integers in "a" to packed 8-bit integers with - signed saturation, and store the results in "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := 32*j - l := 8*j - IF k[j] - dst[l+7:l] := Saturate8(a[i+31:i]) - ELSE - dst[l+7:l] := src[l+7:l] - FI - ENDFOR - dst[MAX:32] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed signed 32-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate8(a[i+31:i]) + ELSE + dst[l+7:l] := src[l+7:l] + FI +ENDFOR +dst[MAX:32] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- Store - - - - - Convert packed signed 32-bit integers in "a" to packed 8-bit integers with - signed saturation, and store the active results (those with their respective bit set in - writemask "k") to unaligned memory at "base_addr". - - FOR j := 0 to 3 - i := 32*j - l := 8*j - IF k[j] - MEM[base_addr+l+7:base_addr+l] := Saturate8(a[i+31:i]) - FI - ENDFOR - - - AVX512F - AVX512VL -
immintrin.h
- Convert + Store + + + + + Convert packed signed 32-bit integers in "a" to packed 8-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 3 + i := 32*j + l := 8*j + IF k[j] + MEM[base_addr+l+7:base_addr+l] := Saturate8(a[i+31:i]) + FI +ENDFOR + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed signed 32-bit integers in "a" to packed 8-bit integers with - signed saturation, and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := 32*j - l := 8*j - IF k[j] - dst[l+7:l] := Saturate8(a[i+31:i]) - ELSE - dst[l+7:l] := 0 - FI - ENDFOR - dst[MAX:32] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + Convert packed signed 32-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate8(a[i+31:i]) + ELSE + dst[l+7:l] := 0 + FI +ENDFOR +dst[MAX:32] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed signed 32-bit integers in "a" to packed 16-bit integers with - signed saturation, and store the results in "dst". - - FOR j := 0 to 7 - i := 32*j - k := 16*j - dst[k+15:k] := Saturate16(a[i+31:i]) - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + Convert packed signed 32-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst". + +FOR j := 0 to 7 + i := 32*j + k := 16*j + dst[k+15:k] := Saturate16(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed signed 32-bit integers in "a" to packed 16-bit integers with - signed saturation, and store the results in "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := 32*j - l := 16*j - IF k[j] - dst[l+15:l] := Saturate16(a[i+31:i]) - ELSE - dst[l+15:l] := src[l+15:l] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed signed 32-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 16*j + IF k[j] + dst[l+15:l] := Saturate16(a[i+31:i]) + ELSE + dst[l+15:l] := src[l+15:l] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- Store - - - - - Convert packed signed 32-bit integers in "a" to packed 16-bit integers with - signed saturation, and store the active results (those with their respective bit set in - writemask "k") to unaligned memory at "base_addr". - - FOR j := 0 to 7 - i := 32*j - l := 16*j - IF k[j] - MEM[base_addr+l+15:base_addr+l] := Saturate16(a[i+31:i]) - FI - ENDFOR - - - AVX512F - AVX512VL -
immintrin.h
- Convert + Store + + + + + Convert packed signed 32-bit integers in "a" to packed 16-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 7 + i := 32*j + l := 16*j + IF k[j] + MEM[base_addr+l+15:base_addr+l] := Saturate16(a[i+31:i]) + FI +ENDFOR + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed signed 32-bit integers in "a" to packed 16-bit integers with - signed saturation, and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := 32*j - l := 16*j - IF k[j] - dst[l+15:l] := Saturate16(a[i+31:i]) - ELSE - dst[l+15:l] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + Convert packed signed 32-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 16*j + IF k[j] + dst[l+15:l] := Saturate16(a[i+31:i]) + ELSE + dst[l+15:l] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed signed 32-bit integers in "a" to packed 16-bit integers with - signed saturation, and store the results in "dst". - - FOR j := 0 to 3 - i := 32*j - k := 16*j - dst[k+15:k] := Saturate16(a[i+31:i]) - ENDFOR - dst[MAX:64] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + Convert packed signed 32-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst". + +FOR j := 0 to 3 + i := 32*j + k := 16*j + dst[k+15:k] := Saturate16(a[i+31:i]) +ENDFOR +dst[MAX:64] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed signed 32-bit integers in "a" to packed 16-bit integers with - signed saturation, and store the results in "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := 32*j - l := 16*j - IF k[j] - dst[l+15:l] := Saturate16(a[i+31:i]) - ELSE - dst[l+15:l] := src[l+15:l] - FI - ENDFOR - dst[MAX:64] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed signed 32-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + l := 16*j + IF k[j] + dst[l+15:l] := Saturate16(a[i+31:i]) + ELSE + dst[l+15:l] := src[l+15:l] + FI +ENDFOR +dst[MAX:64] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- Store - - - - - Convert packed signed 32-bit integers in "a" to packed 16-bit integers with - signed saturation, and store the active results (those with their respective bit set in - writemask "k") to unaligned memory at "base_addr". - - FOR j := 0 to 3 - i := 32*j - l := 16*j - IF k[j] - MEM[base_addr+l+15:base_addr+l] := Saturate16(a[i+31:i]) - FI - ENDFOR - - - AVX512F - AVX512VL -
immintrin.h
- Convert + Store + + + + + Convert packed signed 32-bit integers in "a" to packed 16-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 3 + i := 32*j + l := 16*j + IF k[j] + MEM[base_addr+l+15:base_addr+l] := Saturate16(a[i+31:i]) + FI +ENDFOR + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed signed 32-bit integers in "a" to packed 16-bit integers with - signed saturation, and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := 32*j - l := 16*j - IF k[j] - dst[l+15:l] := Saturate16(a[i+31:i]) - ELSE - dst[l+15:l] := 0 - FI - ENDFOR - dst[MAX:64] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + Convert packed signed 32-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + l := 16*j + IF k[j] + dst[l+15:l] := Saturate16(a[i+31:i]) + ELSE + dst[l+15:l] := 0 + FI +ENDFOR +dst[MAX:64] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed signed 64-bit integers in "a" to packed 8-bit integers with - signed saturation, and store the results in "dst". - - FOR j := 0 to 3 - i := 64*j - k := 8*j - dst[k+7:k] := Saturate8(a[i+63:i]) - ENDFOR - dst[MAX:32] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + Convert packed signed 64-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst". + +FOR j := 0 to 3 + i := 64*j + k := 8*j + dst[k+7:k] := Saturate8(a[i+63:i]) +ENDFOR +dst[MAX:32] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed signed 64-bit integers in "a" to packed 8-bit integers with - signed saturation, and store the results in "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := 64*j - l := 8*j - IF k[j] - dst[l+7:l] := Saturate8(a[i+63:i]) - ELSE - dst[l+7:l] := src[l+7:l] - FI - ENDFOR - dst[MAX:32] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed signed 64-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate8(a[i+63:i]) + ELSE + dst[l+7:l] := src[l+7:l] + FI +ENDFOR +dst[MAX:32] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- Store - - - - - Convert packed signed 64-bit integers in "a" to packed 8-bit integers with - signed saturation, and store the active results (those with their respective bit set in - writemask "k") to unaligned memory at "base_addr". - - FOR j := 0 to 3 - i := 64*j - l := 8*j - IF k[j] - MEM[base_addr+l+7:base_addr+l] := Saturate8(a[i+63:i]) - FI - ENDFOR - - - AVX512F - AVX512VL -
immintrin.h
- Convert + Store + + + + + Convert packed signed 64-bit integers in "a" to packed 8-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 3 + i := 64*j + l := 8*j + IF k[j] + MEM[base_addr+l+7:base_addr+l] := Saturate8(a[i+63:i]) + FI +ENDFOR + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed signed 64-bit integers in "a" to packed 8-bit integers with - signed saturation, and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := 64*j - l := 8*j - IF k[j] - dst[l+7:l] := Saturate8(a[i+63:i]) - ELSE - dst[l+7:l] := 0 - FI - ENDFOR - dst[MAX:32] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + Convert packed signed 64-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate8(a[i+63:i]) + ELSE + dst[l+7:l] := 0 + FI +ENDFOR +dst[MAX:32] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed signed 64-bit integers in "a" to packed 8-bit integers with - signed saturation, and store the results in "dst". - - FOR j := 0 to 1 - i := 64*j - k := 8*j - dst[k+7:k] := Saturate8(a[i+63:i]) - ENDFOR - dst[MAX:16] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + Convert packed signed 64-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst". + +FOR j := 0 to 1 + i := 64*j + k := 8*j + dst[k+7:k] := Saturate8(a[i+63:i]) +ENDFOR +dst[MAX:16] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed signed 64-bit integers in "a" to packed 8-bit integers with - signed saturation, and store the results in "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := 64*j - l := 8*j - IF k[j] - dst[l+7:l] := Saturate8(a[i+63:i]) - ELSE - dst[l+7:l] := src[l+7:l] - FI - ENDFOR - dst[MAX:16] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed signed 64-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate8(a[i+63:i]) + ELSE + dst[l+7:l] := src[l+7:l] + FI +ENDFOR +dst[MAX:16] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- Store - - - - - Convert packed signed 64-bit integers in "a" to packed 8-bit integers with - signed saturation, and store the active results (those with their respective bit set in - writemask "k") to unaligned memory at "base_addr". - - FOR j := 0 to 1 - i := 64*j - l := 8*j - IF k[j] - MEM[base_addr+l+7:base_addr+l] := Saturate8(a[i+63:i]) - FI - ENDFOR - - - AVX512F - AVX512VL -
immintrin.h
- Convert + Store + + + + + Convert packed signed 64-bit integers in "a" to packed 8-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 1 + i := 64*j + l := 8*j + IF k[j] + MEM[base_addr+l+7:base_addr+l] := Saturate8(a[i+63:i]) + FI +ENDFOR + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed signed 64-bit integers in "a" to packed 8-bit integers with - signed saturation, and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := 64*j - l := 8*j - IF k[j] - dst[l+7:l] := Saturate8(a[i+63:i]) - ELSE - dst[l+7:l] := 0 - FI - ENDFOR - dst[MAX:16] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + Convert packed signed 64-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 8*j + IF k[j] + dst[l+7:l] := Saturate8(a[i+63:i]) + ELSE + dst[l+7:l] := 0 + FI +ENDFOR +dst[MAX:16] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed signed 64-bit integers in "a" to packed 32-bit integers with - signed saturation, and store the results in "dst". - - FOR j := 0 to 3 - i := 64*j - k := 32*j - dst[k+31:k] := Saturate32(a[i+63:i]) - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + Convert packed signed 64-bit integers in "a" to packed 32-bit integers with signed saturation, and store the results in "dst". + +FOR j := 0 to 3 + i := 64*j + k := 32*j + dst[k+31:k] := Saturate32(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed signed 64-bit integers in "a" to packed 32-bit integers with - signed saturation, and store the results in "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := 64*j - l := 32*j - IF k[j] - dst[l+31:l] := Saturate32(a[i+63:i]) - ELSE - dst[l+31:l] := src[l+31:l] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed signed 64-bit integers in "a" to packed 32-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 32*j + IF k[j] + dst[l+31:l] := Saturate32(a[i+63:i]) + ELSE + dst[l+31:l] := src[l+31:l] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- Store - - - - - Convert packed signed 64-bit integers in "a" to packed 32-bit integers with - signed saturation, and store the active results (those with their respective bit set in - writemask "k") to unaligned memory at "base_addr". - - FOR j := 0 to 3 - i := 64*j - l := 32*j - IF k[j] - MEM[base_addr+l+31:base_addr+l] := Saturate32(a[i+63:i]) - FI - ENDFOR - - - AVX512F - AVX512VL -
immintrin.h
- Convert + Store + + + + + Convert packed signed 64-bit integers in "a" to packed 32-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 3 + i := 64*j + l := 32*j + IF k[j] + MEM[base_addr+l+31:base_addr+l] := Saturate32(a[i+63:i]) + FI +ENDFOR + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed signed 64-bit integers in "a" to packed 32-bit integers with - signed saturation, and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := 64*j - l := 32*j - IF k[j] - dst[l+31:l] := Saturate32(a[i+63:i]) - ELSE - dst[l+31:l] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + Convert packed signed 64-bit integers in "a" to packed 32-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 32*j + IF k[j] + dst[l+31:l] := Saturate32(a[i+63:i]) + ELSE + dst[l+31:l] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed signed 64-bit integers in "a" to packed 32-bit integers with - signed saturation, and store the results in "dst". - - FOR j := 0 to 1 - i := 64*j - k := 32*j - dst[k+31:k] := Saturate32(a[i+63:i]) - ENDFOR - dst[MAX:64] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + Convert packed signed 64-bit integers in "a" to packed 32-bit integers with signed saturation, and store the results in "dst". + +FOR j := 0 to 1 + i := 64*j + k := 32*j + dst[k+31:k] := Saturate32(a[i+63:i]) +ENDFOR +dst[MAX:64] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed signed 64-bit integers in "a" to packed 32-bit integers with - signed saturation, and store the results in "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := 64*j - l := 32*j - IF k[j] - dst[l+31:l] := Saturate32(a[i+63:i]) - ELSE - dst[l+31:l] := src[l+31:l] - FI - ENDFOR - dst[MAX:64] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed signed 64-bit integers in "a" to packed 32-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 32*j + IF k[j] + dst[l+31:l] := Saturate32(a[i+63:i]) + ELSE + dst[l+31:l] := src[l+31:l] + FI +ENDFOR +dst[MAX:64] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- Store - - - - - Convert packed signed 64-bit integers in "a" to packed 32-bit integers with - signed saturation, and store the active results (those with their respective bit set in - writemask "k") to unaligned memory at "base_addr". - - FOR j := 0 to 1 - i := 64*j - l := 32*j - IF k[j] - MEM[base_addr+l+31:base_addr+l] := Saturate32(a[i+63:i]) - FI - ENDFOR - - - AVX512F - AVX512VL -
immintrin.h
- Convert + Store + + + + + Convert packed signed 64-bit integers in "a" to packed 32-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 1 + i := 64*j + l := 32*j + IF k[j] + MEM[base_addr+l+31:base_addr+l] := Saturate32(a[i+63:i]) + FI +ENDFOR + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed signed 64-bit integers in "a" to packed 32-bit integers with - signed saturation, and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := 64*j - l := 32*j - IF k[j] - dst[l+31:l] := Saturate32(a[i+63:i]) - ELSE - dst[l+31:l] := 0 - FI - ENDFOR - dst[MAX:64] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + Convert packed signed 64-bit integers in "a" to packed 32-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 32*j + IF k[j] + dst[l+31:l] := Saturate32(a[i+63:i]) + ELSE + dst[l+31:l] := 0 + FI +ENDFOR +dst[MAX:64] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed signed 64-bit integers in "a" to packed 16-bit integers with - signed saturation, and store the results in "dst". - - FOR j := 0 to 3 - i := 64*j - k := 16*j - dst[k+15:k] := Saturate16(a[i+63:i]) - ENDFOR - dst[MAX:64] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + Convert packed signed 64-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst". + +FOR j := 0 to 3 + i := 64*j + k := 16*j + dst[k+15:k] := Saturate16(a[i+63:i]) +ENDFOR +dst[MAX:64] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed signed 64-bit integers in "a" to packed 16-bit integers with - signed saturation, and store the results in "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := 64*j - l := 16*j - IF k[j] - dst[l+15:l] := Saturate16(a[i+63:i]) - ELSE - dst[l+15:l] := src[l+15:l] - FI - ENDFOR - dst[MAX:64] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed signed 64-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 16*j + IF k[j] + dst[l+15:l] := Saturate16(a[i+63:i]) + ELSE + dst[l+15:l] := src[l+15:l] + FI +ENDFOR +dst[MAX:64] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- Store - - - - - Convert packed signed 64-bit integers in "a" to packed 16-bit integers with - signed saturation, and store the active results (those with their respective bit set in - writemask "k") to unaligned memory at "base_addr". - - FOR j := 0 to 3 - i := 64*j - l := 16*j - IF k[j] - MEM[base_addr+l+15:base_addr+l] := Saturate16(a[i+63:i]) - FI - ENDFOR - - - AVX512F - AVX512VL -
immintrin.h
- Convert + Store + + + + + Convert packed signed 64-bit integers in "a" to packed 16-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 3 + i := 64*j + l := 16*j + IF k[j] + MEM[base_addr+l+15:base_addr+l] := Saturate16(a[i+63:i]) + FI +ENDFOR + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed signed 64-bit integers in "a" to packed 16-bit integers with - signed saturation, and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := 64*j - l := 16*j - IF k[j] - dst[l+15:l] := Saturate16(a[i+63:i]) - ELSE - dst[l+15:l] := 0 - FI - ENDFOR - dst[MAX:64] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + Convert packed signed 64-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 16*j + IF k[j] + dst[l+15:l] := Saturate16(a[i+63:i]) + ELSE + dst[l+15:l] := 0 + FI +ENDFOR +dst[MAX:64] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed signed 64-bit integers in "a" to packed 16-bit integers with - signed saturation, and store the results in "dst". - - FOR j := 0 to 1 - i := 64*j - k := 16*j - dst[k+15:k] := Saturate16(a[i+63:i]) - ENDFOR - dst[MAX:32] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + Convert packed signed 64-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst". + +FOR j := 0 to 1 + i := 64*j + k := 16*j + dst[k+15:k] := Saturate16(a[i+63:i]) +ENDFOR +dst[MAX:32] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed signed 64-bit integers in "a" to packed 16-bit integers with - signed saturation, and store the results in "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := 64*j - l := 16*j - IF k[j] - dst[l+15:l] := Saturate16(a[i+63:i]) - ELSE - dst[l+15:l] := src[l+15:l] - FI - ENDFOR - dst[MAX:32] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed signed 64-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 16*j + IF k[j] + dst[l+15:l] := Saturate16(a[i+63:i]) + ELSE + dst[l+15:l] := src[l+15:l] + FI +ENDFOR +dst[MAX:32] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- Store - - - - - Convert packed signed 64-bit integers in "a" to packed 16-bit integers with - signed saturation, and store the active results (those with their respective bit set in - writemask "k") to unaligned memory at "base_addr". - - FOR j := 0 to 1 - i := 64*j - l := 16*j - IF k[j] - MEM[base_addr+l+15:base_addr+l] := Saturate16(a[i+63:i]) - FI - ENDFOR - - - AVX512F - AVX512VL -
immintrin.h
- Convert + Store + + + + + Convert packed signed 64-bit integers in "a" to packed 16-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 1 + i := 64*j + l := 16*j + IF k[j] + MEM[base_addr+l+15:base_addr+l] := Saturate16(a[i+63:i]) + FI +ENDFOR + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed signed 64-bit integers in "a" to packed 16-bit integers with - signed saturation, and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := 64*j - l := 16*j - IF k[j] - dst[l+15:l] := Saturate16(a[i+63:i]) - ELSE - dst[l+15:l] := 0 - FI - ENDFOR - dst[MAX:32] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + Convert packed signed 64-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 16*j + IF k[j] + dst[l+15:l] := Saturate16(a[i+63:i]) + ELSE + dst[l+15:l] := 0 + FI +ENDFOR +dst[MAX:32] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - - Sign extend packed 8-bit integers in the low 8 bytes of "a" to packed 32-bit - integers, and store the results in "dst" using writemask "k" (elements are copied from - "src" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := 32*j - l := 8*j - IF k[j] - dst[i+31:i] := SignExtend32(a[l+7:l]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
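`SignExtend32` and `SignExtend64` widen a lane by replicating its sign bit, which in Rust falls out of chained `as` casts. An illustrative model:

// Scalar model of SignExtend32 on a byte lane: reinterpret the low
// 8 bits as signed, then widen with sign extension.
fn sign_extend32(byte: u8) -> i32 {
    byte as i8 as i32
}

fn main() {
    assert_eq!(sign_extend32(0x7F), 127);
    assert_eq!(sign_extend32(0x80), -128);
    assert_eq!(sign_extend32(0xFF), -1);
}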
- Convert + + + + + Sign extend packed 8-bit integers in the low 8 bytes of "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 8*j + IF k[j] + dst[i+31:i] := SignExtend32(a[l+7:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Sign extend packed 8-bit integers in the low 8 bytes of "a" to packed 32-bit - integers, and store the results in "dst" using zeromask "k" (elements are zeroed out - when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := 32*j - l := 8*j - IF k[j] - dst[i+31:i] := SignExtend32(a[l+7:l]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + Sign extend packed 8-bit integers in the low 8 bytes of "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 8*j + IF k[j] + dst[i+31:i] := SignExtend32(a[l+7:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - - Sign extend packed 8-bit integers in the low 4 bytes of "a" to packed 32-bit - integers, and store the results in "dst" using writemask "k" (elements are copied from - "src" when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := 32*j - l := 8*j - IF k[j] - dst[i+31:i] := SignExtend32(a[l+7:l]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Sign extend packed 8-bit integers in the low 4 bytes of "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + l := 8*j + IF k[j] + dst[i+31:i] := SignExtend32(a[l+7:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Sign extend packed 8-bit integers in the low 4 bytes of "a" to packed 32-bit - integers, and store the results in "dst" using zeromask "k" (elements are zeroed out - when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := 32*j - l := 8*j - IF k[j] - dst[i+31:i] := SignExtend32(a[l+7:l]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + Sign extend packed 8-bit integers in the low 4 bytes of "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + l := 8*j + IF k[j] + dst[i+31:i] := SignExtend32(a[l+7:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - - Sign extend packed 8-bit integers in the low 4 bytes of "a" to packed 64-bit - integers, and store the results in "dst" using writemask "k" (elements are copied from - "src" when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := 64*j - l := 8*j - IF k[j] - dst[i+63:i] := SignExtend64(a[l+7:l]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Sign extend packed 8-bit integers in the low 4 bytes of "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 8*j + IF k[j] + dst[i+63:i] := SignExtend64(a[l+7:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Sign extend packed 8-bit integers in the low 4 bytes of "a" to packed 64-bit - integers, and store the results in "dst" using zeromask "k" (elements are zeroed out - when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := 64*j - l := 8*j - IF k[j] - dst[i+63:i] := SignExtend64(a[l+7:l]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + Sign extend packed 8-bit integers in the low 4 bytes of "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 8*j + IF k[j] + dst[i+63:i] := SignExtend64(a[l+7:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - - Sign extend packed 8-bit integers in the low 2 bytes of "a" to packed 64-bit - integers, and store the results in "dst" using writemask "k" (elements are copied from - "src" when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := 64*j - l := 8*j - IF k[j] - dst[i+63:i] := SignExtend64(a[l+7:l]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Sign extend packed 8-bit integers in the low 2 bytes of "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 8*j + IF k[j] + dst[i+63:i] := SignExtend64(a[l+7:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Sign extend packed 8-bit integers in the low 2 bytes of "a" to packed 64-bit - integers, and store the results in "dst" using zeromask "k" (elements are zeroed out - when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := 64*j - l := 8*j - IF k[j] - dst[i+63:i] := SignExtend64(a[l+7:l]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + Sign extend packed 8-bit integers in the low 2 bytes of "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 8*j + IF k[j] + dst[i+63:i] := SignExtend64(a[l+7:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - - Sign extend packed 32-bit integers in "a" to packed 64-bit integers, and store - the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 3 - i := 64*j - l := 32*j - IF k[j] - dst[i+63:i] := SignExtend64(a[l+31:l]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Sign extend packed 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 32*j + IF k[j] + dst[i+63:i] := SignExtend64(a[l+31:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Sign extend packed 32-bit integers in "a" to packed 64-bit integers, and store - the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding - mask bit is not set). - - FOR j := 0 to 3 - i := 64*j - l := 32*j - IF k[j] - dst[i+63:i] := SignExtend64(a[l+31:l]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + Sign extend packed 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 32*j + IF k[j] + dst[i+63:i] := SignExtend64(a[l+31:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - - Sign extend packed 32-bit integers in "a" to packed 64-bit integers, and store - the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 1 - i := 64*j - l := 32*j - IF k[j] - dst[i+63:i] := SignExtend64(a[l+31:l]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Sign extend packed 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 32*j + IF k[j] + dst[i+63:i] := SignExtend64(a[l+31:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Sign extend packed 32-bit integers in "a" to packed 64-bit integers, and store - the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding - mask bit is not set). - - FOR j := 0 to 1 - i := 64*j - l := 32*j - IF k[j] - dst[i+63:i] := SignExtend64(a[l+31:l]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + Sign extend packed 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 32*j + IF k[j] + dst[i+63:i] := SignExtend64(a[l+31:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - - Sign extend packed 16-bit integers in "a" to packed 32-bit integers, and store - the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - l := j*16 - IF k[j] - dst[i+31:i] := SignExtend32(a[l+15:l]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Sign extend packed 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + l := j*16 + IF k[j] + dst[i+31:i] := SignExtend32(a[l+15:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Sign extend packed 16-bit integers in "a" to packed 32-bit integers, and store - the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding - mask bit is not set). - - FOR j := 0 to 7 - i := 32*j - l := 16*j - IF k[j] - dst[i+31:i] := SignExtend32(a[l+15:l]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + Sign extend packed 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 16*j + IF k[j] + dst[i+31:i] := SignExtend32(a[l+15:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - - Sign extend packed 16-bit integers in "a" to packed 32-bit integers, and store - the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - l := j*16 - IF k[j] - dst[i+31:i] := SignExtend32(a[l+15:l]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Sign extend packed 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + l := j*16 + IF k[j] + dst[i+31:i] := SignExtend32(a[l+15:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Sign extend packed 16-bit integers in "a" to packed 32-bit integers, and store - the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding - mask bit is not set). - - FOR j := 0 to 3 - i := 32*j - l := 16*j - IF k[j] - dst[i+31:i] := SignExtend32(a[l+15:l]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + Sign extend packed 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + l := 16*j + IF k[j] + dst[i+31:i] := SignExtend32(a[l+15:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - - Sign extend packed 16-bit integers in the low 8 bytes of "a" to packed 64-bit - integers, and store the results in "dst" using writemask "k" (elements are copied from - "src" when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := 64*j - l := 16*j - IF k[j] - dst[i+63:i] := SignExtend64(a[l+15:l]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Sign extend packed 16-bit integers in the low 8 bytes of "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 16*j + IF k[j] + dst[i+63:i] := SignExtend64(a[l+15:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Sign extend packed 16-bit integers in the low 8 bytes of "a" to packed 64-bit - integers, and store the results in "dst" using zeromask "k" (elements are zeroed out - when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := 64*j - l := 16*j - IF k[j] - dst[i+63:i] := SignExtend64(a[l+15:l]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + Sign extend packed 16-bit integers in the low 8 bytes of "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 16*j + IF k[j] + dst[i+63:i] := SignExtend64(a[l+15:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - - Sign extend packed 16-bit integers in the low 4 bytes of "a" to packed 64-bit - integers, and store the results in "dst" using writemask "k" (elements are copied from - "src" when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := 64*j - l := 16*j - IF k[j] - dst[i+63:i] := SignExtend64(a[l+15:l]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Sign extend packed 16-bit integers in the low 4 bytes of "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 16*j + IF k[j] + dst[i+63:i] := SignExtend64(a[l+15:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Sign extend packed 16-bit integers in the low 4 bytes of "a" to packed 64-bit - integers, and store the results in "dst" using zeromask "k" (elements are zeroed out - when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := 64*j - l := 16*j - IF k[j] - dst[i+63:i] := SignExtend64(a[l+15:l]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + Sign extend packed 16-bit integers in the low 4 bytes of "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 16*j + IF k[j] + dst[i+63:i] := SignExtend64(a[l+15:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
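The four 16-to-64-bit variants above differ only in vector width and in whether inactive lanes merge from "src" or become zero. A sketch of the two 128-bit forms (wrapper name and masks illustrative):

    use core::arch::x86_64::*;

    #[target_feature(enable = "avx512f,avx512vl")]
    unsafe fn sext16_to_64(src: __m128i, a: __m128i) -> (__m128i, __m128i) {
        // Only the low 4 bytes (two i16 lanes) of `a` participate.
        let merged = _mm_mask_cvtepi16_epi64(src, 0b01, a); // lane 1 copied from src
        let zeroed = _mm_maskz_cvtepi16_epi64(0b01, a);     // lane 1 zeroed
        (merged, zeroed)
    }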
- - - Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit - integers with unsigned saturation, and store the results in "dst". - - FOR j := 0 to 7 - i := 32*j - k := 8*j - dst[k+7:k] := SaturateU8(a[i+31:i]) - ENDFOR - dst[MAX:64] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst". + +FOR j := 0 to 7 + i := 32*j + k := 8*j + dst[k+7:k] := SaturateU8(a[i+31:i]) +ENDFOR +dst[MAX:64] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit - integers with unsigned saturation, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := 32*j - l := 8*j - IF k[j] - dst[l+7:l] := SaturateU8(a[i+31:i]) - ELSE - dst[l+7:l] := src[l+7:l] - FI - ENDFOR - dst[MAX:64] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 8*j + IF k[j] + dst[l+7:l] := SaturateU8(a[i+31:i]) + ELSE + dst[l+7:l] := src[l+7:l] + FI +ENDFOR +dst[MAX:64] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- Store - - - - - Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit - integers with unsigned saturation, and store the active results (those with their - respective bit set in writemask "k") to unaligned memory at "base_addr". - - FOR j := 0 to 7 - i := 32*j - l := 8*j - IF k[j] - MEM[base_addr+l+7:base_addr+l] := SaturateU8(a[i+31:i]) - FI - ENDFOR - - - AVX512F - AVX512VL -
immintrin.h
- Convert + Store + + + + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 7 + i := 32*j + l := 8*j + IF k[j] + MEM[base_addr+l+7:base_addr+l] := SaturateU8(a[i+31:i]) + FI +ENDFOR + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
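Unlike the register forms, the store variant writes only the active bytes to memory and leaves the rest of the buffer untouched. A hedged sketch of calling it on an 8-byte buffer (wrapper and mask are illustrative; the pointer is cast so inference picks whatever pointee type the intrinsic declares):

    use core::arch::x86_64::*;

    #[target_feature(enable = "avx512f,avx512vl")]
    unsafe fn store_sat_u8(a: __m256i, out: &mut [u8; 8]) {
        // Each of the 8 active u32 lanes saturates to u8 and is written
        // to out[j]; a clear mask bit would skip that byte entirely.
        _mm256_mask_cvtusepi32_storeu_epi8(out.as_mut_ptr() as *mut _, 0xFF, a);
    }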
- - - - Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit - integers with unsigned saturation, and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := 32*j - l := 8*j - IF k[j] - dst[l+7:l] := SaturateU8(a[i+31:i]) - ELSE - dst[l+7:l] := 0 - FI - ENDFOR - dst[MAX:64] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 8*j + IF k[j] + dst[l+7:l] := SaturateU8(a[i+31:i]) + ELSE + dst[l+7:l] := 0 + FI +ENDFOR +dst[MAX:64] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit - integers with unsigned saturation, and store the results in "dst". - - FOR j := 0 to 3 - i := 32*j - k := 8*j - dst[k+7:k] := SaturateU8(a[i+31:i]) - ENDFOR - dst[MAX:32] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst". + +FOR j := 0 to 3 + i := 32*j + k := 8*j + dst[k+7:k] := SaturateU8(a[i+31:i]) +ENDFOR +dst[MAX:32] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit - integers with unsigned saturation, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := 32*j - l := 8*j - IF k[j] - dst[l+7:l] := SaturateU8(a[i+31:i]) - ELSE - dst[l+7:l] := src[l+7:l] - FI - ENDFOR - dst[MAX:32] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + l := 8*j + IF k[j] + dst[l+7:l] := SaturateU8(a[i+31:i]) + ELSE + dst[l+7:l] := src[l+7:l] + FI +ENDFOR +dst[MAX:32] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- Store - - - - - Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit - integers with unsigned saturation, and store the active results (those with their - respective bit set in writemask "k") to unaligned memory at "base_addr". - - FOR j := 0 to 3 - i := 32*j - l := 8*j - IF k[j] - MEM[base_addr+l+7:base_addr+l] := SaturateU8(a[i+31:i]) - FI - ENDFOR - - - AVX512F - AVX512VL -
immintrin.h
- Convert + Store + + + + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 3 + i := 32*j + l := 8*j + IF k[j] + MEM[base_addr+l+7:base_addr+l] := SaturateU8(a[i+31:i]) + FI +ENDFOR + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit - integers with unsigned saturation, and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := 32*j - l := 8*j - IF k[j] - dst[l+7:l] := SaturateU8(a[i+31:i]) - ELSE - dst[l+7:l] := 0 - FI - ENDFOR - dst[MAX:32] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + l := 8*j + IF k[j] + dst[l+7:l] := SaturateU8(a[i+31:i]) + ELSE + dst[l+7:l] := 0 + FI +ENDFOR +dst[MAX:32] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
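A quick way to see SaturateU8 in action is the unmasked 128-bit form; the input values here are picked purely to trip the clamp:

    use core::arch::x86_64::*;

    #[target_feature(enable = "avx512f,avx512vl")]
    unsafe fn saturate_demo() -> __m128i {
        // 300 and u32::MAX both exceed u8::MAX, so they clamp to 255; the
        // packed result occupies the low 4 bytes (dst[MAX:32] := 0 above).
        let a = _mm_set_epi32(300, u32::MAX as i32, 7, 0);
        _mm_cvtusepi32_epi8(a)
    }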
- - - Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit - integers with unsigned saturation, and store the results in "dst". - - FOR j := 0 to 7 - i := 32*j - k := 16*j - dst[k+15:k] := SaturateU16(a[i+31:i]) - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst". + +FOR j := 0 to 7 + i := 32*j + k := 16*j + dst[k+15:k] := SaturateU16(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit - integers with unsigned saturation, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := 32*j - l := 16*j - IF k[j] - dst[l+15:l] := SaturateU16(a[i+31:i]) - ELSE - dst[l+15:l] := src[l+15:l] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 16*j + IF k[j] + dst[l+15:l] := SaturateU16(a[i+31:i]) + ELSE + dst[l+15:l] := src[l+15:l] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- Store - - - - - Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit - integers with unsigned saturation, and store the active results (those with their - respective bit set in writemask "k") to unaligned memory at "base_addr". - - FOR j := 0 to 7 - i := 32*j - l := 16*j - IF k[j] - MEM[base_addr+l+15:base_addr+l] := SaturateU16(a[i+31:i]) - FI - ENDFOR - - - AVX512F - AVX512VL -
immintrin.h
- Convert + Store + + + + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 7 + i := 32*j + l := 16*j + IF k[j] + MEM[base_addr+l+15:base_addr+l] := SaturateU16(a[i+31:i]) + FI +ENDFOR + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit - integers with unsigned saturation, and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := 32*j - l := 16*j - IF k[j] - dst[l+15:l] := SaturateU16(a[i+31:i]) - ELSE - dst[l+15:l] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 16*j + IF k[j] + dst[l+15:l] := SaturateU16(a[i+31:i]) + ELSE + dst[l+15:l] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit - integers with unsigned saturation, and store the results in "dst". - - FOR j := 0 to 3 - i := 32*j - k := 16*j - dst[k+15:k] := SaturateU16(a[i+31:i]) - ENDFOR - dst[MAX:64] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst". + +FOR j := 0 to 3 + i := 32*j + k := 16*j + dst[k+15:k] := SaturateU16(a[i+31:i]) +ENDFOR +dst[MAX:64] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit - integers with unsigned saturation, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := 32*j - l := 16*j - IF k[j] - dst[l+15:l] := SaturateU16(a[i+31:i]) - ELSE - dst[l+15:l] := src[l+15:l] - FI - ENDFOR - dst[MAX:64] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + l := 16*j + IF k[j] + dst[l+15:l] := SaturateU16(a[i+31:i]) + ELSE + dst[l+15:l] := src[l+15:l] + FI +ENDFOR +dst[MAX:64] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- Store - - - - - Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit - integers with unsigned saturation, and store the active results (those with their - respective bit set in writemask "k") to unaligned memory at "base_addr". - - FOR j := 0 to 3 - i := 32*j - l := 16*j - IF k[j] - MEM[base_addr+l+15:base_addr+l] := SaturateU16(a[i+31:i]) - FI - ENDFOR - - - AVX512F - AVX512VL -
immintrin.h
- Convert + Store + + + + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 3 + i := 32*j + l := 16*j + IF k[j] + MEM[base_addr+l+15:base_addr+l] := SaturateU16(a[i+31:i]) + FI +ENDFOR + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit - integers with unsigned saturation, and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := 32*j - l := 16*j - IF k[j] - dst[l+15:l] := SaturateU16(a[i+31:i]) - ELSE - dst[l+15:l] := 0 - FI - ENDFOR - dst[MAX:64] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + l := 16*j + IF k[j] + dst[l+15:l] := SaturateU16(a[i+31:i]) + ELSE + dst[l+15:l] := 0 + FI +ENDFOR +dst[MAX:64] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
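The 32-to-16-bit narrowing follows the same pattern with SaturateU16; for example the zero-masked 256-bit form (mask value illustrative):

    use core::arch::x86_64::*;

    #[target_feature(enable = "avx512f,avx512vl")]
    unsafe fn narrow_u32_to_u16(a: __m256i) -> __m128i {
        // The low four u32 lanes saturate to u16; lanes 4..8 are zeroed.
        _mm256_maskz_cvtusepi32_epi16(0b0000_1111, a)
    }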
- - - Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit - integers with unsigned saturation, and store the results in "dst". - - FOR j := 0 to 3 - i := 64*j - k := 8*j - dst[k+7:k] := SaturateU8(a[i+63:i]) - ENDFOR - dst[MAX:32] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst". + +FOR j := 0 to 3 + i := 64*j + k := 8*j + dst[k+7:k] := SaturateU8(a[i+63:i]) +ENDFOR +dst[MAX:32] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit - integers with unsigned saturation, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := 64*j - l := 8*j - IF k[j] - dst[l+7:l] := SaturateU8(a[i+63:i]) - ELSE - dst[l+7:l] := src[l+7:l] - FI - ENDFOR - dst[MAX:32] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 8*j + IF k[j] + dst[l+7:l] := SaturateU8(a[i+63:i]) + ELSE + dst[l+7:l] := src[l+7:l] + FI +ENDFOR +dst[MAX:32] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- Store - - - - - Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit - integers with unsigned saturation, and store the active results (those with their - respective bit set in writemask "k") to unaligned memory at "base_addr". - - FOR j := 0 to 3 - i := 64*j - l := 8*j - IF k[j] - MEM[base_addr+l+7:base_addr+l] := SaturateU8(a[i+63:i]) - FI - ENDFOR - - - AVX512F - AVX512VL -
immintrin.h
- Convert + Store + + + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 3 + i := 64*j + l := 8*j + IF k[j] + MEM[base_addr+l+7:base_addr+l] := SaturateU8(a[i+63:i]) + FI +ENDFOR + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit - integers with unsigned saturation, and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := 64*j - l := 8*j - IF k[j] - dst[l+7:l] := SaturateU8(a[i+63:i]) - ELSE - dst[l+7:l] := 0 - FI - ENDFOR - dst[MAX:32] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 8*j + IF k[j] + dst[l+7:l] := SaturateU8(a[i+63:i]) + ELSE + dst[l+7:l] := 0 + FI +ENDFOR +dst[MAX:32] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit - integers with unsigned saturation, and store the results in "dst". - - FOR j := 0 to 1 - i := 64*j - k := 8*j - dst[k+7:k] := SaturateU8(a[i+63:i]) - ENDFOR - dst[MAX:16] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst". + +FOR j := 0 to 1 + i := 64*j + k := 8*j + dst[k+7:k] := SaturateU8(a[i+63:i]) +ENDFOR +dst[MAX:16] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit - integers with unsigned saturation, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := 64*j - l := 8*j - IF k[j] - dst[l+7:l] := SaturateU8(a[i+63:i]) - ELSE - dst[l+7:l] := src[l+7:l] - FI - ENDFOR - dst[MAX:16] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 8*j + IF k[j] + dst[l+7:l] := SaturateU8(a[i+63:i]) + ELSE + dst[l+7:l] := src[l+7:l] + FI +ENDFOR +dst[MAX:16] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- Store - - - - - Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit - integers with unsigned saturation, and store the active results (those with their - respective bit set in writemask "k") to unaligned memory at "base_addr". - - FOR j := 0 to 1 - i := 64*j - l := 8*j - IF k[j] - MEM[base_addr+l+7:base_addr+l] := SaturateU8(a[i+63:i]) - FI - ENDFOR - - - AVX512F - AVX512VL -
immintrin.h
- Convert + Store + + + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 1 + i := 64*j + l := 8*j + IF k[j] + MEM[base_addr+l+7:base_addr+l] := SaturateU8(a[i+63:i]) + FI +ENDFOR + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit - integers with unsigned saturation, and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := 64*j - l := 8*j - IF k[j] - dst[l+7:l] := SaturateU8(a[i+63:i]) - ELSE - dst[l+7:l] := 0 - FI - ENDFOR - dst[MAX:16] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 8*j + IF k[j] + dst[l+7:l] := SaturateU8(a[i+63:i]) + ELSE + dst[l+7:l] := 0 + FI +ENDFOR +dst[MAX:16] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
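With 64-bit sources the result gets very narrow: four u64 lanes fit in 4 bytes. A sketch of the unmasked 256-bit form:

    use core::arch::x86_64::*;

    #[target_feature(enable = "avx512f,avx512vl")]
    unsafe fn narrow_u64_to_u8(a: __m256i) -> __m128i {
        // Four saturated bytes land in the low 32 bits of the result;
        // everything above is zeroed (dst[MAX:32] := 0).
        _mm256_cvtusepi64_epi8(a)
    }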
- - - Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit - integers with unsigned saturation, and store the results in "dst". - - FOR j := 0 to 3 - i := 64*j - k := 32*j - dst[k+31:k] := SaturateU32(a[i+63:i]) - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit integers with unsigned saturation, and store the results in "dst". + +FOR j := 0 to 3 + i := 64*j + k := 32*j + dst[k+31:k] := SaturateU32(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit - integers with unsigned saturation, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := 64*j - l := 32*j - IF k[j] - dst[l+31:l] := SaturateU32(a[i+63:i]) - ELSE - dst[l+31:l] := src[l+31:l] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 32*j + IF k[j] + dst[l+31:l] := SaturateU32(a[i+63:i]) + ELSE + dst[l+31:l] := src[l+31:l] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- Store - - - - - Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit - integers with unsigned saturation, and store the active results (those with their - respective bit set in writemask "k") to unaligned memory at "base_addr". - - FOR j := 0 to 3 - i := 64*j - l := 32*j - IF k[j] - MEM[base_addr+l+31:base_addr+l] := SaturateU32(a[i+63:i]) - FI - ENDFOR - - - AVX512F - AVX512VL -
immintrin.h
- Convert + Store + + + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 3 + i := 64*j + l := 32*j + IF k[j] + MEM[base_addr+l+31:base_addr+l] := SaturateU32(a[i+63:i]) + FI +ENDFOR + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit - integers with unsigned saturation, and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := 64*j - l := 32*j - IF k[j] - dst[l+31:l] := SaturateU32(a[i+63:i]) - ELSE - dst[l+31:l] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 32*j + IF k[j] + dst[l+31:l] := SaturateU32(a[i+63:i]) + ELSE + dst[l+31:l] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit - integers with unsigned saturation, and store the results in "dst". - - FOR j := 0 to 1 - i := 64*j - k := 32*j - dst[k+31:k] := SaturateU32(a[i+63:i]) - ENDFOR - dst[MAX:64] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit integers with unsigned saturation, and store the results in "dst". + +FOR j := 0 to 1 + i := 64*j + k := 32*j + dst[k+31:k] := SaturateU32(a[i+63:i]) +ENDFOR +dst[MAX:64] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit - integers with unsigned saturation, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := 64*j - l := 32*j - IF k[j] - dst[l+31:l] := SaturateU32(a[i+63:i]) - ELSE - dst[l+31:l] := src[l+31:l] - FI - ENDFOR - dst[MAX:64] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 32*j + IF k[j] + dst[l+31:l] := SaturateU32(a[i+63:i]) + ELSE + dst[l+31:l] := src[l+31:l] + FI +ENDFOR +dst[MAX:64] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- Store - - - - - Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit - integers with unsigned saturation, and store the active results (those with their - respective bit set in writemask "k") to unaligned memory at "base_addr". - - FOR j := 0 to 1 - i := 64*j - l := 32*j - IF k[j] - MEM[base_addr+l+31:base_addr+l] := SaturateU32(a[i+63:i]) - FI - ENDFOR - - - AVX512F - AVX512VL -
immintrin.h
- Convert + Store + + + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 1 + i := 64*j + l := 32*j + IF k[j] + MEM[base_addr+l+31:base_addr+l] := SaturateU32(a[i+63:i]) + FI +ENDFOR + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit - integers with unsigned saturation, and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := 64*j - l := 32*j - IF k[j] - dst[l+31:l] := SaturateU32(a[i+63:i]) - ELSE - dst[l+31:l] := 0 - FI - ENDFOR - dst[MAX:64] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 32*j + IF k[j] + dst[l+31:l] := SaturateU32(a[i+63:i]) + ELSE + dst[l+31:l] := 0 + FI +ENDFOR +dst[MAX:64] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
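The 64-to-32-bit writemask form merges inactive lanes from "src" rather than zeroing them; a sketch with an illustrative mask:

    use core::arch::x86_64::*;

    #[target_feature(enable = "avx512f,avx512vl")]
    unsafe fn narrow_u64_to_u32(src: __m128i, a: __m256i) -> __m128i {
        // Lanes 1 and 3 are saturated from `a`; lanes 0 and 2 keep `src`.
        _mm256_mask_cvtusepi64_epi32(src, 0b1010, a)
    }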
- - - Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit - integers with unsigned saturation, and store the results in "dst". - - FOR j := 0 to 3 - i := 64*j - k := 16*j - dst[k+15:k] := SaturateU16(a[i+63:i]) - ENDFOR - dst[MAX:64] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst". + +FOR j := 0 to 3 + i := 64*j + k := 16*j + dst[k+15:k] := SaturateU16(a[i+63:i]) +ENDFOR +dst[MAX:64] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit - integers with unsigned saturation, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := 64*j - l := 16*j - IF k[j] - dst[l+15:l] := SaturateU16(a[i+63:i]) - ELSE - dst[l+15:l] := src[l+15:l] - FI - ENDFOR - dst[MAX:64] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 16*j + IF k[j] + dst[l+15:l] := SaturateU16(a[i+63:i]) + ELSE + dst[l+15:l] := src[l+15:l] + FI +ENDFOR +dst[MAX:64] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- Store - - - - - Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit - integers with unsigned saturation, and store the active results (those with their - respective bit set in writemask "k") to unaligned memory at "base_addr". - - FOR j := 0 to 3 - i := 64*j - l := 16*j - IF k[j] - MEM[base_addr+l+15:base_addr+l] := SaturateU16(a[i+63:i]) - FI - ENDFOR - - - AVX512F - AVX512VL -
immintrin.h
- Convert + Store + + + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 3 + i := 64*j + l := 16*j + IF k[j] + MEM[base_addr+l+15:base_addr+l] := SaturateU16(a[i+63:i]) + FI +ENDFOR + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit - integers with unsigned saturation, and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := 64*j - l := 16*j - IF k[j] - dst[l+15:l] := SaturateU16(a[i+63:i]) - ELSE - dst[l+15:l] := 0 - FI - ENDFOR - dst[MAX:64] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 16*j + IF k[j] + dst[l+15:l] := SaturateU16(a[i+63:i]) + ELSE + dst[l+15:l] := 0 + FI +ENDFOR +dst[MAX:64] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit - integers with unsigned saturation, and store the results in "dst". - - FOR j := 0 to 1 - i := 64*j - k := 16*j - dst[k+15:k] := SaturateU16(a[i+63:i]) - ENDFOR - dst[MAX:32] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst". + +FOR j := 0 to 1 + i := 64*j + k := 16*j + dst[k+15:k] := SaturateU16(a[i+63:i]) +ENDFOR +dst[MAX:32] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit - integers with unsigned saturation, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := 64*j - l := 16*j - IF k[j] - dst[l+15:l] := SaturateU16(a[i+63:i]) - ELSE - dst[l+15:l] := src[l+15:l] - FI - ENDFOR - dst[MAX:32] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 16*j + IF k[j] + dst[l+15:l] := SaturateU16(a[i+63:i]) + ELSE + dst[l+15:l] := src[l+15:l] + FI +ENDFOR +dst[MAX:32] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- Store - - - - - Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit - integers with unsigned saturation, and store the active results (those with their - respective bit set in writemask "k") to unaligned memory at "base_addr". - - FOR j := 0 to 1 - i := 64*j - l := 16*j - IF k[j] - MEM[base_addr+l+15:base_addr+l] := SaturateU16(a[i+63:i]) - FI - ENDFOR - - - AVX512F - AVX512VL -
immintrin.h
- Convert + Store + + + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 1 + i := 64*j + l := 16*j + IF k[j] + MEM[base_addr+l+15:base_addr+l] := SaturateU16(a[i+63:i]) + FI +ENDFOR + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit - integers with unsigned saturation, and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := 64*j - l := 16*j - IF k[j] - dst[l+15:l] := SaturateU16(a[i+63:i]) - ELSE - dst[l+15:l] := 0 - FI - ENDFOR - dst[MAX:32] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 16*j + IF k[j] + dst[l+15:l] := SaturateU16(a[i+63:i]) + ELSE + dst[l+15:l] := 0 + FI +ENDFOR +dst[MAX:32] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
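And the 64-to-16-bit case, here in the smallest (128-bit) variant with just two source lanes:

    use core::arch::x86_64::*;

    #[target_feature(enable = "avx512f,avx512vl")]
    unsafe fn narrow_u64_to_u16(a: __m128i) -> __m128i {
        // Two u64 lanes saturate to u16 and pack into the low 4 bytes.
        _mm_cvtusepi64_epi16(a)
    }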
- - - - - Zero extend packed unsigned 8-bit integers in the low 8 bytes of "a" to packed - 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := 32*j - l := 8*j - IF k[j] - dst[i+31:i] := ZeroExtend32(a[l+7:l]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Zero extend packed unsigned 8-bit integers in the low 8 bytes of "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 8*j + IF k[j] + dst[i+31:i] := ZeroExtend32(a[l+7:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Zero extend packed unsigned 8-bit integers in the low 8 bytes of "a" to packed - 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := 32*j - l := 8*j - IF k[j] - dst[i+31:i] := ZeroExtend32(a[l+7:l]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + Zero extend packed unsigned 8-bit integers in the low 8 bytes of "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 8*j + IF k[j] + dst[i+31:i] := ZeroExtend32(a[l+7:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - - Zero extend packed unsigned 8-bit integers in the low 4 bytes of "a" to packed - 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := 32*j - l := 8*j - IF k[j] - dst[i+31:i] := ZeroExtend32(a[l+7:l]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Zero extend packed unsigned 8-bit integers in the low 4 bytes of "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + l := 8*j + IF k[j] + dst[i+31:i] := ZeroExtend32(a[l+7:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Zero extend packed unsigned 8-bit integers in th elow 4 bytes of "a" to packed - 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := 32*j - l := 8*j - IF k[j] - dst[i+31:i] := ZeroExtend32(a[l+7:l]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert
 + 
 + 
 + 
 + Zero extend packed unsigned 8-bit integers in the low 4 bytes of "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). 
 +
+FOR j := 0 to 3
+	i := 32*j
+	l := 8*j
+	IF k[j]
+		dst[i+31:i] := ZeroExtend32(a[l+7:l])
+	ELSE
+		dst[i+31:i] := 0
+	FI
+ENDFOR
+dst[MAX:128] := 0
+
+ 
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Convert
- - - - - Zero extend packed unsigned 8-bit integers in the low 4 bytes of "a" to packed - 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := 64*j - l := 8*j - IF k[j] - dst[i+63:i] := ZeroExtend64(a[l+7:l]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Zero extend packed unsigned 8-bit integers in the low 4 bytes of "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 8*j + IF k[j] + dst[i+63:i] := ZeroExtend64(a[l+7:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Zero extend packed unsigned 8-bit integers in the low 4 bytes of "a" to packed - 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := 64*j - l := 8*j - IF k[j] - dst[i+63:i] := ZeroExtend64(a[l+7:l]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + Zero extend packed unsigned 8-bit integers in the low 4 bytes of "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 8*j + IF k[j] + dst[i+63:i] := ZeroExtend64(a[l+7:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - - Zero extend packed unsigned 8-bit integers in the low 2 bytes of "a" to packed - 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := 64*j - l := 8*j - IF k[j] - dst[i+63:i] := ZeroExtend64(a[l+7:l]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Zero extend packed unsigned 8-bit integers in the low 2 bytes of "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 8*j + IF k[j] + dst[i+63:i] := ZeroExtend64(a[l+7:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Zero extend packed unsigned 8-bit integers in the low 2 bytes of "a" to packed - 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := 64*j - l := 8*j - IF k[j] - dst[i+63:i] := ZeroExtend64(a[l+7:l]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + Zero extend packed unsigned 8-bit integers in the low 2 bytes of "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 8*j + IF k[j] + dst[i+63:i] := ZeroExtend64(a[l+7:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
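Zero extension never saturates, so masking is the only interesting knob. A sketch of the 256-bit u8-to-u32 writemask form (wrapper and mask illustrative):

    use core::arch::x86_64::*;

    #[target_feature(enable = "avx512f,avx512vl")]
    unsafe fn zext_u8_to_u32(src: __m256i, a: __m128i) -> __m256i {
        // The low 8 bytes of `a` widen to eight u32 lanes; the clear
        // mask bits pull lanes 0..4 from `src` instead.
        _mm256_mask_cvtepu8_epi32(src, 0b1111_0000, a)
    }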
- - - - - Zero extend packed unsigned 32-bit integers in "a" to packed 64-bit integers, - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := 64*j - l := 32*j - IF k[j] - dst[i+63:i] := ZeroExtend64(a[l+31:l]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Zero extend packed unsigned 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 32*j + IF k[j] + dst[i+63:i] := ZeroExtend64(a[l+31:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Zero extend packed unsigned 32-bit integers in "a" to packed 64-bit integers, - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 3 - i := 64*j - l := 32*j - IF k[j] - dst[i+63:i] := ZeroExtend64(a[l+31:l]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + Zero extend packed unsigned 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 32*j + IF k[j] + dst[i+63:i] := ZeroExtend64(a[l+31:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - - Zero extend packed unsigned 32-bit integers in "a" to packed 64-bit integers, - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := 64*j - l := 32*j - IF k[j] - dst[i+63:i] := ZeroExtend64(a[l+31:l]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Zero extend packed unsigned 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 32*j + IF k[j] + dst[i+63:i] := ZeroExtend64(a[l+31:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Zero extend packed unsigned 32-bit integers in "a" to packed 64-bit integers, - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 1 - i := 64*j - l := 32*j - IF k[j] - dst[i+63:i] := ZeroExtend64(a[l+31:l]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + Zero extend packed unsigned 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 32*j + IF k[j] + dst[i+63:i] := ZeroExtend64(a[l+31:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
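The u32-to-u64 widening consumes a whole 128-bit input, so no "low N bytes" caveat applies; a zero-masked sketch:

    use core::arch::x86_64::*;

    #[target_feature(enable = "avx512f,avx512vl")]
    unsafe fn zext_u32_to_u64(a: __m128i) -> __m256i {
        // Lanes 0..3 are zero-extended; lane 3 is zeroed by the mask.
        _mm256_maskz_cvtepu32_epi64(0b0111, a)
    }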
- - - - - Zero extend packed unsigned 16-bit integers in "a" to packed 32-bit integers, - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := 32*j - l := 16*j - IF k[j] - dst[i+31:i] := ZeroExtend32(a[l+15:l]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Zero extend packed unsigned 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 16*j + IF k[j] + dst[i+31:i] := ZeroExtend32(a[l+15:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Zero extend packed unsigned 16-bit integers in "a" to packed 32-bit integers, - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := 32*j - l := 16*j - IF k[j] - dst[i+31:i] := ZeroExtend32(a[l+15:l]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + Zero extend packed unsigned 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 16*j + IF k[j] + dst[i+31:i] := ZeroExtend32(a[l+15:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - - Zero extend packed unsigned 16-bit integers in "a" to packed 32-bit integers, - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := 32*j - l := 16*j - IF k[j] - dst[i+31:i] := ZeroExtend32(a[l+15:l]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Zero extend packed unsigned 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + l := 16*j + IF k[j] + dst[i+31:i] := ZeroExtend32(a[l+15:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Zero extend packed unsigned 16-bit integers in "a" to packed 32-bit integers, - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 3 - i := 32*j - l := 16*j - IF k[j] - dst[i+31:i] := ZeroExtend32(a[l+15:l]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + Zero extend packed unsigned 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 32*j + l := 16*j + IF k[j] + dst[i+31:i] := ZeroExtend32(a[l+15:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - - Zero extend packed unsigned 16-bit integers in the low 8 bytes of "a" to packed - 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := 64*j - l := 16*j - IF k[j] - dst[i+63:i] := ZeroExtend64(a[l+15:l]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Zero extend packed unsigned 16-bit integers in the low 8 bytes of "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 16*j + IF k[j] + dst[i+63:i] := ZeroExtend64(a[l+15:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Zero extend packed unsigned 16-bit integers in the low 8 bytes of "a" to packed - 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := 64*j - l := 16*j - IF k[j] - dst[i+63:i] := ZeroExtend64(a[l+15:l]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + Zero extend packed unsigned 16-bit integers in the low 8 bytes of "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := 64*j + l := 16*j + IF k[j] + dst[i+63:i] := ZeroExtend64(a[l+15:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - - Zero extend packed unsigned 16-bit integers in the low 4 bytes of "a" to packed - 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := 64*j - l := 16*j - IF k[j] - dst[i+63:i] := ZeroExtend64(a[l+15:l]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + + Zero extend packed unsigned 16-bit integers in the low 4 bytes of "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 16*j + IF k[j] + dst[i+63:i] := ZeroExtend64(a[l+15:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
- - - - Zero extend packed unsigned 16-bit integers in the low 4 bytes of "a" to packed - 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := 64*j - l := 16*j - IF k[j] - dst[i+63:i] := ZeroExtend64(a[l+15:l]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Convert + + + + Zero extend packed unsigned 16-bit integers in the low 4 bytes of "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := 64*j + l := 16*j + IF k[j] + dst[i+63:i] := ZeroExtend64(a[l+15:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Convert
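The u16 widenings mirror the signed versions earlier in this file; the 16-to-64 form again reads only the low bytes of the source. An illustrative zero-masked sketch of both widths:

    use core::arch::x86_64::*;

    #[target_feature(enable = "avx512f,avx512vl")]
    unsafe fn zext_u16(a: __m128i) -> (__m256i, __m128i) {
        let to_u32 = _mm256_maskz_cvtepu16_epi32(0xFF, a); // all 8 lanes
        let to_u64 = _mm_maskz_cvtepu16_epi64(0b11, a);    // low 4 bytes only
        (to_u32, to_u64)
    }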
- - - - - Load contiguous active double-precision (64-bit) floating-point elements from - unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and - store the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - m := 0 - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := MEM[mem_addr+m+63:mem_addr+m] - m := m + 64 - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Load + + + + + Load contiguous active double-precision (64-bit) floating-point elements from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[mem_addr+m+63:mem_addr+m] + m := m + 64 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Load
- - - - Load contiguous active double-precision (64-bit) floating-point elements from - unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and - store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - m := 0 - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := MEM[mem_addr+m+63:mem_addr+m] - m := m + 64 - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Load + + + + Load contiguous active double-precision (64-bit) floating-point elements from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[mem_addr+m+63:mem_addr+m] + m := m + 64 + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Load
- - - - - Load contiguous active double-precision (64-bit) floating-point elements from - unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and - store the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - m := 0 - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := MEM[mem_addr+m+63:mem_addr+m] - m := m + 64 - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Load + + + + + Load contiguous active double-precision (64-bit) floating-point elements from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[mem_addr+m+63:mem_addr+m] + m := m + 64 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Load
- - - - Load contiguous active double-precision (64-bit) floating-point elements from - unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and - store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - m := 0 - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := MEM[mem_addr+m+63:mem_addr+m] - m := m + 64 - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Load + + + + Load contiguous active double-precision (64-bit) floating-point elements from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[mem_addr+m+63:mem_addr+m] + m := m + 64 + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Load
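Expand loads read one contiguous element per set mask bit and scatter them into the active lanes, which makes them useful for decompressing sparse data. A sketch of the 256-bit writemask form (slice and mask are illustrative, and the pointer is cast so inference matches the intrinsic's declared pointee type):

    use core::arch::x86_64::*;

    #[target_feature(enable = "avx512f,avx512vl")]
    unsafe fn expand_pd(src: __m256d, data: &[f64]) -> __m256d {
        // Two mask bits are set, so exactly data[0] and data[1] are read;
        // they land in lanes 0 and 2, while lanes 1 and 3 come from `src`.
        debug_assert!(data.len() >= 2);
        _mm256_mask_expandloadu_pd(src, 0b0101, data.as_ptr() as *const _)
    }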
Load contiguous active single-precision (32-bit) floating-point elements from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

m := 0
FOR j := 0 to 7
    i := j*32
    IF k[j]
        dst[i+31:i] := MEM[mem_addr+m+31:mem_addr+m]
        m := m + 32
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:256] := 0

AVX512F
AVX512VL
immintrin.h
Load

Load contiguous active single-precision (32-bit) floating-point elements from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

m := 0
FOR j := 0 to 7
    i := j*32
    IF k[j]
        dst[i+31:i] := MEM[mem_addr+m+31:mem_addr+m]
        m := m + 32
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

AVX512F
AVX512VL
immintrin.h
Load

Load contiguous active single-precision (32-bit) floating-point elements from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

m := 0
FOR j := 0 to 3
    i := j*32
    IF k[j]
        dst[i+31:i] := MEM[mem_addr+m+31:mem_addr+m]
        m := m + 32
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:128] := 0

AVX512F
AVX512VL
immintrin.h
Load

Load contiguous active single-precision (32-bit) floating-point elements from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

m := 0
FOR j := 0 to 3
    i := j*32
    IF k[j]
        dst[i+31:i] := MEM[mem_addr+m+31:mem_addr+m]
        m := m + 32
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

AVX512F
AVX512VL
immintrin.h
Load
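The expand-load pseudocode above reads elements contiguously from memory but scatters them into the active lanes of "dst". A minimal scalar sketch of that semantics in Rust, modelling the 256-bit f32 variant; the helper name and the use of a plain array for the register are illustrative, not the crate's API:

/// Reference emulation of `_mm256_mask_expandloadu_ps` semantics:
/// contiguous elements from `mem` are expanded into the lanes of `src`
/// whose mask bit is set; inactive lanes keep `src` (writemask form).
fn mask_expandloadu_ps_ref(src: [f32; 8], k: u8, mem: &[f32]) -> [f32; 8] {
    let mut dst = [0.0f32; 8];
    let mut m = 0; // index of the next contiguous element in memory
    for j in 0..8 {
        if (k >> j) & 1 == 1 {
            dst[j] = mem[m];
            m += 1;
        } else {
            dst[j] = src[j]; // writemask: copy through; the zeromask form uses 0.0 here
        }
    }
    dst
}

fn main() {
    let mem = [1.0, 2.0, 3.0];
    let src = [9.0; 8];
    // mask 0b0010_0101: lanes 0, 2 and 5 receive mem[0], mem[1], mem[2]
    assert_eq!(
        mask_expandloadu_ps_ref(src, 0b0010_0101, &mem),
        [1.0, 9.0, 2.0, 9.0, 9.0, 3.0, 9.0, 9.0]
    );
}

Note that, unlike a plain masked load, the memory offset `m` only advances on active lanes, so exactly popcount(k) elements are read.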
Gather double-precision (64-bit) floating-point elements from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.

FOR j := 0 to 3
    i := j*64
    m := j*32
    IF k[j]
        addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
        dst[i+63:i] := MEM[addr+63:addr]
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:256] := 0

AVX512F
AVX512VL
immintrin.h
Load

Gather double-precision (64-bit) floating-point elements from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.

FOR j := 0 to 1
    i := j*64
    m := j*32
    IF k[j]
        addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
        dst[i+63:i] := MEM[addr+63:addr]
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:128] := 0

AVX512F
AVX512VL
immintrin.h
Load

Gather single-precision (32-bit) floating-point elements from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.

FOR j := 0 to 7
    i := j*32
    m := j*32
    IF k[j]
        addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
        dst[i+31:i] := MEM[addr+31:addr]
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:256] := 0

AVX512F
AVX512VL
immintrin.h
Load

Gather single-precision (32-bit) floating-point elements from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.

FOR j := 0 to 3
    i := j*32
    m := j*32
    IF k[j]
        addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
        dst[i+31:i] := MEM[addr+31:addr]
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:128] := 0

AVX512F
AVX512VL
immintrin.h
Load

Gather double-precision (64-bit) floating-point elements from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.

FOR j := 0 to 3
    i := j*64
    m := j*64
    IF k[j]
        addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
        dst[i+63:i] := MEM[addr+63:addr]
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:256] := 0

AVX512F
AVX512VL
immintrin.h
Load

Gather double-precision (64-bit) floating-point elements from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.

FOR j := 0 to 1
    i := j*64
    m := j*64
    IF k[j]
        addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
        dst[i+63:i] := MEM[addr+63:addr]
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:128] := 0

AVX512F
AVX512VL
immintrin.h
Load

Gather single-precision (32-bit) floating-point elements from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.

FOR j := 0 to 3
    i := j*32
    m := j*64
    IF k[j]
        addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
        dst[i+31:i] := MEM[addr+31:addr]
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:128] := 0

AVX512F
AVX512VL
immintrin.h
Load

Gather single-precision (32-bit) floating-point elements from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.

FOR j := 0 to 1
    i := j*32
    m := j*64
    IF k[j]
        addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
        dst[i+31:i] := MEM[addr+31:addr]
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:64] := 0

AVX512F
AVX512VL
immintrin.h
Load
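A scalar sketch of the masked gather semantics in Rust, modelling the 256-bit "pd with 32-bit indices" variant over a slice; the helper is hypothetical and uses element indexing in place of raw byte addressing, under the assumption that offsets land on f64 boundaries:

/// Reference emulation of `_mm256_mmask_i32gather_pd` semantics: four f64
/// elements are gathered from `base` at byte offsets `vindex[j] * scale`,
/// merged into `src` under writemask `k`.
fn mmask_i32gather_pd_ref(src: [f64; 4], k: u8, vindex: [i32; 4], base: &[f64], scale: usize) -> [f64; 4] {
    assert!(matches!(scale, 1 | 2 | 4 | 8)); // "scale" should be 1, 2, 4 or 8
    let mut dst = src;
    for j in 0..4 {
        if (k >> j) & 1 == 1 {
            // byte offset, converted to an f64 element index for the slice
            let byte_off = vindex[j] as i64 * scale as i64;
            assert_eq!(byte_off % 8, 0, "offset must be f64-aligned for this sketch");
            dst[j] = base[(byte_off / 8) as usize];
        }
    }
    dst
}

fn main() {
    let table = [10.0, 11.0, 12.0, 13.0, 14.0, 15.0];
    // mask 0b1011: lanes 0, 1 and 3 gather table[5], table[0], table[3];
    // lane 2 keeps src, and its (out-of-range) index is never dereferenced
    let dst = mmask_i32gather_pd_ref([0.0; 4], 0b1011, [5, 0, 9, 3], &table, 8);
    assert_eq!(dst, [15.0, 10.0, 0.0, 13.0]);
}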
Load packed double-precision (64-bit) floating-point elements from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated.

FOR j := 0 to 3
    i := j*64
    IF k[j]
        dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:256] := 0

AVX512F
AVX512VL
immintrin.h
Load

Load packed double-precision (64-bit) floating-point elements from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated.

FOR j := 0 to 3
    i := j*64
    IF k[j]
        dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

AVX512F
AVX512VL
immintrin.h
Load

Load packed double-precision (64-bit) floating-point elements from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.

FOR j := 0 to 1
    i := j*64
    IF k[j]
        dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:128] := 0

AVX512F
AVX512VL
immintrin.h
Load

Load packed double-precision (64-bit) floating-point elements from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.

FOR j := 0 to 1
    i := j*64
    IF k[j]
        dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

AVX512F
AVX512VL
immintrin.h
Load

Load packed single-precision (32-bit) floating-point elements from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated.

FOR j := 0 to 7
    i := j*32
    IF k[j]
        dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:256] := 0

AVX512F
AVX512VL
immintrin.h
Load

Load packed single-precision (32-bit) floating-point elements from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated.

FOR j := 0 to 7
    i := j*32
    IF k[j]
        dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

AVX512F
AVX512VL
immintrin.h
Load

Load packed single-precision (32-bit) floating-point elements from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.

FOR j := 0 to 3
    i := j*32
    IF k[j]
        dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:128] := 0

AVX512F
AVX512VL
immintrin.h
Load

Load packed single-precision (32-bit) floating-point elements from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.

FOR j := 0 to 3
    i := j*32
    IF k[j]
        dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

AVX512F
AVX512VL
immintrin.h
Load
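Two details of these aligned masked loads are easy to miss: the alignment requirement applies to the base address even when low lanes are masked off, and lanes with a clear mask bit are never read, so their memory need not be accessible. A hypothetical scalar sketch of the 256-bit pd writemask form, with the alignment check made explicit:

#[repr(align(32))]
struct Aligned([f64; 4]);

/// Reference emulation of `_mm256_mask_load_pd` semantics. Unsafe because it
/// dereferences `mem_addr`; only the active elements are ever read.
unsafe fn mask_load_pd_ref(src: [f64; 4], k: u8, mem_addr: *const f64) -> [f64; 4] {
    // The hardware instruction may raise a general-protection exception
    // unless mem_addr is 32-byte aligned, regardless of the mask.
    assert_eq!(mem_addr as usize % 32, 0, "mem_addr must be 32-byte aligned");
    let mut dst = src;
    for j in 0..4 {
        if (k >> j) & 1 == 1 {
            dst[j] = *mem_addr.add(j);
        }
    }
    dst
}

fn main() {
    let mem = Aligned([1.0, 2.0, 3.0, 4.0]);
    // mask 0b1010: lanes 1 and 3 come from memory, lanes 0 and 2 from src
    let dst = unsafe { mask_load_pd_ref([9.0; 4], 0b1010, mem.0.as_ptr()) };
    assert_eq!(dst, [9.0, 2.0, 9.0, 4.0]);
}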
Load packed 32-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated.

FOR j := 0 to 7
    i := j*32
    IF k[j]
        dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:256] := 0

AVX512F
AVX512VL
immintrin.h
Load

Load packed 32-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated.

FOR j := 0 to 7
    i := j*32
    IF k[j]
        dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

AVX512F
AVX512VL
immintrin.h
Load

Load packed 32-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.

FOR j := 0 to 3
    i := j*32
    IF k[j]
        dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:128] := 0

AVX512F
AVX512VL
immintrin.h
Load

Load packed 32-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.

FOR j := 0 to 3
    i := j*32
    IF k[j]
        dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

AVX512F
AVX512VL
immintrin.h
Load

Load packed 64-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated.

FOR j := 0 to 3
    i := j*64
    IF k[j]
        dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:256] := 0

AVX512F
AVX512VL
immintrin.h
Load

Load packed 64-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated.

FOR j := 0 to 3
    i := j*64
    IF k[j]
        dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

AVX512F
AVX512VL
immintrin.h
Load

Load packed 64-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.

FOR j := 0 to 1
    i := j*64
    IF k[j]
        dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:128] := 0

AVX512F
AVX512VL
immintrin.h
Load

Load packed 64-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.

FOR j := 0 to 1
    i := j*64
    IF k[j]
        dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

AVX512F
AVX512VL
immintrin.h
Load
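The writemask and zeromask forms differ only in the ELSE branch of the pseudocode, so the zeromask variant is exactly the writemask variant with an all-zero "src". A small Rust sketch of that relationship for the 256-bit epi32 case, with hypothetical helper names and arrays standing in for the vector registers:

// Writemask vs zeromask on the same 8 x i32 load:
fn mask_load_epi32_ref(src: [i32; 8], k: u8, mem: &[i32; 8]) -> [i32; 8] {
    core::array::from_fn(|j| if (k >> j) & 1 == 1 { mem[j] } else { src[j] })
}

fn maskz_load_epi32_ref(k: u8, mem: &[i32; 8]) -> [i32; 8] {
    mask_load_epi32_ref([0; 8], k, mem) // maskz is mask with an all-zero src
}

fn main() {
    let mem = [1, 2, 3, 4, 5, 6, 7, 8];
    let src = [-1; 8];
    assert_eq!(mask_load_epi32_ref(src, 0b0000_1111, &mem), [1, 2, 3, 4, -1, -1, -1, -1]);
    assert_eq!(maskz_load_epi32_ref(0b0000_1111, &mem), [1, 2, 3, 4, 0, 0, 0, 0]);
}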
Load packed 32-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "mem_addr" does not need to be aligned on any particular boundary.

FOR j := 0 to 7
    i := j*32
    IF k[j]
        dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:256] := 0

AVX512F
AVX512VL
immintrin.h
Load

Load packed 32-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "mem_addr" does not need to be aligned on any particular boundary.

FOR j := 0 to 7
    i := j*32
    IF k[j]
        dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

AVX512F
AVX512VL
immintrin.h
Load

Load packed 32-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "mem_addr" does not need to be aligned on any particular boundary.

FOR j := 0 to 3
    i := j*32
    IF k[j]
        dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:128] := 0

AVX512F
AVX512VL
immintrin.h
Load

Load packed 32-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "mem_addr" does not need to be aligned on any particular boundary.

FOR j := 0 to 3
    i := j*32
    IF k[j]
        dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

AVX512F
AVX512VL
immintrin.h
Load

Load packed 64-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "mem_addr" does not need to be aligned on any particular boundary.

FOR j := 0 to 3
    i := j*64
    IF k[j]
        dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:256] := 0

AVX512F
AVX512VL
immintrin.h
Load

Load packed 64-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "mem_addr" does not need to be aligned on any particular boundary.

FOR j := 0 to 3
    i := j*64
    IF k[j]
        dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

AVX512F
AVX512VL
immintrin.h
Load

Load packed 64-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "mem_addr" does not need to be aligned on any particular boundary.

FOR j := 0 to 1
    i := j*64
    IF k[j]
        dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:128] := 0

AVX512F
AVX512VL
immintrin.h
Load

Load packed 64-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "mem_addr" does not need to be aligned on any particular boundary.

FOR j := 0 to 1
    i := j*64
    IF k[j]
        dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

AVX512F
AVX512VL
immintrin.h
Load
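For the unaligned (`loadu`) forms, the element reads themselves may straddle any byte boundary. A hypothetical scalar sketch of the 256-bit epi64 writemask form, using `read_unaligned` to mirror ""mem_addr" does not need to be aligned on any particular boundary":

/// Reference emulation of `_mm256_mask_loadu_epi64` semantics.
unsafe fn mask_loadu_epi64_ref(src: [i64; 4], k: u8, mem_addr: *const i64) -> [i64; 4] {
    let mut dst = src;
    for j in 0..4 {
        if (k >> j) & 1 == 1 {
            dst[j] = mem_addr.add(j).read_unaligned();
        }
    }
    dst
}

fn main() {
    let bytes = [0u8; 33];
    // an i64 pointer one byte into the buffer is misaligned but still readable
    let p = bytes[1..].as_ptr() as *const i64;
    let dst = unsafe { mask_loadu_epi64_ref([7; 4], 0b0011, p) };
    assert_eq!(dst, [0, 0, 7, 7]);
}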
Load packed double-precision (64-bit) floating-point elements from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "mem_addr" does not need to be aligned on any particular boundary.

FOR j := 0 to 3
    i := j*64
    IF k[j]
        dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:256] := 0

AVX512F
AVX512VL
immintrin.h
Load

Load packed double-precision (64-bit) floating-point elements from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "mem_addr" does not need to be aligned on any particular boundary.

FOR j := 0 to 3
    i := j*64
    IF k[j]
        dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

AVX512F
AVX512VL
immintrin.h
Load

Load packed double-precision (64-bit) floating-point elements from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "mem_addr" does not need to be aligned on any particular boundary.

FOR j := 0 to 1
    i := j*64
    IF k[j]
        dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:128] := 0

AVX512F
AVX512VL
immintrin.h
Load

Load packed double-precision (64-bit) floating-point elements from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "mem_addr" does not need to be aligned on any particular boundary.

FOR j := 0 to 1
    i := j*64
    IF k[j]
        dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

AVX512F
AVX512VL
immintrin.h
Load

Load packed single-precision (32-bit) floating-point elements from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "mem_addr" does not need to be aligned on any particular boundary.

FOR j := 0 to 7
    i := j*32
    IF k[j]
        dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:256] := 0

AVX512F
AVX512VL
immintrin.h
Load

Load packed single-precision (32-bit) floating-point elements from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "mem_addr" does not need to be aligned on any particular boundary.

FOR j := 0 to 7
    i := j*32
    IF k[j]
        dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

AVX512F
AVX512VL
immintrin.h
Load

Load packed single-precision (32-bit) floating-point elements from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "mem_addr" does not need to be aligned on any particular boundary.

FOR j := 0 to 3
    i := j*32
    IF k[j]
        dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:128] := 0

AVX512F
AVX512VL
immintrin.h
Load

Load packed single-precision (32-bit) floating-point elements from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "mem_addr" does not need to be aligned on any particular boundary.

FOR j := 0 to 3
    i := j*32
    IF k[j]
        dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

AVX512F
AVX512VL
immintrin.h
Load
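Assuming the nightly stdarch AVX-512 intrinsics (behind the `stdarch_x86_avx512` feature gate) expose these loads with Intel's signatures, a usage sketch of the 256-bit zeromask form might look like the following; the helper name `load_even_lanes` is illustrative:

#![feature(stdarch_x86_avx512)] // nightly-only gate for the AVX-512 intrinsics
use std::arch::x86_64::*;

#[target_feature(enable = "avx512f,avx512vl")]
unsafe fn load_even_lanes(mem: &[f64; 4]) -> __m256d {
    // mask 0b0101 loads lanes 0 and 2; lanes 1 and 3 are zeroed (zeromask form)
    _mm256_maskz_loadu_pd(0b0101, mem.as_ptr())
}

fn main() {
    if is_x86_feature_detected!("avx512f") && is_x86_feature_detected!("avx512vl") {
        let mem = [1.0, 2.0, 3.0, 4.0];
        let v = unsafe { load_even_lanes(&mem) };
        let mut out = [0.0f64; 4];
        unsafe { _mm256_storeu_pd(out.as_mut_ptr(), v) };
        assert_eq!(out, [1.0, 0.0, 3.0, 0.0]);
    }
}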
Load contiguous active 32-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

m := 0
FOR j := 0 to 7
    i := j*32
    IF k[j]
        dst[i+31:i] := MEM[mem_addr+m+31:mem_addr+m]
        m := m + 32
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:256] := 0

AVX512F
AVX512VL
immintrin.h
Load

Load contiguous active 32-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

m := 0
FOR j := 0 to 7
    i := j*32
    IF k[j]
        dst[i+31:i] := MEM[mem_addr+m+31:mem_addr+m]
        m := m + 32
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

AVX512F
AVX512VL
immintrin.h
Load

Load contiguous active 32-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

m := 0
FOR j := 0 to 3
    i := j*32
    IF k[j]
        dst[i+31:i] := MEM[mem_addr+m+31:mem_addr+m]
        m := m + 32
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:128] := 0

AVX512F
AVX512VL
immintrin.h
Load

Load contiguous active 32-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

m := 0
FOR j := 0 to 3
    i := j*32
    IF k[j]
        dst[i+31:i] := MEM[mem_addr+m+31:mem_addr+m]
        m := m + 32
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

AVX512F
AVX512VL
immintrin.h
Load

Load contiguous active 64-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

m := 0
FOR j := 0 to 3
    i := j*64
    IF k[j]
        dst[i+63:i] := MEM[mem_addr+m+63:mem_addr+m]
        m := m + 64
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:256] := 0

AVX512F
AVX512VL
immintrin.h
Load

Load contiguous active 64-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

m := 0
FOR j := 0 to 3
    i := j*64
    IF k[j]
        dst[i+63:i] := MEM[mem_addr+m+63:mem_addr+m]
        m := m + 64
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

AVX512F
AVX512VL
immintrin.h
Load

Load contiguous active 64-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

m := 0
FOR j := 0 to 1
    i := j*64
    IF k[j]
        dst[i+63:i] := MEM[mem_addr+m+63:mem_addr+m]
        m := m + 64
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:128] := 0

AVX512F
AVX512VL
immintrin.h
Load

Load contiguous active 64-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

m := 0
FOR j := 0 to 1
    i := j*64
    IF k[j]
        dst[i+63:i] := MEM[mem_addr+m+63:mem_addr+m]
        m := m + 64
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:128] := 0

AVX512F
AVX512VL
immintrin.h
Load
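As with the floating-point expand loads, the integer forms read exactly popcount(k) elements from memory. A short hypothetical sketch of the 256-bit epi32 zeromask form that makes this bound explicit:

/// Reference emulation of `_mm256_maskz_expandloadu_epi32` semantics.
fn maskz_expandloadu_epi32_ref(k: u8, mem: &[i32]) -> [i32; 8] {
    assert!(mem.len() >= k.count_ones() as usize); // only popcount(k) elements are read
    let mut dst = [0i32; 8];
    let mut m = 0;
    for j in 0..8 {
        if (k >> j) & 1 == 1 {
            dst[j] = mem[m];
            m += 1;
        }
    }
    dst
}

fn main() {
    let mem = [1, 2, 3, 4];
    // mask 0b1000_0111: lanes 0, 1, 2 and 7 receive the four contiguous elements
    assert_eq!(
        maskz_expandloadu_epi32_ref(0b1000_0111, &mem),
        [1, 2, 3, 0, 0, 0, 0, 4]
    );
}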
- - - - - - - Gather 32-bit integers from memory using 32-bit indices. 32-bit elements are - loaded from addresses starting at "base_addr" and offset by each 32-bit element in - "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged - into "dst" using writemask "k" (elements are copied from "src" when the corresponding - mask bit is not set). "scale" should be 1, 2, 4 or 8. - - FOR j := 0 to 7 - i := j*32 - m := j*32 - IF k[j] - addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 - dst[i+31:i] := MEM[addr+31:addr] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Load + + + + + + + Gather 32-bit integers from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 7 + i := j*32 + m := j*32 + IF k[j] + addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 + dst[i+31:i] := MEM[addr+31:addr] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Load
- - - - - - - Gather 32-bit integers from memory using 32-bit indices. 32-bit elements are - loaded from addresses starting at "base_addr" and offset by each 32-bit element in - "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged - into "dst" using writemask "k" (elements are copied from "src" when the corresponding - mask bit is not set). "scale" should be 1, 2, 4 or 8. - - FOR j := 0 to 3 - i := j*32 - m := j*32 - IF k[j] - addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 - dst[i+31:i] := MEM[addr+31:addr] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Load + + + + + + + Gather 32-bit integers from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 3 + i := j*32 + m := j*32 + IF k[j] + addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 + dst[i+31:i] := MEM[addr+31:addr] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Load
- - - - - - - Gather 64-bit integers from memory using 32-bit indices. 64-bit elements are - loaded from addresses starting at "base_addr" and offset by each 32-bit element in - "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged - into "dst" using writemask "k" (elements are copied from "src" when the corresponding - mask bit is not set). "scale" should be 1, 2, 4 or 8. - - FOR j := 0 to 3 - i := j*64 - m := j*32 - IF k[j] - addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 - dst[i+63:i] := MEM[addr+63:addr] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Load + + + + + + + Gather 64-bit integers from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 3 + i := j*64 + m := j*32 + IF k[j] + addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 + dst[i+63:i] := MEM[addr+63:addr] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Load
- - - - - - - Gather 64-bit integers from memory using 32-bit indices. 64-bit elements are - loaded from addresses starting at "base_addr" and offset by each 32-bit element in - "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged - into "dst" using writemask "k" (elements are copied from "src" when the corresponding - mask bit is not set). "scale" should be 1, 2, 4 or 8. - - FOR j := 0 to 1 - i := j*64 - m := j*32 - IF k[j] - addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 - dst[i+63:i] := MEM[addr+63:addr] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Load + + + + + + + Gather 64-bit integers from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 1 + i := j*64 + m := j*32 + IF k[j] + addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 + dst[i+63:i] := MEM[addr+63:addr] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Load
- - - - - - - Gather 32-bit integers from memory using 64-bit indices. 32-bit elements are - loaded from addresses starting at "base_addr" and offset by each 64-bit element in - "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged - into "dst" using writemask "k" (elements are copied from "src" when the corresponding - mask bit is not set). "scale" should be 1, 2, 4 or 8. - - FOR j := 0 to 3 - i := j*32 - m := j*64 - IF k[j] - addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8 - dst[i+31:i] := MEM[addr+31:addr] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Load + + + + + + + Gather 32-bit integers from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 3 + i := j*32 + m := j*64 + IF k[j] + addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8 + dst[i+31:i] := MEM[addr+31:addr] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Load
- - - - - - - Gather 32-bit integers from memory using 64-bit indices. 32-bit elements are - loaded from addresses starting at "base_addr" and offset by each 64-bit element in - "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged - into "dst" using writemask "k" (elements are copied from "src" when the corresponding - mask bit is not set). "scale" should be 1, 2, 4 or 8. - - FOR j := 0 to 1 - i := j*32 - m := j*64 - IF k[j] - addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8 - dst[i+31:i] := MEM[addr+31:addr] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:64] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Load + + + + + + + Gather 32-bit integers from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 1 + i := j*32 + m := j*64 + IF k[j] + addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8 + dst[i+31:i] := MEM[addr+31:addr] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:64] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Load
- - - - - - - Gather 64-bit integers from memory using 64-bit indices. 64-bit elements are - loaded from addresses starting at "base_addr" and offset by each 64-bit element in - "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged - into "dst" using writemask "k" (elements are copied from "src" when the corresponding - mask bit is not set). "scale" should be 1, 2, 4 or 8. - - FOR j := 0 to 3 - i := j*64 - m := j*64 - IF k[j] - addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8 - dst[i+63:i] := MEM[addr+63:addr] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Load + + + + + + + Gather 64-bit integers from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 3 + i := j*64 + m := j*64 + IF k[j] + addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8 + dst[i+63:i] := MEM[addr+63:addr] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Load
- - - - - - - Gather 64-bit integers from memory using 64-bit indices. 64-bit elements are - loaded from addresses starting at "base_addr" and offset by each 64-bit element in - "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged - into "dst" using writemask "k" (elements are copied from "src" when the corresponding - mask bit is not set). "scale" should be 1, 2, 4 or 8. - - FOR j := 0 to 1 - i := j*64 - m := j*64 - IF k[j] - addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8 - dst[i+63:i] := MEM[addr+63:addr] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Load + + + + + + + Gather 64-bit integers from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 1 + i := j*64 + m := j*64 + IF k[j] + addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8 + dst[i+63:i] := MEM[addr+63:addr] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Load
- - - Load 256-bits (composed of 4 packed 64-bit integers) from memory into "dst". - "mem_addr" does not need to be aligned on any particular boundary. - - dst[255:0] := MEM[mem_addr+255:mem_addr] - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Load + + + Load 256-bits (composed of 4 packed 64-bit integers) from memory into "dst". + "mem_addr" does not need to be aligned on any particular boundary. + +dst[255:0] := MEM[mem_addr+255:mem_addr] +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Load
- - - Load 256-bits (composed of 8 packed 32-bit integers) from memory into "dst". - "mem_addr" does not need to be aligned on any particular boundary. - - dst[255:0] := MEM[mem_addr+255:mem_addr] - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Load + + + Load 256-bits (composed of 8 packed 32-bit integers) from memory into "dst". + "mem_addr" does not need to be aligned on any particular boundary. + +dst[255:0] := MEM[mem_addr+255:mem_addr] +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Load
- - - Load 128-bits (composed of 2 packed 64-bit integers) from memory into "dst". - "mem_addr" does not need to be aligned on any particular boundary. - - dst[127:0] := MEM[mem_addr+127:mem_addr] - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Load + + + Load 128-bits (composed of 2 packed 64-bit integers) from memory into "dst". + "mem_addr" does not need to be aligned on any particular boundary. + +dst[127:0] := MEM[mem_addr+127:mem_addr] +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Load
- - - Load 128-bits (composed of 4 packed 32-bit integers) from memory into "dst". - "mem_addr" does not need to be aligned on any particular boundary. - - dst[127:0] := MEM[mem_addr+127:mem_addr] - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Load + + + Load 128-bits (composed of 4 packed 32-bit integers) from memory into "dst". + "mem_addr" does not need to be aligned on any particular boundary. + +dst[127:0] := MEM[mem_addr+127:mem_addr] +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Load
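The four loadu entries above differ only in element type and width; the common point is that no alignment is demanded of "mem_addr". In Rust terms that is exactly an unaligned read, as in this hypothetical scalar model (not the intrinsic itself):

/// Hypothetical model of a 128-bit unaligned load (loadu-style):
/// `mem_addr` may have any alignment.
unsafe fn loadu_epi32_model(mem_addr: *const i32) -> [i32; 4] {
    // dst[127:0] := MEM[mem_addr+127:mem_addr], no alignment required.
    (mem_addr as *const [i32; 4]).read_unaligned()
}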
- - - Load 256-bits (composed of 4 packed 64-bit integers) from memory into "dst". - "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may - be generated. - - dst[255:0] := MEM[mem_addr+255:mem_addr] - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Load + + + Load 256-bits (composed of 4 packed 64-bit integers) from memory into "dst". + "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated. + +dst[255:0] := MEM[mem_addr+255:mem_addr] +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Load
- - - Load 256-bits (composed of 8 packed 32-bit integers) from memory into "dst". - "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may - be generated. - - dst[255:0] := MEM[mem_addr+255:mem_addr] - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Load + + + Load 256-bits (composed of 8 packed 32-bit integers) from memory into "dst". + "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated. + +dst[255:0] := MEM[mem_addr+255:mem_addr] +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Load
- - - Load 128-bits (composed of 2 packed 64-bit integers) from memory into "dst". - "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may - be generated. - - dst[127:0] := MEM[mem_addr+127:mem_addr] - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Load + + + Load 128-bits (composed of 2 packed 64-bit integers) from memory into "dst". + "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +dst[127:0] := MEM[mem_addr+127:mem_addr] +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Load
- - - Load 128-bits (composed of 4 packed 32-bit integers) from memory into "dst". - "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may - be generated. - - dst[127:0] := MEM[mem_addr+127:mem_addr] - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Load + + + Load 128-bits (composed of 4 packed 32-bit integers) from memory into "dst". + "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +dst[127:0] := MEM[mem_addr+127:mem_addr] +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Load
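The aligned variants above move the same bits but add a contract: "mem_addr" must be 16- or 32-byte aligned, with a possible general-protection fault otherwise. A model can make that precondition explicit; the debug_assert! below is illustrative (the hardware simply faults), and the helper name is hypothetical:

/// Hypothetical model of the aligned 128-bit load above.
unsafe fn load_epi32_model(mem_addr: *const i32) -> [i32; 4] {
    // The instruction may raise #GP on a misaligned address; the model
    // checks the same 16-byte precondition up front.
    debug_assert!(mem_addr as usize % 16 == 0, "mem_addr must be 16-byte aligned");
    (mem_addr as *const [i32; 4]).read()
}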
- - - - - Move packed double-precision (64-bit) floating-point elements from "a" to "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Move + + + + + Move packed double-precision (64-bit) floating-point elements from "a" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Move
- - - - Move packed double-precision (64-bit) floating-point elements from "a" into - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Move + + + + Move packed double-precision (64-bit) floating-point elements from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Move
- - - - - Move packed double-precision (64-bit) floating-point elements from "a" to "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Move + + + + + Move packed double-precision (64-bit) floating-point elements from "a" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Move
- - - - Move packed double-precision (64-bit) floating-point elements from "a" into - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Move + + + + Move packed double-precision (64-bit) floating-point elements from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Move
- - - - - Move packed single-precision (32-bit) floating-point elements from "a" to "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Move + + + + + Move packed single-precision (32-bit) floating-point elements from "a" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Move
- - - - Move packed single-precision (32-bit) floating-point elements from "a" into - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Move + + + + Move packed single-precision (32-bit) floating-point elements from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Move
- - - - - Move packed single-precision (32-bit) floating-point elements from "a" to "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Move + + + + + Move packed single-precision (32-bit) floating-point elements from "a" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Move
- - - - Move packed single-precision (32-bit) floating-point elements from "a" into - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Move + + + + Move packed single-precision (32-bit) floating-point elements from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Move
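Every mask/maskz mov entry above, like the packed 32-bit and 64-bit integer variants further below, is the same per-lane select; only the fallback differs ("src" for writemask, zero for zeromask). A hypothetical generic model of the pattern follows; stdarch typically expresses this kind of select through its internal simd_select_bitmask helper, but that is an implementation detail, not part of these definitions.

/// Hypothetical model of the writemask select: lane j comes from `a` when
/// bit j of `k` is set, otherwise from `fallback`.
fn mask_select_model<T: Copy, const N: usize>(k: u16, a: [T; N], fallback: [T; N]) -> [T; N] {
    let mut dst = fallback;
    for j in 0..N {
        if (k >> j) & 1 == 1 {
            dst[j] = a[j];
        }
    }
    dst
}

// mask_mov:  mask_select_model(k, a, src)
// maskz_mov: mask_select_model(k, a, [0.0f64; 4])  // zeromask fallback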
- - - - - Duplicate even-indexed double-precision (64-bit) floating-point elements from - "a", and store the results in "dst" using writemask "k" (elements are copied from "src" - when the corresponding mask bit is not set). - - tmp[63:0] := a[63:0] - tmp[127:64] := a[63:0] - tmp[191:128] := a[191:128] - tmp[255:192] := a[191:128] - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := tmp[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Move + + + + + Duplicate even-indexed double-precision (64-bit) floating-point elements from "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp[63:0] := a[63:0] +tmp[127:64] := a[63:0] +tmp[191:128] := a[191:128] +tmp[255:192] := a[191:128] +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Move
- - - - Duplicate even-indexed double-precision (64-bit) floating-point elements from - "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - tmp[63:0] := a[63:0] - tmp[127:64] := a[63:0] - tmp[191:128] := a[191:128] - tmp[255:192] := a[191:128] - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := tmp[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Move + + + + Duplicate even-indexed double-precision (64-bit) floating-point elements from "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp[63:0] := a[63:0] +tmp[127:64] := a[63:0] +tmp[191:128] := a[191:128] +tmp[255:192] := a[191:128] +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Move
- - - - - Duplicate even-indexed double-precision (64-bit) floating-point elements from - "a", and store the results in "dst" using writemask "k" (elements are copied from "src" - when the corresponding mask bit is not set). - - tmp[63:0] := a[63:0] - tmp[127:64] := a[63:0] - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := tmp[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Move + + + + + Duplicate even-indexed double-precision (64-bit) floating-point elements from "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp[63:0] := a[63:0] +tmp[127:64] := a[63:0] +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Move
- - - - Duplicate even-indexed double-precision (64-bit) floating-point elements from - "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - tmp[63:0] := a[63:0] - tmp[127:64] := a[63:0] - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := tmp[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Move + + + + Duplicate even-indexed double-precision (64-bit) floating-point elements from "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp[63:0] := a[63:0] +tmp[127:64] := a[63:0] +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Move
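Setting the masking aside, the movedup tmp[] computation above just copies each even-indexed double into the odd lane next to it. A hypothetical scalar model of that step (the mask merge afterwards is the same select sketched earlier):

/// Hypothetical model of the movedup tmp[] step: tmp = [a0, a0, a2, a2].
fn movedup_pd_model(a: [f64; 4]) -> [f64; 4] {
    let mut tmp = [0.0; 4];
    for j in 0..4 {
        tmp[j] = a[j & !1]; // clearing bit 0 selects the even lane below j
    }
    tmp
}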
- - - - - Move packed 32-bit integers from "a" to "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Move + + + + + Move packed 32-bit integers from "a" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Move
- - - - Move packed 32-bit integers from "a" into "dst" using zeromask "k" (elements - are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Move + + + + Move packed 32-bit integers from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Move
- - - - - Move packed 32-bit integers from "a" to "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Move + + + + + Move packed 32-bit integers from "a" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Move
- - - - Move packed 32-bit integers from "a" into "dst" using zeromask "k" (elements - are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Move + + + + Move packed 32-bit integers from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Move
- - - - - Move packed 64-bit integers from "a" to "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Move + + + + + Move packed 64-bit integers from "a" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Move
- - - - Move packed 64-bit integers from "a" into "dst" using zeromask "k" (elements - are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Move + + + + Move packed 64-bit integers from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Move
- - - - - Move packed 64-bit integers from "a" to "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Move + + + + + Move packed 64-bit integers from "a" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Move
- - - - Move packed 64-bit integers from "a" into "dst" using zeromask "k" (elements - are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Move + + + + Move packed 64-bit integers from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Move
- - - - - Duplicate odd-indexed single-precision (32-bit) floating-point elements from - "a", and store the results in "dst" using writemask "k" (elements are copied from "src" - when the corresponding mask bit is not set). - - tmp[31:0] := a[63:32] - tmp[63:32] := a[63:32] - tmp[95:64] := a[127:96] - tmp[127:96] := a[127:96] - tmp[159:128] := a[191:160] - tmp[191:160] := a[191:160] - tmp[223:192] := a[255:224] - tmp[255:224] := a[255:224] - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := tmp[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Move + + + + + Duplicate odd-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp[31:0] := a[63:32] +tmp[63:32] := a[63:32] +tmp[95:64] := a[127:96] +tmp[127:96] := a[127:96] +tmp[159:128] := a[191:160] +tmp[191:160] := a[191:160] +tmp[223:192] := a[255:224] +tmp[255:224] := a[255:224] +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Move
- - - - Duplicate odd-indexed single-precision (32-bit) floating-point elements from - "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - tmp[31:0] := a[63:32] - tmp[63:32] := a[63:32] - tmp[95:64] := a[127:96] - tmp[127:96] := a[127:96] - tmp[159:128] := a[191:160] - tmp[191:160] := a[191:160] - tmp[223:192] := a[255:224] - tmp[255:224] := a[255:224] - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := tmp[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Move + + + + Duplicate odd-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp[31:0] := a[63:32] +tmp[63:32] := a[63:32] +tmp[95:64] := a[127:96] +tmp[127:96] := a[127:96] +tmp[159:128] := a[191:160] +tmp[191:160] := a[191:160] +tmp[223:192] := a[255:224] +tmp[255:224] := a[255:224] +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Move
- - - - - Duplicate odd-indexed single-precision (32-bit) floating-point elements from - "a", and store the results in "dst" using writemask "k" (elements are copied from "src" - when the corresponding mask bit is not set). - - tmp[31:0] := a[63:32] - tmp[63:32] := a[63:32] - tmp[95:64] := a[127:96] - tmp[127:96] := a[127:96] - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := tmp[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Move + + + + + Duplicate odd-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp[31:0] := a[63:32] +tmp[63:32] := a[63:32] +tmp[95:64] := a[127:96] +tmp[127:96] := a[127:96] +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Move
- - - - Duplicate odd-indexed single-precision (32-bit) floating-point elements from - "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - tmp[31:0] := a[63:32] - tmp[63:32] := a[63:32] - tmp[95:64] := a[127:96] - tmp[127:96] := a[127:96] - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := tmp[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Move + + + + Duplicate odd-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp[31:0] := a[63:32] +tmp[63:32] := a[63:32] +tmp[95:64] := a[127:96] +tmp[127:96] := a[127:96] +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Move
- - - - - Duplicate even-indexed single-precision (32-bit) floating-point elements from - "a", and store the results in "dst" using writemask "k" (elements are copied from "src" - when the corresponding mask bit is not set). - - tmp[31:0] := a[31:0] - tmp[63:32] := a[31:0] - tmp[95:64] := a[95:64] - tmp[127:96] := a[95:64] - tmp[159:128] := a[159:128] - tmp[191:160] := a[159:128] - tmp[223:192] := a[223:192] - tmp[255:224] := a[223:192] - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := tmp[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Move + + + + + Duplicate even-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp[31:0] := a[31:0] +tmp[63:32] := a[31:0] +tmp[95:64] := a[95:64] +tmp[127:96] := a[95:64] +tmp[159:128] := a[159:128] +tmp[191:160] := a[159:128] +tmp[223:192] := a[223:192] +tmp[255:224] := a[223:192] +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Move
- - - - Duplicate even-indexed single-precision (32-bit) floating-point elements from - "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - tmp[31:0] := a[31:0] - tmp[63:32] := a[31:0] - tmp[95:64] := a[95:64] - tmp[127:96] := a[95:64] - tmp[159:128] := a[159:128] - tmp[191:160] := a[159:128] - tmp[223:192] := a[223:192] - tmp[255:224] := a[223:192] - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := tmp[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Move + + + + Duplicate even-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp[31:0] := a[31:0] +tmp[63:32] := a[31:0] +tmp[95:64] := a[95:64] +tmp[127:96] := a[95:64] +tmp[159:128] := a[159:128] +tmp[191:160] := a[159:128] +tmp[223:192] := a[223:192] +tmp[255:224] := a[223:192] +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Move
- - - - - Duplicate even-indexed single-precision (32-bit) floating-point elements from - "a", and store the results in "dst" using writemask "k" (elements are copied from "src" - when the corresponding mask bit is not set). - - tmp[31:0] := a[31:0] - tmp[63:32] := a[31:0] - tmp[95:64] := a[95:64] - tmp[127:96] := a[95:64] - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := tmp[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Move + + + + + Duplicate even-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp[31:0] := a[31:0] +tmp[63:32] := a[31:0] +tmp[95:64] := a[95:64] +tmp[127:96] := a[95:64] +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Move
- - - - Duplicate even-indexed single-precision (32-bit) floating-point elements from - "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - tmp[31:0] := a[31:0] - tmp[63:32] := a[31:0] - tmp[95:64] := a[95:64] - tmp[127:96] := a[95:64] - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := tmp[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Move + + + + Duplicate even-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp[31:0] := a[31:0] +tmp[63:32] := a[31:0] +tmp[95:64] := a[95:64] +tmp[127:96] := a[95:64] +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Move
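movehdup (the odd-indexed group above) and moveldup (the even-indexed group) are the single-precision counterparts: within each pair of lanes, both lanes read from one member. Both tmp[] computations fit a single hypothetical model:

/// Hypothetical model of the movehdup/moveldup tmp[] steps. `odd = true`
/// duplicates odd-indexed lanes (movehdup); `odd = false` duplicates
/// even-indexed lanes (moveldup).
fn movdup_ps_model<const N: usize>(a: [f32; N], odd: bool) -> [f32; N] {
    let mut tmp = [0.0; N];
    for j in 0..N {
        // Lanes (2p, 2p+1) both read lane 2p, or lane 2p+1 when `odd` is set.
        tmp[j] = a[(j & !1) | odd as usize];
    }
    tmp
}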
- - - - - - Compute the bitwise AND of packed 32-bit integers in "a" and "b", and store the - results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] AND b[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Logical + + + + + + Compute the bitwise AND of packed 32-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] AND b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Logical
- - - - - Compute the bitwise AND of packed 32-bit integers in "a" and "b", and store the - results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask - bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] AND b[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Logical + + + + + Compute the bitwise AND of packed 32-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] AND b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Logical
- - - - - - Compute the bitwise AND of packed 32-bit integers in "a" and "b", and store the - results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] AND b[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Logical + + + + + + Compute the bitwise AND of packed 32-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] AND b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Logical
- - - - - Compute the bitwise AND of packed 32-bit integers in "a" and "b", and store the - results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask - bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] AND b[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Logical + + + + + Compute the bitwise AND of packed 32-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] AND b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Logical
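The masked logical entries compose a plain per-lane bitwise op with the same writemask/zeromask select as the movs earlier. For example, a hypothetical scalar model of the 256-bit masked AND over 32-bit lanes:

/// Hypothetical model of a masked AND over eight 32-bit lanes:
/// a[j] AND b[j] where k[j] is set, src[j] elsewhere.
fn mask_and_epi32_model(src: [i32; 8], k: u8, a: [i32; 8], b: [i32; 8]) -> [i32; 8] {
    let mut dst = src;
    for j in 0..8 {
        if (k >> j) & 1 == 1 {
            dst[j] = a[j] & b[j];
        }
    }
    dst
}

The zeromask forms substitute an all-zero array for src, and the 64-bit AND and the OR entries further below swap only the inner expression.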
- - - - - - Compute the bitwise NOT of packed 32-bit integers in "a" and then AND with "b", - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Logical + + + + + + Compute the bitwise NOT of packed 32-bit integers in "a" and then AND with "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Logical
- - - - - Compute the bitwise NOT of packed 32-bit integers in "a" and then AND with "b", - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := (NOT a[i+31:i]) AND b[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Logical + + + + + Compute the bitwise NOT of packed 32-bit integers in "a" and then AND with "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := (NOT a[i+31:i]) AND b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Logical
- - - - - - Compute the bitwise NOT of packed 32-bit integers in "a" and then AND with "b", - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Logical + + + + + + Compute the bitwise NOT of packed 32-bit integers in "a" and then AND with "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Logical
- - - - - Compute the bitwise NOT of packed 32-bit integers in "a" and then AND with "b", - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := (NOT a[i+31:i]) AND b[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Logical + + + + + Compute the bitwise NOT of packed 32-bit integers in "a" and then AND with "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := (NOT a[i+31:i]) AND b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Logical
- - - - - - Compute the bitwise NOT of packed 64-bit integers in "a" and then AND with "b", - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Logical + + + + + + Compute the bitwise NOT of packed 64-bit integers in "a" and then AND with "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Logical
- - - - - Compute the bitwise NOT of packed 64-bit integers in "a" and then AND with "b", - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := (NOT a[i+63:i]) AND b[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Logical + + + + + Compute the bitwise NOT of packed 64-bit integers in "a" and then AND with "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := (NOT a[i+63:i]) AND b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Logical
- - - - - - Compute the bitwise NOT of packed 64-bit integers in "a" and then AND with "b", - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Logical + + + + + + Compute the bitwise NOT of packed 64-bit integers in "a" and then AND with "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Logical
- - - - - Compute the bitwise NOT of packed 64-bit integers in "a" and then AND with "b", - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := (NOT a[i+63:i]) AND b[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Logical + + + + + Compute the bitwise NOT of packed 64-bit integers in "a" and then AND with "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := (NOT a[i+63:i]) AND b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Logical
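andnot's inner expression (NOT a) AND b makes it the natural bit-clearing primitive: it keeps exactly the bits of "b" that are not set in "a". Per lane that is just the following; the writemask and zeromask merges around it are identical to the AND model earlier:

// Hypothetical per-lane core of the andnot entries above.
fn andnot_lane(a: i64, b: i64) -> i64 {
    !a & b // keep the bits of b that are NOT set in a
}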
- - - - - - Compute the bitwise AND of packed 64-bit integers in "a" and "b", and store the - results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] AND b[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Logical + + + + + + Compute the bitwise AND of packed 64-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] AND b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Logical
- - - - - Compute the bitwise AND of packed 64-bit integers in "a" and "b", and store the - results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask - bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] AND b[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Logical + + + + + Compute the bitwise AND of packed 64-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] AND b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Logical
- - - - - - Compute the bitwise AND of packed 64-bit integers in "a" and "b", and store the - results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] AND b[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Logical + + + + + + Compute the bitwise AND of packed 64-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] AND b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Logical
- - - - - Compute the bitwise AND of packed 64-bit integers in "a" and "b", and store the - results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask - bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] AND b[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Logical + + + + + Compute the bitwise AND of packed 64-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] AND b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Logical
- - - - - - Compute the bitwise OR of packed 32-bit integers in "a" and "b", and store the - results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] OR b[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Logical + + + + + + Compute the bitwise OR of packed 32-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] OR b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Logical
- - - - - Compute the bitwise OR of packed 32-bit integers in "a" and "b", and store the - results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask - bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] OR b[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Logical + + + + + Compute the bitwise OR of packed 32-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] OR b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Logical
- - - - - - Compute the bitwise OR of packed 32-bit integers in "a" and "b", and store the - results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] OR b[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Logical + + + + + + Compute the bitwise OR of packed 32-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] OR b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Logical
- - - - - Compute the bitwise OR of packed 32-bit integers in "a" and "b", and store the - results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask - bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] OR b[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Logical + + + + + Compute the bitwise OR of packed 32-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] OR b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Logical
- - - - - - Compute the bitwise OR of packed 64-bit integers in "a" and "b", and store the - results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] OR b[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Logical + + + + + + Compute the bitwise OR of packed 64-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] OR b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Logical
- - - - - Compute the bitwise OR of packed 64-bit integers in "a" and "b", and store the - results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask - bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] OR b[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Logical + + + + + Compute the bitwise OR of packed 64-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] OR b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Logical
- - - - - - Compute the bitwise OR of packed 64-bit integers in "a" and "b", and store the - results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] OR b[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Logical + + + + + + Compute the bitwise OR of packed 64-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] OR b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Logical
- - - - - Compute the bitwise OR of packed 64-bit integers in "a" and "b", and store the - results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask - bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] OR b[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Logical + + + + + Compute the bitwise OR of packed 64-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] OR b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Logical
- - - - - - - Bitwise ternary logic that provides the capability to implement any - three-operand binary function; the specific binary function is specified by value in - "imm8". For each bit in each packed 32-bit integer, the corresponding bit from "a", "b", - and "c" are used according to "imm8", and the result is written to the corresponding bit - in "dst" using writemask "k" at 32-bit granularity (32-bit elements are copied from "a" - when the corresponding mask bit is not set). - - DEFINE TernaryOP(imm8, a, b, c) { - CASE imm8[7:0] OF - 0: dst[0] := 0 // imm8[7:0] := 0 - 1: dst[0] := NOT (a OR b OR c) // imm8[7:0] := NOT (_MM_TERNLOG_A OR _MM_TERNLOG_B OR - _MM_TERNLOG_C) - // ... - 254: dst[0] := a OR b OR c // imm8[7:0] := _MM_TERNLOG_A OR _MM_TERNLOG_B OR - _MM_TERNLOG_C - 255: dst[0] := 1 // imm8[7:0] := 1 - ESAC - } - imm8[7:0] = LogicExp(_MM_TERNLOG_A, _MM_TERNLOG_B, _MM_TERNLOG_C) - FOR j := 0 to 7 - i := j*32 - IF k[j] - FOR h := 0 to 31 + + + + + + + Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 32-bit integer, the corresponding bit from "a", "b", and "c" are used according to "imm8", and the result is written to the corresponding bit in "dst" using writemask "k" at 32-bit granularity (32-bit elements are copied from "a" when the corresponding mask bit is not set). + +DEFINE TernaryOP(imm8, a, b, c) { + CASE imm8[7:0] OF + 0: dst[0] := 0 // imm8[7:0] := 0 + 1: dst[0] := NOT (a OR b OR c) // imm8[7:0] := NOT (_MM_TERNLOG_A OR _MM_TERNLOG_B OR _MM_TERNLOG_C) + // ... + 254: dst[0] := a OR b OR c // imm8[7:0] := _MM_TERNLOG_A OR _MM_TERNLOG_B OR _MM_TERNLOG_C + 255: dst[0] := 1 // imm8[7:0] := 1 + ESAC +} +imm8[7:0] = LogicExp(_MM_TERNLOG_A, _MM_TERNLOG_B, _MM_TERNLOG_C) +FOR j := 0 to 7 + i := j*32 + IF k[j] + FOR h := 0 to 31 dst[i+h] := TernaryOP(imm8[7:0], a[i+h], b[i+h], c[i+h]) - ENDFOR - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Logical + ENDFOR + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 +
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Logical
- - - - - - - Bitwise ternary logic that provides the capability to implement any - three-operand binary function; the specific binary function is specified by value in - "imm8". For each bit in each packed 32-bit integer, the corresponding bit from "a", "b", - and "c" are used according to "imm8", and the result is written to the corresponding bit - in "dst" using zeromask "k" at 32-bit granularity (32-bit elements are zeroed out when - the corresponding mask bit is not set). - - DEFINE TernaryOP(imm8, a, b, c) { - CASE imm8[7:0] OF - 0: dst[0] := 0 // imm8[7:0] := 0 - 1: dst[0] := NOT (a OR b OR c) // imm8[7:0] := NOT (_MM_TERNLOG_A OR _MM_TERNLOG_B OR - _MM_TERNLOG_C) - // ... - 254: dst[0] := a OR b OR c // imm8[7:0] := _MM_TERNLOG_A OR _MM_TERNLOG_B OR - _MM_TERNLOG_C - 255: dst[0] := 1 // imm8[7:0] := 1 - ESAC - } - imm8[7:0] = LogicExp(_MM_TERNLOG_A, _MM_TERNLOG_B, _MM_TERNLOG_C) - FOR j := 0 to 7 - i := j*32 - IF k[j] - FOR h := 0 to 31 + + + + + + + Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 32-bit integer, the corresponding bit from "a", "b", and "c" are used according to "imm8", and the result is written to the corresponding bit in "dst" using zeromask "k" at 32-bit granularity (32-bit elements are zeroed out when the corresponding mask bit is not set). + +DEFINE TernaryOP(imm8, a, b, c) { + CASE imm8[7:0] OF + 0: dst[0] := 0 // imm8[7:0] := 0 + 1: dst[0] := NOT (a OR b OR c) // imm8[7:0] := NOT (_MM_TERNLOG_A OR _MM_TERNLOG_B OR _MM_TERNLOG_C) + // ... + 254: dst[0] := a OR b OR c // imm8[7:0] := _MM_TERNLOG_A OR _MM_TERNLOG_B OR _MM_TERNLOG_C + 255: dst[0] := 1 // imm8[7:0] := 1 + ESAC +} +imm8[7:0] = LogicExp(_MM_TERNLOG_A, _MM_TERNLOG_B, _MM_TERNLOG_C) +FOR j := 0 to 7 + i := j*32 + IF k[j] + FOR h := 0 to 31 dst[i+h] := TernaryOP(imm8[7:0], a[i+h], b[i+h], c[i+h]) - ENDFOR - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Logical + ENDFOR + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 +
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Logical
- - - - - - Bitwise ternary logic that provides the capability to implement any - three-operand binary function; the specific binary function is specified by value in - "imm8". For each bit in each packed 32-bit integer, the corresponding bit from "a", "b", - and "c" are used according to "imm8", and the result is written to the corresponding bit - in "dst". - - DEFINE TernaryOP(imm8, a, b, c) { - CASE imm8[7:0] OF - 0: dst[0] := 0 // imm8[7:0] := 0 - 1: dst[0] := NOT (a OR b OR c) // imm8[7:0] := NOT (_MM_TERNLOG_A OR _MM_TERNLOG_B OR - _MM_TERNLOG_C) - // ... - 254: dst[0] := a OR b OR c // imm8[7:0] := _MM_TERNLOG_A OR _MM_TERNLOG_B OR - _MM_TERNLOG_C - 255: dst[0] := 1 // imm8[7:0] := 1 - ESAC - } - imm8[7:0] = LogicExp(_MM_TERNLOG_A, _MM_TERNLOG_B, _MM_TERNLOG_C) - FOR j := 0 to 7 - i := j*32 - FOR h := 0 to 31 - dst[i+h] := TernaryOP(imm8[7:0], a[i+h], b[i+h], c[i+h]) - ENDFOR - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Logical + + + + + + Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 32-bit integer, the corresponding bit from "a", "b", and "c" are used according to "imm8", and the result is written to the corresponding bit in "dst". + +DEFINE TernaryOP(imm8, a, b, c) { + CASE imm8[7:0] OF + 0: dst[0] := 0 // imm8[7:0] := 0 + 1: dst[0] := NOT (a OR b OR c) // imm8[7:0] := NOT (_MM_TERNLOG_A OR _MM_TERNLOG_B OR _MM_TERNLOG_C) + // ... + 254: dst[0] := a OR b OR c // imm8[7:0] := _MM_TERNLOG_A OR _MM_TERNLOG_B OR _MM_TERNLOG_C + 255: dst[0] := 1 // imm8[7:0] := 1 + ESAC +} +imm8[7:0] = LogicExp(_MM_TERNLOG_A, _MM_TERNLOG_B, _MM_TERNLOG_C) +FOR j := 0 to 7 + i := j*32 + FOR h := 0 to 31 + dst[i+h] := TernaryOP(imm8[7:0], a[i+h], b[i+h], c[i+h]) + ENDFOR +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Logical
- - - - - - - Bitwise ternary logic that provides the capability to implement any - three-operand binary function; the specific binary function is specified by value in - "imm8". For each bit in each packed 32-bit integer, the corresponding bit from "a", "b", - and "c" are used according to "imm8", and the result is written to the corresponding bit - in "dst" using writemask "k" at 32-bit granularity (32-bit elements are copied from "a" - when the corresponding mask bit is not set). - - DEFINE TernaryOP(imm8, a, b, c) { - CASE imm8[7:0] OF - 0: dst[0] := 0 // imm8[7:0] := 0 - 1: dst[0] := NOT (a OR b OR c) // imm8[7:0] := NOT (_MM_TERNLOG_A OR _MM_TERNLOG_B OR - _MM_TERNLOG_C) - // ... - 254: dst[0] := a OR b OR c // imm8[7:0] := _MM_TERNLOG_A OR _MM_TERNLOG_B OR - _MM_TERNLOG_C - 255: dst[0] := 1 // imm8[7:0] := 1 - ESAC - } - imm8[7:0] = LogicExp(_MM_TERNLOG_A, _MM_TERNLOG_B, _MM_TERNLOG_C) - FOR j := 0 to 3 - i := j*32 - IF k[j] - FOR h := 0 to 31 + + + + + + + Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 32-bit integer, the corresponding bit from "a", "b", and "c" are used according to "imm8", and the result is written to the corresponding bit in "dst" using writemask "k" at 32-bit granularity (32-bit elements are copied from "a" when the corresponding mask bit is not set). + +DEFINE TernaryOP(imm8, a, b, c) { + CASE imm8[7:0] OF + 0: dst[0] := 0 // imm8[7:0] := 0 + 1: dst[0] := NOT (a OR b OR c) // imm8[7:0] := NOT (_MM_TERNLOG_A OR _MM_TERNLOG_B OR _MM_TERNLOG_C) + // ... + 254: dst[0] := a OR b OR c // imm8[7:0] := _MM_TERNLOG_A OR _MM_TERNLOG_B OR _MM_TERNLOG_C + 255: dst[0] := 1 // imm8[7:0] := 1 + ESAC +} +imm8[7:0] = LogicExp(_MM_TERNLOG_A, _MM_TERNLOG_B, _MM_TERNLOG_C) +FOR j := 0 to 3 + i := j*32 + IF k[j] + FOR h := 0 to 31 dst[i+h] := TernaryOP(imm8[7:0], a[i+h], b[i+h], c[i+h]) - ENDFOR - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Logical + ENDFOR + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Logical
- - - - - - - Bitwise ternary logic that provides the capability to implement any - three-operand binary function; the specific binary function is specified by value in - "imm8". For each bit in each packed 32-bit integer, the corresponding bit from "a", "b", - and "c" are used according to "imm8", and the result is written to the corresponding bit - in "dst" using zeromask "k" at 32-bit granularity (32-bit elements are zeroed out when - the corresponding mask bit is not set). - - DEFINE TernaryOP(imm8, a, b, c) { - CASE imm8[7:0] OF - 0: dst[0] := 0 // imm8[7:0] := 0 - 1: dst[0] := NOT (a OR b OR c) // imm8[7:0] := NOT (_MM_TERNLOG_A OR _MM_TERNLOG_B OR - _MM_TERNLOG_C) - // ... - 254: dst[0] := a OR b OR c // imm8[7:0] := _MM_TERNLOG_A OR _MM_TERNLOG_B OR - _MM_TERNLOG_C - 255: dst[0] := 1 // imm8[7:0] := 1 - ESAC - } - imm8[7:0] = LogicExp(_MM_TERNLOG_A, _MM_TERNLOG_B, _MM_TERNLOG_C) - FOR j := 0 to 3 - i := j*32 - IF k[j] - FOR h := 0 to 31 + + + + + + + Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 32-bit integer, the corresponding bit from "a", "b", and "c" are used according to "imm8", and the result is written to the corresponding bit in "dst" using zeromask "k" at 32-bit granularity (32-bit elements are zeroed out when the corresponding mask bit is not set). + +DEFINE TernaryOP(imm8, a, b, c) { + CASE imm8[7:0] OF + 0: dst[0] := 0 // imm8[7:0] := 0 + 1: dst[0] := NOT (a OR b OR c) // imm8[7:0] := NOT (_MM_TERNLOG_A OR _MM_TERNLOG_B OR _MM_TERNLOG_C) + // ... + 254: dst[0] := a OR b OR c // imm8[7:0] := _MM_TERNLOG_A OR _MM_TERNLOG_B OR _MM_TERNLOG_C + 255: dst[0] := 1 // imm8[7:0] := 1 + ESAC +} +imm8[7:0] = LogicExp(_MM_TERNLOG_A, _MM_TERNLOG_B, _MM_TERNLOG_C) +FOR j := 0 to 3 + i := j*32 + IF k[j] + FOR h := 0 to 31 dst[i+h] := TernaryOP(imm8[7:0], a[i+h], b[i+h], c[i+h]) - ENDFOR - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Logical + ENDFOR + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Logical
- - - - - - Bitwise ternary logic that provides the capability to implement any - three-operand binary function; the specific binary function is specified by value in - "imm8". For each bit in each packed 32-bit integer, the corresponding bit from "a", "b", - and "c" are used according to "imm8", and the result is written to the corresponding bit - in "dst". - - DEFINE TernaryOP(imm8, a, b, c) { - CASE imm8[7:0] OF - 0: dst[0] := 0 // imm8[7:0] := 0 - 1: dst[0] := NOT (a OR b OR c) // imm8[7:0] := NOT (_MM_TERNLOG_A OR _MM_TERNLOG_B OR - _MM_TERNLOG_C) - // ... - 254: dst[0] := a OR b OR c // imm8[7:0] := _MM_TERNLOG_A OR _MM_TERNLOG_B OR - _MM_TERNLOG_C - 255: dst[0] := 1 // imm8[7:0] := 1 - ESAC - } - imm8[7:0] = LogicExp(_MM_TERNLOG_A, _MM_TERNLOG_B, _MM_TERNLOG_C) - FOR j := 0 to 3 - i := j*32 - FOR h := 0 to 31 - dst[i+h] := TernaryOP(imm8[7:0], a[i+h], b[i+h], c[i+h]) - ENDFOR - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Logical + + + + + + Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 32-bit integer, the corresponding bit from "a", "b", and "c" are used according to "imm8", and the result is written to the corresponding bit in "dst". + +DEFINE TernaryOP(imm8, a, b, c) { + CASE imm8[7:0] OF + 0: dst[0] := 0 // imm8[7:0] := 0 + 1: dst[0] := NOT (a OR b OR c) // imm8[7:0] := NOT (_MM_TERNLOG_A OR _MM_TERNLOG_B OR _MM_TERNLOG_C) + // ... + 254: dst[0] := a OR b OR c // imm8[7:0] := _MM_TERNLOG_A OR _MM_TERNLOG_B OR _MM_TERNLOG_C + 255: dst[0] := 1 // imm8[7:0] := 1 + ESAC +} +imm8[7:0] = LogicExp(_MM_TERNLOG_A, _MM_TERNLOG_B, _MM_TERNLOG_C) +FOR j := 0 to 3 + i := j*32 + FOR h := 0 to 31 + dst[i+h] := TernaryOP(imm8[7:0], a[i+h], b[i+h], c[i+h]) + ENDFOR +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Logical
- - - - - - - Bitwise ternary logic that provides the capability to implement any - three-operand binary function; the specific binary function is specified by value in - "imm8". For each bit in each packed 64-bit integer, the corresponding bit from "a", "b", - and "c" are used according to "imm8", and the result is written to the corresponding bit - in "dst" using writemask "k" at 64-bit granularity (64-bit elements are copied from "a" - when the corresponding mask bit is not set). - - DEFINE TernaryOP(imm8, a, b, c) { - CASE imm8[7:0] OF - 0: dst[0] := 0 // imm8[7:0] := 0 - 1: dst[0] := NOT (a OR b OR c) // imm8[7:0] := NOT (_MM_TERNLOG_A OR _MM_TERNLOG_B OR - _MM_TERNLOG_C) - // ... - 254: dst[0] := a OR b OR c // imm8[7:0] := _MM_TERNLOG_A OR _MM_TERNLOG_B OR - _MM_TERNLOG_C - 255: dst[0] := 1 // imm8[7:0] := 1 - ESAC - } - imm8[7:0] = LogicExp(_MM_TERNLOG_A, _MM_TERNLOG_B, _MM_TERNLOG_C) - FOR j := 0 to 3 - i := j*64 - IF k[j] - FOR h := 0 to 63 + + + + + + + Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 64-bit integer, the corresponding bit from "a", "b", and "c" are used according to "imm8", and the result is written to the corresponding bit in "dst" using writemask "k" at 64-bit granularity (64-bit elements are copied from "a" when the corresponding mask bit is not set). + +DEFINE TernaryOP(imm8, a, b, c) { + CASE imm8[7:0] OF + 0: dst[0] := 0 // imm8[7:0] := 0 + 1: dst[0] := NOT (a OR b OR c) // imm8[7:0] := NOT (_MM_TERNLOG_A OR _MM_TERNLOG_B OR _MM_TERNLOG_C) + // ... + 254: dst[0] := a OR b OR c // imm8[7:0] := _MM_TERNLOG_A OR _MM_TERNLOG_B OR _MM_TERNLOG_C + 255: dst[0] := 1 // imm8[7:0] := 1 + ESAC +} +imm8[7:0] = LogicExp(_MM_TERNLOG_A, _MM_TERNLOG_B, _MM_TERNLOG_C) +FOR j := 0 to 3 + i := j*64 + IF k[j] + FOR h := 0 to 63 dst[i+h] := TernaryOP(imm8[7:0], a[i+h], b[i+h], c[i+h]) - ENDFOR - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Logical + ENDFOR + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Logical
- - - - - - - Bitwise ternary logic that provides the capability to implement any - three-operand binary function; the specific binary function is specified by value in - "imm8". For each bit in each packed 64-bit integer, the corresponding bit from "a", "b", - and "c" are used according to "imm8", and the result is written to the corresponding bit - in "dst" using zeromask "k" at 64-bit granularity (64-bit elements are zeroed out when - the corresponding mask bit is not set). - - DEFINE TernaryOP(imm8, a, b, c) { - CASE imm8[7:0] OF - 0: dst[0] := 0 // imm8[7:0] := 0 - 1: dst[0] := NOT (a OR b OR c) // imm8[7:0] := NOT (_MM_TERNLOG_A OR _MM_TERNLOG_B OR - _MM_TERNLOG_C) - // ... - 254: dst[0] := a OR b OR c // imm8[7:0] := _MM_TERNLOG_A OR _MM_TERNLOG_B OR - _MM_TERNLOG_C - 255: dst[0] := 1 // imm8[7:0] := 1 - ESAC - } - imm8[7:0] = LogicExp(_MM_TERNLOG_A, _MM_TERNLOG_B, _MM_TERNLOG_C) - FOR j := 0 to 3 - i := j*64 - IF k[j] - FOR h := 0 to 63 + + + + + + + Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 64-bit integer, the corresponding bit from "a", "b", and "c" are used according to "imm8", and the result is written to the corresponding bit in "dst" using zeromask "k" at 64-bit granularity (64-bit elements are zeroed out when the corresponding mask bit is not set). + +DEFINE TernaryOP(imm8, a, b, c) { + CASE imm8[7:0] OF + 0: dst[0] := 0 // imm8[7:0] := 0 + 1: dst[0] := NOT (a OR b OR c) // imm8[7:0] := NOT (_MM_TERNLOG_A OR _MM_TERNLOG_B OR _MM_TERNLOG_C) + // ... + 254: dst[0] := a OR b OR c // imm8[7:0] := _MM_TERNLOG_A OR _MM_TERNLOG_B OR _MM_TERNLOG_C + 255: dst[0] := 1 // imm8[7:0] := 1 + ESAC +} +imm8[7:0] = LogicExp(_MM_TERNLOG_A, _MM_TERNLOG_B, _MM_TERNLOG_C) +FOR j := 0 to 3 + i := j*64 + IF k[j] + FOR h := 0 to 63 dst[i+h] := TernaryOP(imm8[7:0], a[i+h], b[i+h], c[i+h]) - ENDFOR - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Logical + ENDFOR + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Logical
- - - - - - Bitwise ternary logic that provides the capability to implement any - three-operand binary function; the specific binary function is specified by value in - "imm8". For each bit in each packed 64-bit integer, the corresponding bit from "a", "b", - and "c" are used according to "imm8", and the result is written to the corresponding bit - in "dst". - - DEFINE TernaryOP(imm8, a, b, c) { - CASE imm8[7:0] OF - 0: dst[0] := 0 // imm8[7:0] := 0 - 1: dst[0] := NOT (a OR b OR c) // imm8[7:0] := NOT (_MM_TERNLOG_A OR _MM_TERNLOG_B OR - _MM_TERNLOG_C) - // ... - 254: dst[0] := a OR b OR c // imm8[7:0] := _MM_TERNLOG_A OR _MM_TERNLOG_B OR - _MM_TERNLOG_C - 255: dst[0] := 1 // imm8[7:0] := 1 - ESAC - } - imm8[7:0] = LogicExp(_MM_TERNLOG_A, _MM_TERNLOG_B, _MM_TERNLOG_C) - FOR j := 0 to 3 - i := j*64 - FOR h := 0 to 63 - dst[i+h] := TernaryOP(imm8[7:0], a[i+h], b[i+h], c[i+h]) - ENDFOR - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Logical + + + + + + Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 64-bit integer, the corresponding bit from "a", "b", and "c" are used according to "imm8", and the result is written to the corresponding bit in "dst". + +DEFINE TernaryOP(imm8, a, b, c) { + CASE imm8[7:0] OF + 0: dst[0] := 0 // imm8[7:0] := 0 + 1: dst[0] := NOT (a OR b OR c) // imm8[7:0] := NOT (_MM_TERNLOG_A OR _MM_TERNLOG_B OR _MM_TERNLOG_C) + // ... + 254: dst[0] := a OR b OR c // imm8[7:0] := _MM_TERNLOG_A OR _MM_TERNLOG_B OR _MM_TERNLOG_C + 255: dst[0] := 1 // imm8[7:0] := 1 + ESAC +} +imm8[7:0] = LogicExp(_MM_TERNLOG_A, _MM_TERNLOG_B, _MM_TERNLOG_C) +FOR j := 0 to 3 + i := j*64 + FOR h := 0 to 63 + dst[i+h] := TernaryOP(imm8[7:0], a[i+h], b[i+h], c[i+h]) + ENDFOR +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Logical
- - - - - - - Bitwise ternary logic that provides the capability to implement any - three-operand binary function; the specific binary function is specified by value in - "imm8". For each bit in each packed 64-bit integer, the corresponding bit from "a", "b", - and "c" are used according to "imm8", and the result is written to the corresponding bit - in "dst" using writemask "k" at 64-bit granularity (64-bit elements are copied from "a" - when the corresponding mask bit is not set). - - DEFINE TernaryOP(imm8, a, b, c) { - CASE imm8[7:0] OF - 0: dst[0] := 0 // imm8[7:0] := 0 - 1: dst[0] := NOT (a OR b OR c) // imm8[7:0] := NOT (_MM_TERNLOG_A OR _MM_TERNLOG_B OR - _MM_TERNLOG_C) - // ... - 254: dst[0] := a OR b OR c // imm8[7:0] := _MM_TERNLOG_A OR _MM_TERNLOG_B OR - _MM_TERNLOG_C - 255: dst[0] := 1 // imm8[7:0] := 1 - ESAC - } - imm8[7:0] = LogicExp(_MM_TERNLOG_A, _MM_TERNLOG_B, _MM_TERNLOG_C) - FOR j := 0 to 1 - i := j*64 - IF k[j] - FOR h := 0 to 63 + + + + + + + Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 64-bit integer, the corresponding bit from "a", "b", and "c" are used according to "imm8", and the result is written to the corresponding bit in "dst" using writemask "k" at 64-bit granularity (64-bit elements are copied from "a" when the corresponding mask bit is not set). + +DEFINE TernaryOP(imm8, a, b, c) { + CASE imm8[7:0] OF + 0: dst[0] := 0 // imm8[7:0] := 0 + 1: dst[0] := NOT (a OR b OR c) // imm8[7:0] := NOT (_MM_TERNLOG_A OR _MM_TERNLOG_B OR _MM_TERNLOG_C) + // ... + 254: dst[0] := a OR b OR c // imm8[7:0] := _MM_TERNLOG_A OR _MM_TERNLOG_B OR _MM_TERNLOG_C + 255: dst[0] := 1 // imm8[7:0] := 1 + ESAC +} +imm8[7:0] = LogicExp(_MM_TERNLOG_A, _MM_TERNLOG_B, _MM_TERNLOG_C) +FOR j := 0 to 1 + i := j*64 + IF k[j] + FOR h := 0 to 63 dst[i+h] := TernaryOP(imm8[7:0], a[i+h], b[i+h], c[i+h]) - ENDFOR - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Logical + ENDFOR + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Logical
- - - - - - - Bitwise ternary logic that provides the capability to implement any - three-operand binary function; the specific binary function is specified by value in - "imm8". For each bit in each packed 64-bit integer, the corresponding bit from "a", "b", - and "c" are used according to "imm8", and the result is written to the corresponding bit - in "dst" using zeromask "k" at 64-bit granularity (64-bit elements are zeroed out when - the corresponding mask bit is not set). - - DEFINE TernaryOP(imm8, a, b, c) { - CASE imm8[7:0] OF - 0: dst[0] := 0 // imm8[7:0] := 0 - 1: dst[0] := NOT (a OR b OR c) // imm8[7:0] := NOT (_MM_TERNLOG_A OR _MM_TERNLOG_B OR - _MM_TERNLOG_C) - // ... - 254: dst[0] := a OR b OR c // imm8[7:0] := _MM_TERNLOG_A OR _MM_TERNLOG_B OR - _MM_TERNLOG_C - 255: dst[0] := 1 // imm8[7:0] := 1 - ESAC - } - imm8[7:0] = LogicExp(_MM_TERNLOG_A, _MM_TERNLOG_B, _MM_TERNLOG_C) - FOR j := 0 to 1 - i := j*64 - IF k[j] - FOR h := 0 to 63 + + + + + + + Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 64-bit integer, the corresponding bit from "a", "b", and "c" are used according to "imm8", and the result is written to the corresponding bit in "dst" using zeromask "k" at 64-bit granularity (64-bit elements are zeroed out when the corresponding mask bit is not set). + +DEFINE TernaryOP(imm8, a, b, c) { + CASE imm8[7:0] OF + 0: dst[0] := 0 // imm8[7:0] := 0 + 1: dst[0] := NOT (a OR b OR c) // imm8[7:0] := NOT (_MM_TERNLOG_A OR _MM_TERNLOG_B OR _MM_TERNLOG_C) + // ... + 254: dst[0] := a OR b OR c // imm8[7:0] := _MM_TERNLOG_A OR _MM_TERNLOG_B OR _MM_TERNLOG_C + 255: dst[0] := 1 // imm8[7:0] := 1 + ESAC +} +imm8[7:0] = LogicExp(_MM_TERNLOG_A, _MM_TERNLOG_B, _MM_TERNLOG_C) +FOR j := 0 to 1 + i := j*64 + IF k[j] + FOR h := 0 to 63 dst[i+h] := TernaryOP(imm8[7:0], a[i+h], b[i+h], c[i+h]) - ENDFOR - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Logical + ENDFOR + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Logical
- - - - - - Bitwise ternary logic that provides the capability to implement any - three-operand binary function; the specific binary function is specified by value in - "imm8". For each bit in each packed 64-bit integer, the corresponding bit from "a", "b", - and "c" are used according to "imm8", and the result is written to the corresponding bit - in "dst". - - DEFINE TernaryOP(imm8, a, b, c) { - CASE imm8[7:0] OF - 0: dst[0] := 0 // imm8[7:0] := 0 - 1: dst[0] := NOT (a OR b OR c) // imm8[7:0] := NOT (_MM_TERNLOG_A OR _MM_TERNLOG_B OR - _MM_TERNLOG_C) - // ... - 254: dst[0] := a OR b OR c // imm8[7:0] := _MM_TERNLOG_A OR _MM_TERNLOG_B OR - _MM_TERNLOG_C - 255: dst[0] := 1 // imm8[7:0] := 1 - ESAC - } - imm8[7:0] = LogicExp(_MM_TERNLOG_A, _MM_TERNLOG_B, _MM_TERNLOG_C) - FOR j := 0 to 1 - i := j*64 - FOR h := 0 to 63 - dst[i+h] := TernaryOP(imm8[7:0], a[i+h], b[i+h], c[i+h]) - ENDFOR - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Logical + + + + + + Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 64-bit integer, the corresponding bit from "a", "b", and "c" are used according to "imm8", and the result is written to the corresponding bit in "dst". + +DEFINE TernaryOP(imm8, a, b, c) { + CASE imm8[7:0] OF + 0: dst[0] := 0 // imm8[7:0] := 0 + 1: dst[0] := NOT (a OR b OR c) // imm8[7:0] := NOT (_MM_TERNLOG_A OR _MM_TERNLOG_B OR _MM_TERNLOG_C) + // ... + 254: dst[0] := a OR b OR c // imm8[7:0] := _MM_TERNLOG_A OR _MM_TERNLOG_B OR _MM_TERNLOG_C + 255: dst[0] := 1 // imm8[7:0] := 1 + ESAC +} +imm8[7:0] = LogicExp(_MM_TERNLOG_A, _MM_TERNLOG_B, _MM_TERNLOG_C) +FOR j := 0 to 1 + i := j*64 + FOR h := 0 to 63 + dst[i+h] := TernaryOP(imm8[7:0], a[i+h], b[i+h], c[i+h]) + ENDFOR +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Logical
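The TernaryOP table lookup is small enough to model in scalar Rust. The sketch below is illustrative only (the helper name and the test values are made up); it uses the standard truth-table constants _MM_TERNLOG_A = 0xF0, _MM_TERNLOG_B = 0xCC and _MM_TERNLOG_C = 0xAA, so that evaluating a boolean expression over the constants (the LogicExp step) yields the imm8 that makes the operation compute the same expression per bit:

const MM_TERNLOG_A: u8 = 0xF0; // truth-table image of operand a
const MM_TERNLOG_B: u8 = 0xCC; // truth-table image of operand b
const MM_TERNLOG_C: u8 = 0xAA; // truth-table image of operand c

// Scalar model of TernaryOP for one 32-bit lane: the bits of a, b and c at
// each position form a 3-bit index (a is the most significant bit), which
// selects one bit of the imm8 truth table.
fn ternary_op_u32(imm8: u8, a: u32, b: u32, c: u32) -> u32 {
    let mut dst = 0;
    for bit in 0..32 {
        let idx = ((a >> bit) & 1) << 2 | ((b >> bit) & 1) << 1 | ((c >> bit) & 1);
        dst |= (((imm8 as u32) >> idx) & 1) << bit;
    }
    dst
}

fn main() {
    // LogicExp: evaluate the target expression over the table constants.
    let imm8 = MM_TERNLOG_A ^ (MM_TERNLOG_B & MM_TERNLOG_C); // a ^ (b & c), i.e. 0x78
    let (a, b, c) = (0x1234_5678_u32, 0x9abc_def0, 0x0f0f_0f0f);
    assert_eq!(ternary_op_u32(imm8, a, b, c), a ^ (b & c));
}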
Bitwise XOR entries. CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Logical.

Compute the bitwise XOR of packed 32-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 7
  i := j*32
  IF k[j]
    dst[i+31:i] := a[i+31:i] XOR b[i+31:i]
  ELSE
    dst[i+31:i] := src[i+31:i]
  FI
ENDFOR
dst[MAX:256] := 0

Compute the bitwise XOR of packed 32-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 7
  i := j*32
  IF k[j]
    dst[i+31:i] := a[i+31:i] XOR b[i+31:i]
  ELSE
    dst[i+31:i] := 0
  FI
ENDFOR
dst[MAX:256] := 0

Compute the bitwise XOR of packed 32-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 3
  i := j*32
  IF k[j]
    dst[i+31:i] := a[i+31:i] XOR b[i+31:i]
  ELSE
    dst[i+31:i] := src[i+31:i]
  FI
ENDFOR
dst[MAX:128] := 0

Compute the bitwise XOR of packed 32-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 3
  i := j*32
  IF k[j]
    dst[i+31:i] := a[i+31:i] XOR b[i+31:i]
  ELSE
    dst[i+31:i] := 0
  FI
ENDFOR
dst[MAX:128] := 0

Compute the bitwise XOR of packed 64-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 3
  i := j*64
  IF k[j]
    dst[i+63:i] := a[i+63:i] XOR b[i+63:i]
  ELSE
    dst[i+63:i] := src[i+63:i]
  FI
ENDFOR
dst[MAX:256] := 0

Compute the bitwise XOR of packed 64-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 3
  i := j*64
  IF k[j]
    dst[i+63:i] := a[i+63:i] XOR b[i+63:i]
  ELSE
    dst[i+63:i] := 0
  FI
ENDFOR
dst[MAX:256] := 0

Compute the bitwise XOR of packed 64-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 1
  i := j*64
  IF k[j]
    dst[i+63:i] := a[i+63:i] XOR b[i+63:i]
  ELSE
    dst[i+63:i] := src[i+63:i]
  FI
ENDFOR
dst[MAX:128] := 0

Compute the bitwise XOR of packed 64-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 1
  i := j*64
  IF k[j]
    dst[i+63:i] := a[i+63:i] XOR b[i+63:i]
  ELSE
    dst[i+63:i] := 0
  FI
ENDFOR
dst[MAX:128] := 0

Compute the bitwise XOR of packed 64-bit integers in "a" and "b", and store the results in "dst".

FOR j := 0 to 3
  i := j*64
  dst[i+63:i] := a[i+63:i] XOR b[i+63:i]
ENDFOR
dst[MAX:256] := 0

Compute the bitwise XOR of packed 32-bit integers in "a" and "b", and store the results in "dst".

FOR j := 0 to 7
  i := j*32
  dst[i+31:i] := a[i+31:i] XOR b[i+31:i]
ENDFOR
dst[MAX:256] := 0

Compute the bitwise XOR of packed 64-bit integers in "a" and "b", and store the results in "dst".

FOR j := 0 to 1
  i := j*64
  dst[i+63:i] := a[i+63:i] XOR b[i+63:i]
ENDFOR
dst[MAX:128] := 0

Compute the bitwise XOR of packed 32-bit integers in "a" and "b", and store the results in "dst".

FOR j := 0 to 3
  i := j*32
  dst[i+31:i] := a[i+31:i] XOR b[i+31:i]
ENDFOR
dst[MAX:128] := 0
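All twelve XOR entries instantiate one template in which only the lane width (32 or 64 bits), the lane count and the masking policy vary. A scalar Rust sketch of the 256-bit, 32-bit-lane forms (hypothetical helper names, not part of this patch):

// Writemask: lanes with a clear mask bit keep the value from `src`.
fn mask_xor_epi32(src: [u32; 8], k: u8, a: [u32; 8], b: [u32; 8]) -> [u32; 8] {
    core::array::from_fn(|j| if (k >> j) & 1 == 1 { a[j] ^ b[j] } else { src[j] })
}

// Zeromask: lanes with a clear mask bit become zero, i.e. the writemask
// form applied to an all-zero src.
fn maskz_xor_epi32(k: u8, a: [u32; 8], b: [u32; 8]) -> [u32; 8] {
    mask_xor_epi32([0; 8], k, a, b)
}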
Bitwise OR entries. CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Logical.

Compute the bitwise OR of packed 64-bit integers in "a" and "b", and store the results in "dst".

FOR j := 0 to 3
  i := j*64
  dst[i+63:i] := a[i+63:i] OR b[i+63:i]
ENDFOR
dst[MAX:256] := 0

Compute the bitwise OR of packed 32-bit integers in "a" and "b", and store the results in "dst".

FOR j := 0 to 7
  i := j*32
  dst[i+31:i] := a[i+31:i] OR b[i+31:i]
ENDFOR
dst[MAX:256] := 0

Compute the bitwise OR of packed 64-bit integers in "a" and "b", and store the results in "dst".

FOR j := 0 to 1
  i := j*64
  dst[i+63:i] := a[i+63:i] OR b[i+63:i]
ENDFOR
dst[MAX:128] := 0

Compute the bitwise OR of packed 32-bit integers in "a" and "b", and store the results in "dst".

FOR j := 0 to 3
  i := j*32
  dst[i+31:i] := a[i+31:i] OR b[i+31:i]
ENDFOR
dst[MAX:128] := 0
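For these unmasked forms, the epi32 and epi64 variants produce identical bits, because OR (like XOR) ignores lane boundaries; the element width only becomes observable under a 32- or 64-bit writemask. A quick scalar Rust check of that claim (illustrative only):

fn main() {
    let a: [u32; 8] = [1, 2, 3, 4, 5, 6, 7, 8];
    let b: [u32; 8] = [0x80, 0x40, 0x20, 0x10, 8, 4, 2, 1];
    // OR the same 256 bits as eight 32-bit lanes...
    let or32: [u32; 8] = core::array::from_fn(|j| a[j] | b[j]);
    // ...and as four 64-bit lanes.
    let a64: [u64; 4] = unsafe { core::mem::transmute(a) };
    let b64: [u64; 4] = unsafe { core::mem::transmute(b) };
    let or64: [u64; 4] = core::array::from_fn(|j| a64[j] | b64[j]);
    assert_eq!(unsafe { core::mem::transmute::<[u64; 4], [u32; 8]>(or64) }, or32);
}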
Broadcast entries. CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Set.

Broadcast 32-bit integer "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 7
  i := j*32
  IF k[j]
    dst[i+31:i] := a[31:0]
  ELSE
    dst[i+31:i] := src[i+31:i]
  FI
ENDFOR
dst[MAX:256] := 0

Broadcast 32-bit integer "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 7
  i := j*32
  IF k[j]
    dst[i+31:i] := a[31:0]
  ELSE
    dst[i+31:i] := 0
  FI
ENDFOR
dst[MAX:256] := 0

Broadcast 32-bit integer "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 3
  i := j*32
  IF k[j]
    dst[i+31:i] := a[31:0]
  ELSE
    dst[i+31:i] := src[i+31:i]
  FI
ENDFOR
dst[MAX:128] := 0

Broadcast 32-bit integer "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 3
  i := j*32
  IF k[j]
    dst[i+31:i] := a[31:0]
  ELSE
    dst[i+31:i] := 0
  FI
ENDFOR
dst[MAX:128] := 0

Broadcast 64-bit integer "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 3
  i := j*64
  IF k[j]
    dst[i+63:i] := a[63:0]
  ELSE
    dst[i+63:i] := src[i+63:i]
  FI
ENDFOR
dst[MAX:256] := 0

Broadcast 64-bit integer "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 3
  i := j*64
  IF k[j]
    dst[i+63:i] := a[63:0]
  ELSE
    dst[i+63:i] := 0
  FI
ENDFOR
dst[MAX:256] := 0

Broadcast 64-bit integer "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 1
  i := j*64
  IF k[j]
    dst[i+63:i] := a[63:0]
  ELSE
    dst[i+63:i] := src[i+63:i]
  FI
ENDFOR
dst[MAX:128] := 0

Broadcast 64-bit integer "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 1
  i := j*64
  IF k[j]
    dst[i+63:i] := a[63:0]
  ELSE
    dst[i+63:i] := 0
  FI
ENDFOR
dst[MAX:128] := 0
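A scalar Rust sketch of the masked broadcast pattern (hypothetical helper names): every selected lane receives "a", and unselected lanes either keep "src" (writemask) or become zero (zeromask).

// Writemask broadcast of one 32-bit value into eight lanes.
fn mask_broadcast_epi32(src: [u32; 8], k: u8, a: u32) -> [u32; 8] {
    core::array::from_fn(|j| if (k >> j) & 1 == 1 { a } else { src[j] })
}

// The zeromask form is the writemask form with an all-zero src.
fn maskz_broadcast_epi32(k: u8, a: u32) -> [u32; 8] {
    mask_broadcast_epi32([0; 8], k, a)
}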
Rotate-left-by-immediate entries. CPUID: AVX512F, AVX512VL. Header: immintrin.h. Category: Shift.

These entries share two helpers:

DEFINE LEFT_ROTATE_DWORDS(src, count_src) {
  count := count_src % 32
  RETURN (src << count) OR (src >> (32 - count))
}
DEFINE LEFT_ROTATE_QWORDS(src, count_src) {
  count := count_src % 64
  RETURN (src << count) OR (src >> (64 - count))
}

Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 7
  i := j*32
  IF k[j]
    dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], imm8[7:0])
  ELSE
    dst[i+31:i] := src[i+31:i]
  FI
ENDFOR
dst[MAX:256] := 0

Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 7
  i := j*32
  IF k[j]
    dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], imm8[7:0])
  ELSE
    dst[i+31:i] := 0
  FI
ENDFOR
dst[MAX:256] := 0

Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst".

FOR j := 0 to 7
  i := j*32
  dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], imm8[7:0])
ENDFOR
dst[MAX:256] := 0

Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 3
  i := j*32
  IF k[j]
    dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], imm8[7:0])
  ELSE
    dst[i+31:i] := src[i+31:i]
  FI
ENDFOR
dst[MAX:128] := 0

Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 3
  i := j*32
  IF k[j]
    dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], imm8[7:0])
  ELSE
    dst[i+31:i] := 0
  FI
ENDFOR
dst[MAX:128] := 0

Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst".

FOR j := 0 to 3
  i := j*32
  dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], imm8[7:0])
ENDFOR
dst[MAX:128] := 0

Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 3
  i := j*64
  IF k[j]
    dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], imm8[7:0])
  ELSE
    dst[i+63:i] := src[i+63:i]
  FI
ENDFOR
dst[MAX:256] := 0

Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 3
  i := j*64
  IF k[j]
    dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], imm8[7:0])
  ELSE
    dst[i+63:i] := 0
  FI
ENDFOR
dst[MAX:256] := 0

Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst".

FOR j := 0 to 3
  i := j*64
  dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], imm8[7:0])
ENDFOR
dst[MAX:256] := 0

Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 1
  i := j*64
  IF k[j]
    dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], imm8[7:0])
  ELSE
    dst[i+63:i] := src[i+63:i]
  FI
ENDFOR
dst[MAX:128] := 0

Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 1
  i := j*64
  IF k[j]
    dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], imm8[7:0])
  ELSE
    dst[i+63:i] := 0
  FI
ENDFOR
dst[MAX:128] := 0

Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst".

FOR j := 0 to 1
  i := j*64
  dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], imm8[7:0])
ENDFOR
dst[MAX:128] := 0
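LEFT_ROTATE_DWORDS and LEFT_ROTATE_QWORDS reduce the count modulo the lane width before rotating, which matches the behaviour of Rust's rotate_left on u32 and u64. A scalar sketch of the immediate 32-bit form (hypothetical helper name); for example, rol_epi32::<3>(a) rotates every lane left by 3 bits:

// Immediate rotate: every lane rotates by the same compile-time count.
fn rol_epi32<const IMM8: u32>(a: [u32; 8]) -> [u32; 8] {
    // `% 32` mirrors the pseudocode; rotating by the lane width is a no-op.
    a.map(|x| x.rotate_left(IMM8 % 32))
}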
- - - - - - Rotate the bits in each packed 32-bit integer in "a" to the left by the number - of bits specified in the corresponding element of "b", and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - DEFINE LEFT_ROTATE_DWORDS(src, count_src) { - count := count_src % 32 - RETURN (src << count) OR (src >> (32 - count)) - } - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + + + + + + Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE LEFT_ROTATE_DWORDS(src, count_src) { + count := count_src % 32 + RETURN (src << count) OR (src >> (32 - count)) +} +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Rotate the bits in each packed 32-bit integer in "a" to the left by the number - of bits specified in the corresponding element of "b", and store the results in "dst" - using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - DEFINE LEFT_ROTATE_DWORDS(src, count_src) { - count := count_src % 32 - RETURN (src << count) OR (src >> (32 - count)) - } - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + + + + + Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE LEFT_ROTATE_DWORDS(src, count_src) { + count := count_src % 32 + RETURN (src << count) OR (src >> (32 - count)) +} +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - Rotate the bits in each packed 32-bit integer in "a" to the left by the number - of bits specified in the corresponding element of "b", and store the results in "dst". - - DEFINE LEFT_ROTATE_DWORDS(src, count_src) { - count := count_src % 32 - RETURN (src << count) OR (src >> (32 - count)) - } - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + + + + Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst". + +DEFINE LEFT_ROTATE_DWORDS(src, count_src) { + count := count_src % 32 + RETURN (src << count) OR (src >> (32 - count)) +} +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - - Rotate the bits in each packed 32-bit integer in "a" to the left by the number - of bits specified in the corresponding element of "b", and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - DEFINE LEFT_ROTATE_DWORDS(src, count_src) { - count := count_src % 32 - RETURN (src << count) OR (src >> (32 - count)) - } - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + + + + + + Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE LEFT_ROTATE_DWORDS(src, count_src) { + count := count_src % 32 + RETURN (src << count) OR (src >> (32 - count)) +} +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Rotate the bits in each packed 32-bit integer in "a" to the left by the number - of bits specified in the corresponding element of "b", and store the results in "dst" - using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - DEFINE LEFT_ROTATE_DWORDS(src, count_src) { - count := count_src % 32 - RETURN (src << count) OR (src >> (32 - count)) - } - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + + + + + Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE LEFT_ROTATE_DWORDS(src, count_src) { + count := count_src % 32 + RETURN (src << count) OR (src >> (32 - count)) +} +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - Rotate the bits in each packed 32-bit integer in "a" to the left by the number - of bits specified in the corresponding element of "b", and store the results in "dst". - - DEFINE LEFT_ROTATE_DWORDS(src, count_src) { - count := count_src % 32 - RETURN (src << count) OR (src >> (32 - count)) - } - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + + + + Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst". + +DEFINE LEFT_ROTATE_DWORDS(src, count_src) { + count := count_src % 32 + RETURN (src << count) OR (src >> (32 - count)) +} +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - - Rotate the bits in each packed 64-bit integer in "a" to the left by the number - of bits specified in the corresponding element of "b", and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - DEFINE LEFT_ROTATE_QWORDS(src, count_src) { - count := count_src % 64 - RETURN (src << count) OR (src >> (64 - count)) - } - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + + + + + + Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE LEFT_ROTATE_QWORDS(src, count_src) { + count := count_src % 64 + RETURN (src << count) OR (src >> (64 - count)) +} +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Rotate the bits in each packed 64-bit integer in "a" to the left by the number - of bits specified in the corresponding element of "b", and store the results in "dst" - using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - DEFINE LEFT_ROTATE_QWORDS(src, count_src) { - count := count_src % 64 - RETURN (src << count) OR (src >> (64 - count)) - } - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + + + + + Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE LEFT_ROTATE_QWORDS(src, count_src) { + count := count_src % 64 + RETURN (src << count) OR (src >> (64 - count)) +} +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - Rotate the bits in each packed 64-bit integer in "a" to the left by the number - of bits specified in the corresponding element of "b", and store the results in "dst". - - DEFINE LEFT_ROTATE_QWORDS(src, count_src) { - count := count_src % 64 - RETURN (src << count) OR (src >> (64 - count)) - } - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + + + + Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst". + +DEFINE LEFT_ROTATE_QWORDS(src, count_src) { + count := count_src % 64 + RETURN (src << count) OR (src >> (64 - count)) +} +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - - Rotate the bits in each packed 64-bit integer in "a" to the left by the number - of bits specified in the corresponding element of "b", and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - DEFINE LEFT_ROTATE_QWORDS(src, count_src) { - count := count_src % 64 - RETURN (src << count) OR (src >> (64 - count)) - } - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + + + + + + Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE LEFT_ROTATE_QWORDS(src, count_src) { + count := count_src % 64 + RETURN (src << count) OR (src >> (64 - count)) +} +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Rotate the bits in each packed 64-bit integer in "a" to the left by the number - of bits specified in the corresponding element of "b", and store the results in "dst" - using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - DEFINE LEFT_ROTATE_QWORDS(src, count_src) { - count := count_src % 64 - RETURN (src << count) OR (src >> (64 - count)) - } - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + + + + + Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE LEFT_ROTATE_QWORDS(src, count_src) { + count := count_src % 64 + RETURN (src << count) OR (src >> (64 - count)) +} +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Shift
-
-
-
- Rotate the bits in each packed 64-bit integer in "a" to the left by the number
- of bits specified in the corresponding element of "b", and store the results in "dst".
-
- DEFINE LEFT_ROTATE_QWORDS(src, count_src) {
-  count := count_src % 64
-  RETURN (src << count) OR (src >> (64 - count))
- }
- FOR j := 0 to 1
-  i := j*64
-  dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], b[i+63:i])
- ENDFOR
- dst[MAX:128] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Shift + + + + Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst". + +DEFINE LEFT_ROTATE_QWORDS(src, count_src) { + count := count_src % 64 + RETURN (src << count) OR (src >> (64 - count)) +} +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Shift
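
The five hunks above differ only in lane count (four vs two qwords) and masking policy; all share LEFT_ROTATE_QWORDS. As a reading aid, here is a minimal scalar Rust sketch of the zeromask/writemask semantics (the `rolv_epi64_model` helper below is a hypothetical illustration, not a stdarch intrinsic):

fn rolv_epi64_model(src: [u64; 4], k: u8, a: [u64; 4], b: [u64; 4], zero: bool) -> [u64; 4] {
    let mut dst = [0u64; 4];
    for j in 0..4 {
        dst[j] = if (k >> j) & 1 == 1 {
            // LEFT_ROTATE_QWORDS: the count is reduced modulo the lane width
            a[j].rotate_left((b[j] % 64) as u32)
        } else if zero {
            0 // zeromask: the unselected lane is zeroed
        } else {
            src[j] // writemask: the unselected lane is copied from src
        };
    }
    dst
}

fn main() {
    let a = [1u64, 2, 3, 4];
    let b = [1, 65, 0, 63]; // 65 % 64 == 1, so lane 1 rotates by one bit
    assert_eq!(rolv_epi64_model([0; 4], 0b1111, a, b, true), [2, 4, 3, 2]);
}

(`u64::rotate_left` already wraps the count modulo 64, so the explicit `% 64` only mirrors the pseudocode.)
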
-
-
-
-
-
- Rotate the bits in each packed 32-bit integer in "a" to the right by the number
- of bits specified in "imm8", and store the results in "dst" using writemask "k"
- (elements are copied from "src" when the corresponding mask bit is not set).
-
- DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
-  count := count_src % 32
-  RETURN (src >> count) OR (src << (32 - count))
- }
- FOR j := 0 to 7
-  i := j*32
-  IF k[j]
-   dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], imm8[7:0])
-  ELSE
-   dst[i+31:i] := src[i+31:i]
-  FI
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Shift
+
+
+
+
+
+
+ Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src >> count) OR (src << (32 - count))
+}
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+  dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], imm8[7:0])
+ ELSE
+  dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Shift
-
-
-
-
- Rotate the bits in each packed 32-bit integer in "a" to the right by the number
- of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements
- are zeroed out when the corresponding mask bit is not set).
-
- DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
-  count := count_src % 32
-  RETURN (src >> count) OR (src << (32 - count))
- }
- FOR j := 0 to 7
-  i := j*32
-  IF k[j]
-   dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], imm8[7:0])
-  ELSE
-   dst[i+31:i] := 0
-  FI
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Shift
+
+
+
+
+
+ Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src >> count) OR (src << (32 - count))
+}
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+  dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], imm8[7:0])
+ ELSE
+  dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Shift
-
-
-
- Rotate the bits in each packed 32-bit integer in "a" to the right by the number
- of bits specified in "imm8", and store the results in "dst".
-
- DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
-  count := count_src % 32
-  RETURN (src >> count) OR (src << (32 - count))
- }
- FOR j := 0 to 7
-  i := j*32
-  dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], imm8[7:0])
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Shift
+
+
+
+
+ Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst".
+
+DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src >> count) OR (src << (32 - count))
+}
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], imm8[7:0])
+ENDFOR
+dst[MAX:256] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Shift
-
-
-
-
-
- Rotate the bits in each packed 32-bit integer in "a" to the right by the number
- of bits specified in "imm8", and store the results in "dst" using writemask "k"
- (elements are copied from "src" when the corresponding mask bit is not set).
-
- DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
-  count := count_src % 32
-  RETURN (src >> count) OR (src << (32 - count))
- }
- FOR j := 0 to 3
-  i := j*32
-  IF k[j]
-   dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], imm8[7:0])
-  ELSE
-   dst[i+31:i] := src[i+31:i]
-  FI
- ENDFOR
- dst[MAX:128] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Shift
+
+
+
+
+
+
+ Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src >> count) OR (src << (32 - count))
+}
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+  dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], imm8[7:0])
+ ELSE
+  dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Shift
-
-
-
-
- Rotate the bits in each packed 32-bit integer in "a" to the right by the number
- of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements
- are zeroed out when the corresponding mask bit is not set).
-
- DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
-  count := count_src % 32
-  RETURN (src >> count) OR (src << (32 - count))
- }
- FOR j := 0 to 3
-  i := j*32
-  IF k[j]
-   dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], imm8[7:0])
-  ELSE
-   dst[i+31:i] := 0
-  FI
- ENDFOR
- dst[MAX:128] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Shift
+
+
+
+
+
+ Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src >> count) OR (src << (32 - count))
+}
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+  dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], imm8[7:0])
+ ELSE
+  dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Shift
-
-
-
- Rotate the bits in each packed 32-bit integer in "a" to the right by the number
- of bits specified in "imm8", and store the results in "dst".
-
- DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
-  count := count_src % 32
-  RETURN (src >> count) OR (src << (32 - count))
- }
- FOR j := 0 to 3
-  i := j*32
-  dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], imm8[7:0])
- ENDFOR
- dst[MAX:128] := 0
-
-
- AVX512F
- AVX512VL
-
immintrin.h
- Shift
+
+
+
+
+ Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst".
+
+DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src >> count) OR (src << (32 - count))
+}
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], imm8[7:0])
+ENDFOR
+dst[MAX:128] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Shift
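
For the immediate forms above, every dword uses the same imm8 count. A scalar sketch of the writemask variant, with a const generic standing in for the immediate (illustrative only; `mask_ror_epi32_model` is a hypothetical name, not the stdarch signature):

fn mask_ror_epi32_model<const IMM8: u32>(src: [u32; 8], k: u8, a: [u32; 8]) -> [u32; 8] {
    let mut dst = src; // writemask: unselected lanes keep src
    for j in 0..8 {
        if (k >> j) & 1 == 1 {
            dst[j] = a[j].rotate_right(IMM8 % 32); // RIGHT_ROTATE_DWORDS
        }
    }
    dst
}

fn main() {
    let r = mask_ror_epi32_model::<1>([0xAAAA_AAAA; 8], 0b0000_0101, [0x8000_0001; 8]);
    assert_eq!(r[0], 0xC000_0000); // bit 0 wrapped around to bit 31
    assert_eq!(r[1], 0xAAAA_AAAA); // mask bit clear: copied from src
}
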
- - - - - - Rotate the bits in each packed 64-bit integer in "a" to the right by the number - of bits specified in "imm8", and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - DEFINE RIGHT_ROTATE_QWORDS(src, count_src) { - count := count_src % 64 - RETURN (src >> count) OR (src << (64 - count)) - } - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], imm8[7:0]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + + + + + + Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE RIGHT_ROTATE_QWORDS(src, count_src) { + count := count_src % 64 + RETURN (src >> count) OR (src << (64 - count)) +} +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Rotate the bits in each packed 64-bit integer in "a" to the right by the number - of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements - are zeroed out when the corresponding mask bit is not set). - - DEFINE RIGHT_ROTATE_QWORDS(src, count_src) { - count := count_src % 64 - RETURN (src >> count) OR (src << (64 - count)) - } - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], imm8[7:0]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + + + + + Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE RIGHT_ROTATE_QWORDS(src, count_src) { + count := count_src % 64 + RETURN (src >> count) OR (src << (64 - count)) +} +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - Rotate the bits in each packed 64-bit integer in "a" to the right by the number - of bits specified in "imm8", and store the results in "dst". - - DEFINE RIGHT_ROTATE_QWORDS(src, count_src) { - count := count_src % 64 - RETURN (src >> count) OR (src << (64 - count)) - } - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], imm8[7:0]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + + + + Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst". + +DEFINE RIGHT_ROTATE_QWORDS(src, count_src) { + count := count_src % 64 + RETURN (src >> count) OR (src << (64 - count)) +} +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], imm8[7:0]) +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - - Rotate the bits in each packed 64-bit integer in "a" to the right by the number - of bits specified in "imm8", and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - DEFINE RIGHT_ROTATE_QWORDS(src, count_src) { - count := count_src % 64 - RETURN (src >> count) OR (src << (64 - count)) - } - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], imm8[7:0]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + + + + + + Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE RIGHT_ROTATE_QWORDS(src, count_src) { + count := count_src % 64 + RETURN (src >> count) OR (src << (64 - count)) +} +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Rotate the bits in each packed 64-bit integer in "a" to the right by the number - of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements - are zeroed out when the corresponding mask bit is not set). - - DEFINE RIGHT_ROTATE_QWORDS(src, count_src) { - count := count_src % 64 - RETURN (src >> count) OR (src << (64 - count)) - } - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], imm8[7:0]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + + + + + Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE RIGHT_ROTATE_QWORDS(src, count_src) { + count := count_src % 64 + RETURN (src >> count) OR (src << (64 - count)) +} +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - Rotate the bits in each packed 64-bit integer in "a" to the right by the number - of bits specified in "imm8", and store the results in "dst". - - DEFINE RIGHT_ROTATE_QWORDS(src, count_src) { - count := count_src % 64 - RETURN (src >> count) OR (src << (64 - count)) - } - FOR j := 0 to 1 - i := j*64 - dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], imm8[7:0]) - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + + + + Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst". + +DEFINE RIGHT_ROTATE_QWORDS(src, count_src) { + count := count_src % 64 + RETURN (src >> count) OR (src << (64 - count)) +} +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], imm8[7:0]) +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Shift
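
The qword variants reduce the count modulo 64, so an imm8 of 65 behaves exactly like 1. A two-assert scalar check of that property (sketch only, `ror64` is a hypothetical helper):

fn ror64(x: u64, imm8: u8) -> u64 {
    x.rotate_right((imm8 % 64) as u32)
}

fn main() {
    assert_eq!(ror64(0xDEAD_BEEF, 65), ror64(0xDEAD_BEEF, 1));
    assert_eq!(ror64(3, 1), 0x8000_0000_0000_0001); // the low bit wraps to bit 63
}
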
- - - - - - Rotate the bits in each packed 32-bit integer in "a" to the right by the number - of bits specified in the corresponding element of "b", and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - DEFINE RIGHT_ROTATE_DWORDS(src, count_src) { - count := count_src % 32 - RETURN (src >>count) OR (src << (32 - count)) - } - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift
+
+
+
+
+
+
+ Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src >> count) OR (src << (32 - count))
+}
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+  dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], b[i+31:i])
+ ELSE
+  dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Shift
- - - - - Rotate the bits in each packed 32-bit integer in "a" to the right by the number - of bits specified in the corresponding element of "b", and store the results in "dst" - using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - DEFINE RIGHT_ROTATE_DWORDS(src, count_src) { - count := count_src % 32 - RETURN (src >>count) OR (src << (32 - count)) - } - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift
+
+
+
+
+
+ Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src >> count) OR (src << (32 - count))
+}
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+  dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], b[i+31:i])
+ ELSE
+  dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Shift
- - - - Rotate the bits in each packed 32-bit integer in "a" to the right by the number - of bits specified in the corresponding element of "b", and store the results in "dst". - - DEFINE RIGHT_ROTATE_DWORDS(src, count_src) { - count := count_src % 32 - RETURN (src >>count) OR (src << (32 - count)) - } - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift
+
+
+
+
+ Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst".
+
+DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src >> count) OR (src << (32 - count))
+}
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], b[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Shift
- - - - - - Rotate the bits in each packed 32-bit integer in "a" to the right by the number - of bits specified in the corresponding element of "b", and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - DEFINE RIGHT_ROTATE_DWORDS(src, count_src) { - count := count_src % 32 - RETURN (src >>count) OR (src << (32 - count)) - } - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift
+
+
+
+
+
+
+ Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src >> count) OR (src << (32 - count))
+}
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+  dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], b[i+31:i])
+ ELSE
+  dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Shift
- - - - - Rotate the bits in each packed 32-bit integer in "a" to the right by the number - of bits specified in the corresponding element of "b", and store the results in "dst" - using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - DEFINE RIGHT_ROTATE_DWORDS(src, count_src) { - count := count_src % 32 - RETURN (src >>count) OR (src << (32 - count)) - } - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift
+
+
+
+
+
+ Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src >> count) OR (src << (32 - count))
+}
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+  dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], b[i+31:i])
+ ELSE
+  dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Shift
- - - - Rotate the bits in each packed 32-bit integer in "a" to the right by the number - of bits specified in the corresponding element of "b", and store the results in "dst". - - DEFINE RIGHT_ROTATE_DWORDS(src, count_src) { - count := count_src % 32 - RETURN (src >>count) OR (src << (32 - count)) - } - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift
+
+
+
+
+ Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst".
+
+DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src >> count) OR (src << (32 - count))
+}
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], b[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+
+
+ AVX512F
+ AVX512VL
+
immintrin.h
+ Shift
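
Unlike the imm8 forms, the rorv family reads a separate count from each element of "b". A scalar model of the unmasked variant (the helper name is hypothetical):

fn rorv_epi32_model(a: [u32; 8], b: [u32; 8]) -> [u32; 8] {
    let mut dst = [0u32; 8];
    for j in 0..8 {
        dst[j] = a[j].rotate_right(b[j] % 32); // per-lane count, reduced mod 32
    }
    dst
}

fn main() {
    let r = rorv_epi32_model([0b1001; 8], [0, 1, 2, 3, 4, 32, 33, 31]);
    assert_eq!(r[0], r[5]); // counts 0 and 32 coincide after the mod-32 reduction
    assert_eq!(r[1], r[6]);
}
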
- - - - - - Rotate the bits in each packed 64-bit integer in "a" to the right by the number - of bits specified in the corresponding element of "b", and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - DEFINE RIGHT_ROTATE_QWORDS(src, count_src) { - count := count_src % 64 - RETURN (src >> count) OR (src << (64 - count)) - } - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + + + + + + Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE RIGHT_ROTATE_QWORDS(src, count_src) { + count := count_src % 64 + RETURN (src >> count) OR (src << (64 - count)) +} +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Rotate the bits in each packed 64-bit integer in "a" to the right by the number - of bits specified in the corresponding element of "b", and store the results in "dst" - using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - DEFINE RIGHT_ROTATE_QWORDS(src, count_src) { - count := count_src % 64 - RETURN (src >> count) OR (src << (64 - count)) - } - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + + + + + Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE RIGHT_ROTATE_QWORDS(src, count_src) { + count := count_src % 64 + RETURN (src >> count) OR (src << (64 - count)) +} +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - Rotate the bits in each packed 64-bit integer in "a" to the right by the number - of bits specified in the corresponding element of "b", and store the results in "dst". - - DEFINE RIGHT_ROTATE_QWORDS(src, count_src) { - count := count_src % 64 - RETURN (src >> count) OR (src << (64 - count)) - } - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + + + + Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst". + +DEFINE RIGHT_ROTATE_QWORDS(src, count_src) { + count := count_src % 64 + RETURN (src >> count) OR (src << (64 - count)) +} +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - - Rotate the bits in each packed 64-bit integer in "a" to the right by the number - of bits specified in the corresponding element of "b", and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - DEFINE RIGHT_ROTATE_QWORDS(src, count_src) { - count := count_src % 64 - RETURN (src >> count) OR (src << (64 - count)) - } - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + + + + + + Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE RIGHT_ROTATE_QWORDS(src, count_src) { + count := count_src % 64 + RETURN (src >> count) OR (src << (64 - count)) +} +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Rotate the bits in each packed 64-bit integer in "a" to the right by the number - of bits specified in the corresponding element of "b", and store the results in "dst" - using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - DEFINE RIGHT_ROTATE_QWORDS(src, count_src) { - count := count_src % 64 - RETURN (src >> count) OR (src << (64 - count)) - } - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + + + + + Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE RIGHT_ROTATE_QWORDS(src, count_src) { + count := count_src % 64 + RETURN (src >> count) OR (src << (64 - count)) +} +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - Rotate the bits in each packed 64-bit integer in "a" to the right by the number - of bits specified in the corresponding element of "b", and store the results in "dst". - - DEFINE RIGHT_ROTATE_QWORDS(src, count_src) { - count := count_src % 64 - RETURN (src >> count) OR (src << (64 - count)) - } - FOR j := 0 to 1 - i := j*64 - dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + + + + Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst". + +DEFINE RIGHT_ROTATE_QWORDS(src, count_src) { + count := count_src % 64 + RETURN (src >> count) OR (src << (64 - count)) +} +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Shift
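
One porting caveat with RIGHT_ROTATE_QWORDS as written: the expression (src >> count) OR (src << (64 - count)) is only well defined for counts 1 through 63, because a count of 0 would shift by 64, which Rust treats as overflow (a panic in debug builds). A faithful scalar port needs a guard, or can simply defer to the built-in rotate; a minimal sketch, assuming nothing beyond core:

fn right_rotate_qwords(src: u64, count_src: u64) -> u64 {
    let count = (count_src % 64) as u32;
    if count == 0 {
        src // avoid the 64-bit shift the raw formula would perform
    } else {
        (src >> count) | (src << (64 - count))
    }
}

fn main() {
    for c in [0u64, 1, 17, 63, 64, 100] {
        let x = 0x0123_4567_89AB_CDEFu64;
        assert_eq!(right_rotate_qwords(x, c), x.rotate_right((c % 64) as u32));
    }
}
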
- - - - - - Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - IF count[63:0] > 31 + + + + + + Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF count[63:0] > 31 dst[i+31:i] := 0 - ELSE + ELSE dst[i+31:i] := ZeroExtend32(a[i+31:i] << count[63:0]) - FI - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - - Shift packed 32-bit integers in "a" left by "imm8" while shifting in zeros, and - store the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - IF imm8[7:0] > 31 + + + + + + Shift packed 32-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF imm8[7:0] > 31 dst[i+31:i] := 0 - ELSE + ELSE dst[i+31:i] := ZeroExtend32(a[i+31:i] << imm8[7:0]) - FI - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - IF count[63:0] > 31 + + + + + Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF count[63:0] > 31 dst[i+31:i] := 0 - ELSE + ELSE dst[i+31:i] := ZeroExtend32(a[i+31:i] << count[63:0]) - FI - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 32-bit integers in "a" left by "imm8" while shifting in zeros, and - store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - IF imm8[7:0] > 31 + + + + + Shift packed 32-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF imm8[7:0] > 31 dst[i+31:i] := 0 - ELSE + ELSE dst[i+31:i] := ZeroExtend32(a[i+31:i] << imm8[7:0]) - FI - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - - Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - IF count[63:0] > 31 + + + + + + Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF count[63:0] > 31 dst[i+31:i] := 0 - ELSE + ELSE dst[i+31:i] := ZeroExtend32(a[i+31:i] << count[63:0]) - FI - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - - Shift packed 32-bit integers in "a" left by "imm8" while shifting in zeros, and - store the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - IF imm8[7:0] > 31 + + + + + + Shift packed 32-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF imm8[7:0] > 31 dst[i+31:i] := 0 - ELSE + ELSE dst[i+31:i] := ZeroExtend32(a[i+31:i] << imm8[7:0]) - FI - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - IF count[63:0] > 31 + + + + + Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF count[63:0] > 31 dst[i+31:i] := 0 - ELSE + ELSE dst[i+31:i] := ZeroExtend32(a[i+31:i] << count[63:0]) - FI - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 32-bit integers in "a" left by "imm8" while shifting in zeros, and - store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - IF imm8[7:0] > 31 + + + + + Shift packed 32-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF imm8[7:0] > 31 dst[i+31:i] := 0 - ELSE + ELSE dst[i+31:i] := ZeroExtend32(a[i+31:i] << imm8[7:0]) - FI - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
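
Note the contrast with the rotates: the sll/slli pseudocode does not reduce the count, so any count above 31 zeroes the lane outright. A scalar sketch of the zeromask form (hypothetical helper, not the stdarch API):

fn maskz_sll_epi32_model(k: u8, a: [u32; 8], count: u64) -> [u32; 8] {
    let mut dst = [0u32; 8]; // zeromask: unselected lanes stay zero
    for j in 0..8 {
        if (k >> j) & 1 == 1 {
            dst[j] = if count > 31 { 0 } else { a[j] << count as u32 };
        }
    }
    dst
}

fn main() {
    let a = [1u32; 8];
    assert_eq!(maskz_sll_epi32_model(0xFF, a, 33)[0], 0); // saturates to zero,
    // where a plain `1u32 << 33` would panic in a debug build
}
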
- - - - - - Shift packed 64-bit integers in "a" left by "count" while shifting in zeros, - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - IF count[63:0] > 63 + + + + + + Shift packed 64-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF count[63:0] > 63 dst[i+63:i] := 0 - ELSE + ELSE dst[i+63:i] := ZeroExtend64(a[i+63:i] << count[63:0]) - FI - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - - Shift packed 64-bit integers in "a" left by "imm8" while shifting in zeros, and - store the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - IF imm8[7:0] > 63 + + + + + + Shift packed 64-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF imm8[7:0] > 63 dst[i+63:i] := 0 - ELSE + ELSE dst[i+63:i] := ZeroExtend64(a[i+63:i] << imm8[7:0]) - FI - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 64-bit integers in "a" left by "count" while shifting in zeros, - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - IF count[63:0] > 63 + + + + + Shift packed 64-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF count[63:0] > 63 dst[i+63:i] := 0 - ELSE + ELSE dst[i+63:i] := ZeroExtend64(a[i+63:i] << count[63:0]) - FI - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 64-bit integers in "a" left by "imm8" while shifting in zeros, and - store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - IF imm8[7:0] > 63 + + + + + Shift packed 64-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF imm8[7:0] > 63 dst[i+63:i] := 0 - ELSE + ELSE dst[i+63:i] := ZeroExtend64(a[i+63:i] << imm8[7:0]) - FI - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - - Shift packed 64-bit integers in "a" left by "count" while shifting in zeros, - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - IF count[63:0] > 63 + + + + + + Shift packed 64-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF count[63:0] > 63 dst[i+63:i] := 0 - ELSE + ELSE dst[i+63:i] := ZeroExtend64(a[i+63:i] << count[63:0]) - FI - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - - Shift packed 64-bit integers in "a" left by "imm8" while shifting in zeros, and - store the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - IF imm8[7:0] > 63 + + + + + + Shift packed 64-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF imm8[7:0] > 63 dst[i+63:i] := 0 - ELSE + ELSE dst[i+63:i] := ZeroExtend64(a[i+63:i] << imm8[7:0]) - FI - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 64-bit integers in "a" left by "count" while shifting in zeros, - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - IF count[63:0] > 63 + + + + + Shift packed 64-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF count[63:0] > 63 dst[i+63:i] := 0 - ELSE + ELSE dst[i+63:i] := ZeroExtend64(a[i+63:i] << count[63:0]) - FI - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 64-bit integers in "a" left by "imm8" while shifting in zeros, and - store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - IF imm8[7:0] > 63 + + + + + Shift packed 64-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF imm8[7:0] > 63 dst[i+63:i] := 0 - ELSE + ELSE dst[i+63:i] := ZeroExtend64(a[i+63:i] << imm8[7:0]) - FI - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
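
The qword forms behave the same way at the 63-bit boundary: an imm8 of 64 or more clears the lane rather than wrapping mod 64. A sketch using `checked_shl`, which returns None exactly when the shift reaches the lane width (illustrative only; `slli_epi64_model` is a hypothetical name):

fn slli_epi64_model<const IMM8: u8>(a: [u64; 4]) -> [u64; 4] {
    // None once the shift reaches 64, matching "imm8 > 63 gives 0"
    a.map(|x| x.checked_shl(IMM8 as u32).unwrap_or(0))
}

fn main() {
    assert_eq!(slli_epi64_model::<64>([u64::MAX; 4]), [0; 4]); // no mod-64 wrap
    assert_eq!(slli_epi64_model::<1>([1, 2, 3, 4]), [2, 4, 6, 8]);
}
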
- - - - - - Shift packed 32-bit integers in "a" left by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - IF count[i+31:i] < 32 + + + + + + Shift packed 32-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF count[i+31:i] < 32 dst[i+31:i] := ZeroExtend32(a[i+31:i] << count[i+31:i]) - ELSE + ELSE dst[i+31:i] := 0 - FI - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 32-bit integers in "a" left by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in "dst" - using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - IF count[i+31:i] < 32 + + + + + Shift packed 32-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF count[i+31:i] < 32 dst[i+31:i] := ZeroExtend32(a[i+31:i] << count[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ELSE + ELSE dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - - Shift packed 32-bit integers in "a" left by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - IF count[i+31:i] < 32 + + + + + + Shift packed 32-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF count[i+31:i] < 32 dst[i+31:i] := ZeroExtend32(a[i+31:i] << count[i+31:i]) - ELSE + ELSE dst[i+31:i] := 0 - FI - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 32-bit integers in "a" left by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in "dst" - using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - IF count[i+31:i] < 32 + + + + + Shift packed 32-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF count[i+31:i] < 32 dst[i+31:i] := ZeroExtend32(a[i+31:i] << count[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ELSE + ELSE dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
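
The sllv forms give every dword its own shift count; lanes whose count is 32 or more become zero. A scalar model with a writemask (hypothetical helper name):

fn mask_sllv_epi32_model(src: [u32; 8], k: u8, a: [u32; 8], count: [u32; 8]) -> [u32; 8] {
    let mut dst = src; // writemask: unselected lanes keep src
    for j in 0..8 {
        if (k >> j) & 1 == 1 {
            dst[j] = if count[j] < 32 { a[j] << count[j] } else { 0 };
        }
    }
    dst
}

fn main() {
    let r = mask_sllv_epi32_model([9; 8], 0b0000_0011, [1; 8], [31, 32, 0, 0, 0, 0, 0, 0]);
    assert_eq!(r, [1 << 31, 0, 9, 9, 9, 9, 9, 9]); // lane 1 over-shifts to 0; lanes 2.. keep src
}
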
- - - - - - Shift packed 64-bit integers in "a" left by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - IF count[i+63:i] < 64 + + + + + + Shift packed 64-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF count[i+63:i] < 64 dst[i+63:i] := ZeroExtend64(a[i+63:i] << count[i+63:i]) - ELSE + ELSE dst[i+63:i] := 0 - FI - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 64-bit integers in "a" left by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in "dst" - using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - IF count[i+63:i] < 64 + + + + + Shift packed 64-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF count[i+63:i] < 64 dst[i+63:i] := ZeroExtend64(a[i+63:i] << count[i+63:i]) - ELSE + ELSE dst[i+63:i] := 0 - FI - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - - Shift packed 64-bit integers in "a" left by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - IF count[i+63:i] < 64 + + + + + + Shift packed 64-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF count[i+63:i] < 64 dst[i+63:i] := ZeroExtend64(a[i+63:i] << count[i+63:i]) - ELSE + ELSE dst[i+63:i] := 0 - FI - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 64-bit integers in "a" left by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in "dst" - using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - IF count[i+63:i] < 64 + + + + + Shift packed 64-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF count[i+63:i] < 64 dst[i+63:i] := ZeroExtend64(a[i+63:i] << count[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ELSE + ELSE dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
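
The difference between the sll and sllv families in one place: sll applies a single 64-bit count to every qword, while sllv reads a count per qword (scalar sketch under those assumptions; both helper names are hypothetical):

fn sll_epi64(a: [u64; 2], count: u64) -> [u64; 2] {
    // one count for the whole vector, zero once it exceeds 63
    a.map(|x| if count > 63 { 0 } else { x << count as u32 })
}

fn sllv_epi64(a: [u64; 2], count: [u64; 2]) -> [u64; 2] {
    // a separate count per qword
    let mut dst = [0u64; 2];
    for j in 0..2 {
        if count[j] < 64 {
            dst[j] = a[j] << count[j] as u32;
        }
    }
    dst
}

fn main() {
    assert_eq!(sll_epi64([1, 1], 8), [256, 256]);
    assert_eq!(sllv_epi64([1, 1], [8, 64]), [256, 0]);
}
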
- - - - - - Shift packed 32-bit integers in "a" right by "count" while shifting in sign - bits, and store the results in "dst" using writemask "k" (elements are copied from "src" - when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - IF count[63:0] > 31 + + + + + + Shift packed 32-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF count[63:0] > 31 dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0) - ELSE + ELSE dst[i+31:i] := SignExtend32(a[i+31:i] >> count[63:0]) - FI - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - - Shift packed 32-bit integers in "a" right by "imm8" while shifting in sign - bits, and store the results in "dst" using writemask "k" (elements are copied from "src" - when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - IF imm8[7:0] > 31 + + + + + + Shift packed 32-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF imm8[7:0] > 31 dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0) - ELSE + ELSE dst[i+31:i] := SignExtend32(a[i+31:i] >> imm8[7:0]) - FI - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 32-bit integers in "a" right by "count" while shifting in sign - bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when - the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - IF count[63:0] > 31 + + + + + Shift packed 32-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF count[63:0] > 31 dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0) - ELSE + ELSE dst[i+31:i] := SignExtend32(a[i+31:i] >> count[63:0]) - FI - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 32-bit integers in "a" right by "imm8" while shifting in sign - bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when - the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - IF imm8[7:0] > 31 + + + + + Shift packed 32-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF imm8[7:0] > 31 dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0) - ELSE + ELSE dst[i+31:i] := SignExtend32(a[i+31:i] >> imm8[7:0]) - FI - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - - Shift packed 32-bit integers in "a" right by "count" while shifting in sign - bits, and store the results in "dst" using writemask "k" (elements are copied from "src" - when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - IF count[63:0] > 31 + + + + + + Shift packed 32-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF count[63:0] > 31 dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0) - ELSE + ELSE dst[i+31:i] := SignExtend32(a[i+31:i] >> count[63:0]) - FI - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - - Shift packed 32-bit integers in "a" right by "imm8" while shifting in sign - bits, and store the results in "dst" using writemask "k" (elements are copied from "src" - when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - IF imm8[7:0] > 31 + + + + + + Shift packed 32-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF imm8[7:0] > 31 dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0) - ELSE + ELSE dst[i+31:i] := SignExtend32(a[i+31:i] >> imm8[7:0]) - FI - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 32-bit integers in "a" right by "count" while shifting in sign - bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when - the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - IF count[63:0] > 31 + + + + + Shift packed 32-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF count[63:0] > 31 dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0) - ELSE + ELSE dst[i+31:i] := SignExtend32(a[i+31:i] >> count[63:0]) - FI - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 32-bit integers in "a" right by "imm8" while shifting in sign - bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when - the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - IF imm8[7:0] > 31 + + + + + Shift packed 32-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF imm8[7:0] > 31 dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0) - ELSE + ELSE dst[i+31:i] := SignExtend32(a[i+31:i] >> imm8[7:0]) - FI - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
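
The arithmetic right shifts differ from the logical ones in how they saturate: once the count exceeds 31, the lane fills with copies of the sign bit, not with zero. A per-lane scalar sketch (hypothetical helper):

fn sra_epi32_lane(a: i32, count: u64) -> i32 {
    if count > 31 {
        if a < 0 { -1 } else { 0 } // 0xFFFFFFFF or 0x0, all sign bits
    } else {
        a >> count as u32 // >> on a signed type is already arithmetic in Rust
    }
}

fn main() {
    assert_eq!(sra_epi32_lane(-8, 2), -2);
    assert_eq!(sra_epi32_lane(-8, 40), -1); // saturates to all-ones, not zero
    assert_eq!(sra_epi32_lane(8, 40), 0);
}
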
- - - - - - Shift packed 64-bit integers in "a" right by "count" while shifting in sign - bits, and store the results in "dst" using writemask "k" (elements are copied from "src" - when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - IF count[63:0] > 63 + + + + + + Shift packed 64-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF count[63:0] > 63 dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0x0) - ELSE + ELSE dst[i+63:i] := SignExtend64(a[i+63:i] >> count[63:0]) - FI - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - - Shift packed 64-bit integers in "a" right by "imm8" while shifting in sign - bits, and store the results in "dst" using writemask "k" (elements are copied from "src" - when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - IF imm8[7:0] > 63 + + + + + + Shift packed 64-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF imm8[7:0] > 63 dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0x0) - ELSE + ELSE dst[i+63:i] := SignExtend64(a[i+63:i] >> imm8[7:0]) - FI - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 64-bit integers in "a" right by "count" while shifting in sign - bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when - the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - IF count[63:0] > 63 + + + + + Shift packed 64-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF count[63:0] > 63 dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0x0) - ELSE + ELSE dst[i+63:i] := SignExtend64(a[i+63:i] >> count[63:0]) - FI - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 64-bit integers in "a" right by "imm8" while shifting in sign - bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when - the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - IF imm8[7:0] > 63 + + + + + Shift packed 64-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF imm8[7:0] > 63 dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0x0) - ELSE + ELSE dst[i+63:i] := SignExtend64(a[i+63:i] >> imm8[7:0]) - FI - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - Shift packed 64-bit integers in "a" right by "count" while shifting in sign - bits, and store the results in "dst". - - FOR j := 0 to 3 - i := j*64 - IF count[63:0] > 63 - dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0x0) - ELSE - dst[i+63:i] := SignExtend64(a[i+63:i] >> count[63:0]) - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
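
Worth noting while reading these entries: a packed 64-bit arithmetic right shift has no SSE/AVX2 counterpart; VPSRAQ first appears with AVX512F, which is why even the 256-bit and 128-bit forms here carry the AVX512VL flag. A scalar sketch of the unmasked count form above (illustrative only, not the stdarch implementation):

    // Counts above 63 saturate to a pure sign fill, unlike the logical
    // shifts later in this file, which produce zero instead.
    fn sra_epi64(a: [i64; 4], count: u64) -> [i64; 4] {
        a.map(|x| x >> count.min(63) as u32)
    }
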
- Shift + + + + Shift packed 64-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + IF count[63:0] > 63 + dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0x0) + ELSE + dst[i+63:i] := SignExtend64(a[i+63:i] >> count[63:0]) + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - Shift packed 64-bit integers in "a" right by "imm8" while shifting in sign - bits, and store the results in "dst". - - FOR j := 0 to 3 - i := j*64 - IF imm8[7:0] > 63 - dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0x0) - ELSE - dst[i+63:i] := SignExtend64(a[i+63:i] >> imm8[7:0]) - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + + + + Shift packed 64-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + IF imm8[7:0] > 63 + dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0x0) + ELSE + dst[i+63:i] := SignExtend64(a[i+63:i] >> imm8[7:0]) + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - - Shift packed 64-bit integers in "a" right by "count" while shifting in sign - bits, and store the results in "dst" using writemask "k" (elements are copied from "src" - when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - IF count[63:0] > 63 + + + + + + Shift packed 64-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF count[63:0] > 63 dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0x0) - ELSE + ELSE dst[i+63:i] := SignExtend64(a[i+63:i] >> count[63:0]) - FI - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - - Shift packed 64-bit integers in "a" right by "imm8" while shifting in sign - bits, and store the results in "dst" using writemask "k" (elements are copied from "src" - when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - IF imm8[7:0] > 63 + + + + + + Shift packed 64-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF imm8[7:0] > 63 dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0x0) - ELSE + ELSE dst[i+63:i] := SignExtend64(a[i+63:i] >> imm8[7:0]) - FI - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 64-bit integers in "a" right by "count" while shifting in sign - bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when - the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - IF count[63:0] > 63 + + + + + Shift packed 64-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF count[63:0] > 63 dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0x0) - ELSE + ELSE dst[i+63:i] := SignExtend64(a[i+63:i] >> count[63:0]) - FI - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 64-bit integers in "a" right by "imm8" while shifting in sign - bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when - the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - IF imm8[7:0] > 63 + + + + + Shift packed 64-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF imm8[7:0] > 63 dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0x0) - ELSE + ELSE dst[i+63:i] := SignExtend64(a[i+63:i] >> imm8[7:0]) - FI - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - Shift packed 64-bit integers in "a" right by "count" while shifting in sign - bits, and store the results in "dst". - - FOR j := 0 to 1 - i := j*64 - IF count[63:0] > 63 - dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0x0) - ELSE - dst[i+63:i] := SignExtend64(a[i+63:i] >> count[63:0]) - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + + + + Shift packed 64-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + IF count[63:0] > 63 + dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0x0) + ELSE + dst[i+63:i] := SignExtend64(a[i+63:i] >> count[63:0]) + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - Shift packed 64-bit integers in "a" right by "imm8" while shifting in sign - bits, and store the results in "dst". - - FOR j := 0 to 1 - i := j*64 - IF imm8[7:0] > 63 - dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0x0) - ELSE - dst[i+63:i] := SignExtend64(a[i+63:i] >> imm8[7:0]) - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + + + + Shift packed 64-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + IF imm8[7:0] > 63 + dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0x0) + ELSE + dst[i+63:i] := SignExtend64(a[i+63:i] >> imm8[7:0]) + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - - Shift packed 32-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in sign bits, and store the results in - "dst" using writemask "k" (elements are copied from "src" when the corresponding mask - bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - IF count[i+31:i] < 32 + + + + + + Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF count[i+31:i] < 32 dst[i+31:i] := SignExtend32(a[i+31:i] >> count[i+31:i]) - ELSE + ELSE dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0) - FI - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
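
The VPSRAV entries differ from the forms above in one way only: the shift count comes per lane from `count` rather than from a single scalar, and the out-of-range test flips to `< 32`. A hedged Rust model of the writemask variant above:

    // Each lane carries its own count; a count of 32 or more collapses
    // the lane to its sign fill; masked-off lanes are copied from `src`.
    fn mask_srav_epi32(src: [i32; 8], k: u8, a: [i32; 8], count: [u32; 8]) -> [i32; 8] {
        let mut dst = src;
        for j in 0..8 {
            if k & (1 << j) != 0 {
                dst[j] = a[j] >> count[j].min(31);
            }
        }
        dst
    }
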
- Shift + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 32-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in sign bits, and store the results in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - IF count[i+31:i] < 32 + + + + + Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF count[i+31:i] < 32 dst[i+31:i] := SignExtend32(a[i+31:i] >> count[i+31:i]) - ELSE + ELSE dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0) - FI - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - - Shift packed 32-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in sign bits, and store the results in - "dst" using writemask "k" (elements are copied from "src" when the corresponding mask - bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - IF count[i+31:i] < 32 + + + + + + Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF count[i+31:i] < 32 dst[i+31:i] := SignExtend32(a[i+31:i] >> count[i+31:i]) - ELSE + ELSE dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0) - FI - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 32-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in sign bits, and store the results in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - IF count[i+31:i] < 32 + + + + + Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF count[i+31:i] < 32 dst[i+31:i] := SignExtend32(a[i+31:i] >> count[i+31:i]) - ELSE + ELSE dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0) - FI - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - - Shift packed 64-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in sign bits, and store the results in - "dst" using writemask "k" (elements are copied from "src" when the corresponding mask - bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - IF count[i+63:i] < 64 + + + + + + Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF count[i+63:i] < 64 dst[i+63:i] := SignExtend64(a[i+63:i] >> count[i+63:i]) - ELSE + ELSE dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0) - FI - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 64-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in sign bits, and store the results in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - IF count[i+63:i] < 64 + + + + + Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF count[i+63:i] < 64 dst[i+63:i] := SignExtend64(a[i+63:i] >> count[i+63:i]) - ELSE + ELSE dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0) - FI - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - Shift packed 64-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in sign bits, and store the results in - "dst". - - FOR j := 0 to 3 - i := j*64 - IF count[i+63:i] < 64 - dst[i+63:i] := SignExtend64(a[i+63:i] >> count[i+63:i]) - ELSE - dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0) - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + + + + Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + IF count[i+63:i] < 64 + dst[i+63:i] := SignExtend64(a[i+63:i] >> count[i+63:i]) + ELSE + dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0) + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - - Shift packed 64-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in sign bits, and store the results in - "dst" using writemask "k" (elements are copied from "src" when the corresponding mask - bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - IF count[i+63:i] < 64 + + + + + + Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF count[i+63:i] < 64 dst[i+63:i] := SignExtend64(a[i+63:i] >> count[i+63:i]) - ELSE + ELSE dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0) - FI - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 64-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in sign bits, and store the results in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - IF count[i+63:i] < 64 + + + + + Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF count[i+63:i] < 64 dst[i+63:i] := SignExtend64(a[i+63:i] >> count[i+63:i]) - ELSE + ELSE dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0) - FI - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - Shift packed 64-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in sign bits, and store the results in - "dst". - - FOR j := 0 to 1 - i := j*64 - IF count[i+63:i] < 64 - dst[i+63:i] := SignExtend64(a[i+63:i] >> count[i+63:i]) - ELSE - dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0) - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + + + + Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + IF count[i+63:i] < 64 + dst[i+63:i] := SignExtend64(a[i+63:i] >> count[i+63:i]) + ELSE + dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0) + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - - Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - IF count[63:0] > 31 + + + + + + Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF count[63:0] > 31 dst[i+31:i] := 0 - ELSE + ELSE dst[i+31:i] := ZeroExtend32(a[i+31:i] >> count[63:0]) - FI - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
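
From here on the entries switch from arithmetic (sign-filling) to logical (zero-filling) right shifts: the only change in the pseudocode is that the out-of-range case stores 0 instead of the sign fill, and ZeroExtend32 replaces SignExtend32. A small Rust sketch of the writemask form above, under the same illustrative naming as before:

    // Logical shift: once the count exceeds 31 the whole lane is zero.
    fn mask_srl_epi32(src: [u32; 8], k: u8, a: [u32; 8], count: u64) -> [u32; 8] {
        let mut dst = src;
        for j in 0..8 {
            if k & (1 << j) != 0 {
                dst[j] = if count > 31 { 0 } else { a[j] >> count as u32 };
            }
        }
        dst
    }
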
- Shift + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - - Shift packed 32-bit integers in "a" right by "imm8" while shifting in zeros, - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - IF imm8[7:0] > 31 + + + + + + Shift packed 32-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF imm8[7:0] > 31 dst[i+31:i] := 0 - ELSE + ELSE dst[i+31:i] := ZeroExtend32(a[i+31:i] >> imm8[7:0]) - FI - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - IF count[63:0] > 31 + + + + + Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF count[63:0] > 31 dst[i+31:i] := 0 - ELSE + ELSE dst[i+31:i] := ZeroExtend32(a[i+31:i] >> count[63:0]) - FI - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 32-bit integers in "a" right by "imm8" while shifting in zeros, - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - IF imm8[7:0] > 31 + + + + + Shift packed 32-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF imm8[7:0] > 31 dst[i+31:i] := 0 - ELSE + ELSE dst[i+31:i] := ZeroExtend32(a[i+31:i] >> imm8[7:0]) - FI - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - - Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - IF count[63:0] > 31 + + + + + + Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF count[63:0] > 31 dst[i+31:i] := 0 - ELSE + ELSE dst[i+31:i] := ZeroExtend32(a[i+31:i] >> count[63:0]) - FI - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - - Shift packed 32-bit integers in "a" right by "imm8" while shifting in zeros, - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - IF imm8[7:0] > 31 + + + + + + Shift packed 32-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF imm8[7:0] > 31 dst[i+31:i] := 0 - ELSE + ELSE dst[i+31:i] := ZeroExtend32(a[i+31:i] >> imm8[7:0]) - FI - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - IF count[63:0] > 31 + + + + + Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF count[63:0] > 31 dst[i+31:i] := 0 - ELSE + ELSE dst[i+31:i] := ZeroExtend32(a[i+31:i] >> count[63:0]) - FI - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 32-bit integers in "a" right by "imm8" while shifting in zeros, - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - IF imm8[7:0] > 31 + + + + + Shift packed 32-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF imm8[7:0] > 31 dst[i+31:i] := 0 - ELSE + ELSE dst[i+31:i] := ZeroExtend32(a[i+31:i] >> imm8[7:0]) - FI - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - - Shift packed 64-bit integers in "a" right by "count" while shifting in zeros, - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - IF count[63:0] > 63 + + + + + + Shift packed 64-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF count[63:0] > 63 dst[i+63:i] := 0 - ELSE + ELSE dst[i+63:i] := ZeroExtend64(a[i+63:i] >> count[63:0]) - FI - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - - Shift packed 64-bit integers in "a" right by "imm8" while shifting in zeros, - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - IF imm8[7:0] > 63 + + + + + + Shift packed 64-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF imm8[7:0] > 63 dst[i+63:i] := 0 - ELSE + ELSE dst[i+63:i] := ZeroExtend64(a[i+63:i] >> imm8[7:0]) - FI - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 64-bit integers in "a" right by "count" while shifting in zeros, - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - IF count[63:0] > 63 + + + + + Shift packed 64-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF count[63:0] > 63 dst[i+63:i] := 0 - ELSE + ELSE dst[i+63:i] := ZeroExtend64(a[i+63:i] >> count[63:0]) - FI - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 64-bit integers in "a" right by "imm8" while shifting in zeros, - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - IF imm8[7:0] > 63 + + + + + Shift packed 64-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF imm8[7:0] > 63 dst[i+63:i] := 0 - ELSE + ELSE dst[i+63:i] := ZeroExtend64(a[i+63:i] >> imm8[7:0]) - FI - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - - Shift packed 64-bit integers in "a" right by "count" while shifting in zeros, - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - IF count[63:0] > 63 + + + + + + Shift packed 64-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF count[63:0] > 63 dst[i+63:i] := 0 - ELSE + ELSE dst[i+63:i] := ZeroExtend64(a[i+63:i] >> count[63:0]) - FI - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - - Shift packed 64-bit integers in "a" right by "imm8" while shifting in zeros, - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - IF imm8[7:0] > 63 + + + + + + Shift packed 64-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF imm8[7:0] > 63 dst[i+63:i] := 0 - ELSE + ELSE dst[i+63:i] := ZeroExtend64(a[i+63:i] >> imm8[7:0]) - FI - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 64-bit integers in "a" right by "count" while shifting in zeros, - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - IF count[63:0] > 63 + + + + + Shift packed 64-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF count[63:0] > 63 dst[i+63:i] := 0 - ELSE + ELSE dst[i+63:i] := ZeroExtend64(a[i+63:i] >> count[63:0]) - FI - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 64-bit integers in "a" right by "imm8" while shifting in zeros, - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - IF imm8[7:0] > 63 + + + + + Shift packed 64-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF imm8[7:0] > 63 dst[i+63:i] := 0 - ELSE + ELSE dst[i+63:i] := ZeroExtend64(a[i+63:i] >> imm8[7:0]) - FI - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - - Shift packed 32-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - IF count[i+31:i] < 32 + + + + + + Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF count[i+31:i] < 32 dst[i+31:i] := ZeroExtend32(a[i+31:i] >> count[i+31:i]) - ELSE + ELSE dst[i+31:i] := 0 - FI - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
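
The VPSRLV entries combine the two previous variations: per-lane counts and zero fill. One sketch covers the writemask form above; the zeromask forms only swap the `src` fallback for 0.

    // Per-lane logical shift; a count of 32 or more zeroes the lane.
    fn mask_srlv_epi32(src: [u32; 8], k: u8, a: [u32; 8], count: [u32; 8]) -> [u32; 8] {
        let mut dst = src;
        for j in 0..8 {
            if k & (1 << j) != 0 {
                dst[j] = if count[j] < 32 { a[j] >> count[j] } else { 0 };
            }
        }
        dst
    }
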
- Shift + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 32-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in "dst" - using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - IF count[i+31:i] < 32 + + + + + Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + IF count[i+31:i] < 32 dst[i+31:i] := ZeroExtend32(a[i+31:i] >> count[i+31:i]) - ELSE + ELSE dst[i+31:i] := 0 - FI - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - - Shift packed 32-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - IF count[i+31:i] < 32 + + + + + + Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF count[i+31:i] < 32 dst[i+31:i] := ZeroExtend32(a[i+31:i] >> count[i+31:i]) - ELSE + ELSE dst[i+31:i] := 0 - FI - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 32-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in "dst" - using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - IF count[i+31:i] < 32 + + + + + Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + IF count[i+31:i] < 32 dst[i+31:i] := ZeroExtend32(a[i+31:i] >> count[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ELSE + ELSE dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - - Shift packed 64-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - IF count[i+63:i] < 64 + + + + + + Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF count[i+63:i] < 64 dst[i+63:i] := ZeroExtend64(a[i+63:i] >> count[i+63:i]) - ELSE + ELSE dst[i+63:i] := 0 - FI - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 64-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in "dst" - using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - IF count[i+63:i] < 64 + + + + + Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + IF count[i+63:i] < 64 dst[i+63:i] := ZeroExtend64(a[i+63:i] >> count[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ELSE + ELSE dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - - Shift packed 64-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - IF count[i+63:i] < 64 + + + + + + Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF count[i+63:i] < 64 dst[i+63:i] := ZeroExtend64(a[i+63:i] >> count[i+63:i]) - ELSE + ELSE dst[i+63:i] := 0 - FI - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Shift packed 64-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in "dst" - using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - IF count[i+63:i] < 64 + + + + + Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + IF count[i+63:i] < 64 dst[i+63:i] := ZeroExtend64(a[i+63:i] >> count[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ELSE + ELSE dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512F + AVX512VL +
immintrin.h
+ Shift
- - - - - Compute the square root of packed double-precision (64-bit) floating-point - elements in "a", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := SQRT(a[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
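
The masked square-root entries carry no shift-style edge cases at all; they are a lane-wise SQRT wrapped in the usual mask plumbing. A compact model of the writemask form above (hypothetical helper name, rounding environment left at its default):

    // Lane-wise sqrt; masked-off lanes are copied from `src`.
    fn mask_sqrt_pd(src: [f64; 4], k: u8, a: [f64; 4]) -> [f64; 4] {
        core::array::from_fn(|j| if k & (1 << j) != 0 { a[j].sqrt() } else { src[j] })
    }
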
- Elementary Math Functions + + + + + Compute the square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := SQRT(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Elementary Math Functions
- - - - Compute the square root of packed double-precision (64-bit) floating-point - elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := SQRT(a[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Elementary Math Functions + + + + Compute the square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := SQRT(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the square root of packed double-precision (64-bit) floating-point - elements in "a", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := SQRT(a[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Elementary Math Functions + + + + + Compute the square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := SQRT(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Elementary Math Functions
- - - - Compute the square root of packed double-precision (64-bit) floating-point - elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := SQRT(a[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Elementary Math Functions + + + + Compute the square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := SQRT(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the square root of packed single-precision (32-bit) floating-point - elements in "a", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := SQRT(a[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Elementary Math Functions + + + + + Compute the square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := SQRT(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Elementary Math Functions
- - - - Compute the square root of packed single-precision (32-bit) floating-point - elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := SQRT(a[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Elementary Math Functions + + + + Compute the square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := SQRT(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the square root of packed single-precision (32-bit) floating-point - elements in "a", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := SQRT(a[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Elementary Math Functions + + + + + Compute the square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := SQRT(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Elementary Math Functions
- - - - Compute the square root of packed single-precision (32-bit) floating-point - elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := SQRT(a[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F - AVX512VL -
immintrin.h
- Elementary Math Functions -
- - - - - - - Perform the last round of an AES encryption flow on data (state) in "a" using - the round key in "RoundKey", and store the results in "dst"." - FOR j := 0 to 3 - i := j*128 - a[i+127:i] := ShiftRows(a[i+127:i]) - a[i+127:i] := SubBytes(a[i+127:i]) - dst[i+127:i] := a[i+127:i] XOR RoundKey[i+127:i] - ENDFOR - dst[MAX:512] := 0 - - - AVX512F - VAES -
immintrin.h
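
The VAES entries are the legacy one-lane AES-NI operations replicated across the four 128-bit lanes of a ZMM register. A property-style sketch of that relationship, assuming a CPU with AES-NI, AVX512F and VAES and today's rustc target-feature names (the helper below is illustrative, not part of this patch):

    use core::arch::x86_64::*;

    // Lane 0 of the 512-bit AESENCLAST should match the classic
    // _mm_aesenclast_si128 applied to the corresponding 128-bit lanes.
    #[target_feature(enable = "avx512f,vaes,aes")]
    unsafe fn aesenclast_lane0_agrees(a: __m512i, round_key: __m512i) -> bool {
        let wide = _mm512_aesenclast_epi128(a, round_key);
        let narrow = _mm_aesenclast_si128(
            _mm512_extracti32x4_epi32::<0>(a),
            _mm512_extracti32x4_epi32::<0>(round_key),
        );
        let w: [u64; 8] = core::mem::transmute(wide);
        let n: [u64; 2] = core::mem::transmute(narrow);
        w[0] == n[0] && w[1] == n[1]
    }
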
- Cryptography + + + + Compute the square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := SQRT(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F + AVX512VL +
immintrin.h
+ Elementary Math Functions +
+ + + + + + + Perform the last round of an AES encryption flow on data (state) in "a" using the round key in "RoundKey", and store the results in "dst"." + FOR j := 0 to 3 + i := j*128 + a[i+127:i] := ShiftRows(a[i+127:i]) + a[i+127:i] := SubBytes(a[i+127:i]) + dst[i+127:i] := a[i+127:i] XOR RoundKey[i+127:i] +ENDFOR +dst[MAX:512] := 0 + + + AVX512F + VAES +
immintrin.h
+ Cryptography
- - - - Perform one round of an AES encryption flow on data (state) in "a" using the - round key in "RoundKey", and store the results in "dst"." - FOR j := 0 to 3 - i := j*128 - a[i+127:i] := ShiftRows(a[i+127:i]) - a[i+127:i] := SubBytes(a[i+127:i]) - a[i+127:i] := MixColumns(a[i+127:i]) - dst[i+127:i] := a[i+127:i] XOR RoundKey[i+127:i] - ENDFOR - dst[MAX:512] := 0 - - - AVX512F - VAES -
immintrin.h
- Cryptography + + + + Perform one round of an AES encryption flow on data (state) in "a" using the round key in "RoundKey", and store the results in "dst"." + FOR j := 0 to 3 + i := j*128 + a[i+127:i] := ShiftRows(a[i+127:i]) + a[i+127:i] := SubBytes(a[i+127:i]) + a[i+127:i] := MixColumns(a[i+127:i]) + dst[i+127:i] := a[i+127:i] XOR RoundKey[i+127:i] +ENDFOR +dst[MAX:512] := 0 + + + AVX512F + VAES +
immintrin.h
+ Cryptography
- - - - Perform the last round of an AES decryption flow on data (state) in "a" using - the round key in "RoundKey", and store the results in "dst". - FOR j := 0 to 3 - i := j*128 - a[i+127:i] := InvShiftRows(a[i+127:i]) - a[i+127:i] := InvSubBytes(a[i+127:i]) - dst[i+127:i] := a[i+127:i] XOR RoundKey[i+127:i] - ENDFOR - dst[MAX:512] := 0 - - - AVX512F - VAES -
immintrin.h
- Cryptography + + + + Perform the last round of an AES decryption flow on data (state) in "a" using the round key in "RoundKey", and store the results in "dst". + FOR j := 0 to 3 + i := j*128 + a[i+127:i] := InvShiftRows(a[i+127:i]) + a[i+127:i] := InvSubBytes(a[i+127:i]) + dst[i+127:i] := a[i+127:i] XOR RoundKey[i+127:i] +ENDFOR +dst[MAX:512] := 0 + + + AVX512F + VAES +
immintrin.h
+ Cryptography
- - - - Perform one round of an AES decryption flow on data (state) in "a" using the - round key in "RoundKey", and store the results in "dst". - FOR j := 0 to 3 - i := j*128 - a[i+127:i] := InvShiftRows(a[i+127:i]) - a[i+127:i] := InvSubBytes(a[i+127:i]) - a[i+127:i] := InvMixColumns(a[i+127:i]) - dst[i+127:i] := a[i+127:i] XOR RoundKey[i+127:i] - ENDFOR - dst[MAX:512] := 0 - - - AVX512F - VAES -
immintrin.h
- Cryptography -
- - - - - - - - Multiply the packed 32-bit integers in "a" and "b", producing intermediate - 64-bit integers, and store the low 32 bits of the intermediate integers in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - tmp[63:0] := a[i+31:i] * b[i+31:i] - dst[i+31:i] := tmp[31:0] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
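
The MULLO entry above is easy to misread: the multiply is performed at full 64-bit width and only then truncated, so each lane result is exactly a wrapping 32-bit product. A scalar model of the zeromask form:

    // tmp[63:0] := a * b; dst[i+31:i] := tmp[31:0] -- i.e. wrapping_mul.
    fn maskz_mullo_epi32(k: u16, a: [i32; 16], b: [i32; 16]) -> [i32; 16] {
        core::array::from_fn(|j| {
            if k & (1 << j) != 0 {
                a[j].wrapping_mul(b[j])
            } else {
                0 // zeromask: cleared lanes become 0, not src
            }
        })
    }
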
- Arithmetic + + + + Perform one round of an AES decryption flow on data (state) in "a" using the round key in "RoundKey", and store the results in "dst". + FOR j := 0 to 3 + i := j*128 + a[i+127:i] := InvShiftRows(a[i+127:i]) + a[i+127:i] := InvSubBytes(a[i+127:i]) + a[i+127:i] := InvMixColumns(a[i+127:i]) + dst[i+127:i] := a[i+127:i] XOR RoundKey[i+127:i] +ENDFOR +dst[MAX:512] := 0 + + + AVX512F + VAES +
immintrin.h
+ Cryptography +
+ + + + + + + + Multiply the packed 32-bit integers in "a" and "b", producing intermediate 64-bit integers, and store the low 32 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + tmp[63:0] := a[i+31:i] * b[i+31:i] + dst[i+31:i] := tmp[31:0] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - Add packed double-precision (64-bit) floating-point elements in "a" and "b", - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] + b[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + Add packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Add packed double-precision (64-bit) floating-point elements in "a" and "b", - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - [round_note] - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] + b[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
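
The [round_note] marker is what distinguishes this entry from the plain form above it: the `_round_` variants take an extra immediate selecting the rounding mode (or _MM_FROUND_CUR_DIRECTION to keep the current MXCSR setting) instead of using the environment implicitly. A model that omits rounding control, which corresponds to the default round-to-nearest-even environment:

    // Zero-masked addition; native `+` on f64 rounds to nearest-even,
    // matching the default MXCSR state the non-round form relies on.
    fn maskz_add_pd(k: u8, a: [f64; 8], b: [f64; 8]) -> [f64; 8] {
        core::array::from_fn(|j| if k & (1 << j) != 0 { a[j] + b[j] } else { 0.0 })
    }
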
- Arithmetic + + + + + + Add packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - Add packed single-precision (32-bit) floating-point elements in "a" and "b", - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] + b[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + Add packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Add packed single-precision (32-bit) floating-point elements in "a" and "b", - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - [round_note] - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] + b[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Add packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - Add the lower double-precision (64-bit) floating-point element in "a" and "b", - store the result in the lower element of "dst", and copy the upper element from "a" to - the upper element of "dst". - [round_note] - - dst[63:0] := a[63:0] + b[63:0] - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + Add the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + [round_note] + +dst[63:0] := a[63:0] + b[63:0] +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - - Add the lower double-precision (64-bit) floating-point element in "a" and "b", - store the result in the lower element of "dst" using writemask "k" (the element is - copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to - the upper element of "dst". - [round_note] - - IF k[0] - dst[63:0] := a[63:0] + b[63:0] - ELSE - dst[63:0] := src[63:0] - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
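
The scalar (SD/SS) entries in this run mask only element 0; the upper part of the destination always comes from `a`, whatever the mask says. A last illustrative model, for the writemask ADD_SD form above (rounding control omitted as in the earlier sketch):

    // Only lane 0 is computed and masked; dst[127:64] := a[127:64].
    fn mask_add_sd(src: [f64; 2], k: u8, a: [f64; 2], b: [f64; 2]) -> [f64; 2] {
        let lo = if k & 1 != 0 { a[0] + b[0] } else { src[0] };
        [lo, a[1]]
    }
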
- Arithmetic + + + + + + + Add the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + [round_note] + +IF k[0] + dst[63:0] := a[63:0] + b[63:0] +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Add the lower double-precision (64-bit) floating-point element in "a" and "b", - store the result in the lower element of "dst" using writemask "k" (the element is - copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to - the upper element of "dst". - - IF k[0] - dst[63:0] := a[63:0] + b[63:0] - ELSE - dst[63:0] := src[63:0] - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Add the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := a[63:0] + b[63:0] +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
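Note: the scalar "sd" forms in this run (_mm_mask_add_sd and relatives) only ever compute bits 63:0; the upper element is always copied from "a". A usage sketch of the merge behavior when mask bit 0 is clear (demo name illustrative):

    use core::arch::x86_64::*;

    #[target_feature(enable = "avx512f")]
    unsafe fn mask_add_sd_demo() {
        let src = _mm_set_pd(100.0, 200.0); // upper = 100.0, lower = 200.0
        let a = _mm_set_pd(10.0, 1.0);
        let b = _mm_set_pd(20.0, 2.0);
        // Mask bit 0 is clear: the lower lane merges from `src`,
        // while the upper lane is still copied from `a`.
        let r = _mm_mask_add_sd(src, 0, a, b);
        let mut out = [0.0f64; 2];
        _mm_storeu_pd(out.as_mut_ptr(), r);
        assert_eq!(out, [200.0, 10.0]); // [lower, upper]
    }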
- - - - - - Add the lower double-precision (64-bit) floating-point element in "a" and "b", - store the result in the lower element of "dst" using zeromask "k" (the element is zeroed - out when mask bit 0 is not set), and copy the upper element from "a" to the upper - element of "dst". - [round_note] - - IF k[0] - dst[63:0] := a[63:0] + b[63:0] - ELSE - dst[63:0] := 0 - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Add the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + [round_note] + +IF k[0] + dst[63:0] := a[63:0] + b[63:0] +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - Add the lower double-precision (64-bit) floating-point element in "a" and "b", - store the result in the lower element of "dst" using zeromask "k" (the element is zeroed - out when mask bit 0 is not set), and copy the upper element from "a" to the upper - element of "dst". - - IF k[0] - dst[63:0] := a[63:0] + b[63:0] - ELSE - dst[63:0] := 0 - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + Add the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := a[63:0] + b[63:0] +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - Add the lower single-precision (32-bit) floating-point element in "a" and "b", - store the result in the lower element of "dst", and copy the upper 3 packed elements - from "a" to the upper elements of "dst". - [round_note] - - dst[31:0] := a[31:0] + b[31:0] - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + Add the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + +dst[31:0] := a[31:0] + b[31:0] +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
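Note: [round_note] in these entries stands for the usual list of SAE/rounding-control immediates. In the Rust bindings those are the _MM_FROUND_* constants, OR-ed together and passed as a const generic; a sketch:

    use core::arch::x86_64::*;

    #[target_feature(enable = "avx512f")]
    unsafe fn add_round_ss_demo(a: __m128, b: __m128) -> __m128 {
        // Round toward negative infinity and suppress exceptions.
        // _MM_FROUND_CUR_DIRECTION would use the current MXCSR mode instead.
        _mm_add_round_ss::<{ _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC }>(a, b)
    }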
- - - - - - - Add the lower single-precision (32-bit) floating-point element in "a" and "b", - store the result in the lower element of "dst" using writemask "k" (the element is - copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from - "a" to the upper elements of "dst". - [round_note] - - IF k[0] - dst[31:0] := a[31:0] + b[31:0] - ELSE - dst[31:0] := src[31:0] - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + + Add the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + +IF k[0] + dst[31:0] := a[31:0] + b[31:0] +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Add the lower single-precision (32-bit) floating-point element in "a" and "b", - store the result in the lower element of "dst" using writemask "k" (the element is - copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from - "a" to the upper elements of "dst". - - IF k[0] - dst[31:0] := a[31:0] + b[31:0] - ELSE - dst[31:0] := src[31:0] - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Add the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst[31:0] := a[31:0] + b[31:0] +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Add the lower single-precision (32-bit) floating-point element in "a" and "b", - store the result in the lower element of "dst" using zeromask "k" (the element is zeroed - out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the - upper elements of "dst". - [round_note] - - IF k[0] - dst[31:0] := a[31:0] + b[31:0] - ELSE - dst[31:0] := 0 - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Add the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + +IF k[0] + dst[31:0] := a[31:0] + b[31:0] +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - Add the lower single-precision (32-bit) floating-point element in "a" and "b", - store the result in the lower element of "dst" using zeromask "k" (the element is zeroed - out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the - upper elements of "dst". - - IF k[0] - dst[31:0] := a[31:0] + b[31:0] - ELSE - dst[31:0] := 0 - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + Add the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst[31:0] := a[31:0] + b[31:0] +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - Divide packed double-precision (64-bit) floating-point elements in "a" by - packed elements in "b", and store the results in "dst". - - FOR j := 0 to 7 - i := 64*j - dst[i+63:i] := a[i+63:i] / b[i+63:i] - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + Divide packed double-precision (64-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst". + +FOR j := 0 to 7 + i := 64*j + dst[i+63:i] := a[i+63:i] / b[i+63:i] +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
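Note: _mm512_div_pd is the unmasked baseline for the divide block that follows; a minimal usage sketch:

    use core::arch::x86_64::*;

    #[target_feature(enable = "avx512f")]
    unsafe fn div_pd_demo() {
        let a = _mm512_set1_pd(10.0);
        let b = _mm512_set1_pd(4.0);
        let r = _mm512_div_pd(a, b);
        let mut out = [0.0f64; 8];
        _mm512_storeu_pd(out.as_mut_ptr(), r);
        assert_eq!(out, [2.5; 8]);
    }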
- - - - - Divide packed double-precision (64-bit) floating-point elements in "a" by - packed elements in "b", =and store the results in "dst". - [round_note] - - FOR j := 0 to 7 - i := 64*j - dst[i+63:i] := a[i+63:i] / b[i+63:i] - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + Divide packed double-precision (64-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst". + [round_note] +
+FOR j := 0 to 7
+ i := 64*j
+ dst[i+63:i] := a[i+63:i] / b[i+63:i]
+ENDFOR
+dst[MAX:512] := 0
+ + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Divide packed double-precision (64-bit) floating-point elements in "a" by - packed elements in "b", and store the results in "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := 64*j - IF k[j] - dst[i+63:i] := a[i+63:i] / b[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Divide packed double-precision (64-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + IF k[j] + dst[i+63:i] := a[i+63:i] / b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - - Divide packed double-precision (64-bit) floating-point elements in "a" by - packed elements in "b", and store the results in "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). - [round_note] - - FOR j := 0 to 7 - i := 64*j - IF k[j] - dst[i+63:i] := a[i+63:i] / b[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + + Divide packed double-precision (64-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 7 + i := 64*j + IF k[j] + dst[i+63:i] := a[i+63:i] / b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - Divide packed double-precision (64-bit) floating-point elements in "a" by - packed elements in "b", and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := 64*j - IF k[j] - dst[i+63:i] := a[i+63:i] / b[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + Divide packed double-precision (64-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + IF k[j] + dst[i+63:i] := a[i+63:i] / b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Divide packed double-precision (64-bit) floating-point elements in "a" by - packed elements in "b", and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - [round_note] - - FOR j := 0 to 7 - i := 64*j - IF k[j] - dst[i+63:i] := a[i+63:i] / b[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Divide packed double-precision (64-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 7 + i := 64*j + IF k[j] + dst[i+63:i] := a[i+63:i] / b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
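Note: the masked rounding variants compose both behaviors, per-lane zeroing plus an explicit rounding mode; for example:

    use core::arch::x86_64::*;

    #[target_feature(enable = "avx512f")]
    unsafe fn maskz_div_round_pd_demo(k: __mmask8, a: __m512d, b: __m512d) -> __m512d {
        // Lanes with a clear mask bit are zeroed; selected lanes divide
        // with round-toward-zero (truncation) semantics.
        _mm512_maskz_div_round_pd::<{ _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC }>(k, a, b)
    }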
- - - - Divide packed single-precision (32-bit) floating-point elements in "a" by - packed elements in "b", and store the results in "dst". - - FOR j := 0 to 15 - i := 32*j - dst[i+31:i] := a[i+31:i] / b[i+31:i] - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + Divide packed single-precision (32-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst". + +FOR j := 0 to 15 + i := 32*j + dst[i+31:i] := a[i+31:i] / b[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - Divide packed single-precision (32-bit) floating-point elements in "a" by - packed elements in "b", and store the results in "dst". - [round_note] - - FOR j := 0 to 15 - i := 32*j - dst[i+31:i] := a[i+31:i] / b[i+31:i] - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + Divide packed single-precision (32-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst". + [round_note] + +FOR j := 0 to 15 + i := 32*j + dst[i+31:i] := a[i+31:i] / b[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Divide packed single-precision (32-bit) floating-point elements in "a" by - packed elements in "b", and store the results in "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := 32*j - IF k[j] - dst[i+31:i] := a[i+31:i] / b[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Divide packed single-precision (32-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := a[i+31:i] / b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
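Note: for the writemask forms, unselected lanes merge from "src" rather than being zeroed; sketch:

    use core::arch::x86_64::*;

    #[target_feature(enable = "avx512f")]
    unsafe fn mask_div_ps_demo() {
        let src = _mm512_set1_ps(-1.0);
        let a = _mm512_set1_ps(8.0);
        let b = _mm512_set1_ps(2.0);
        // Even lanes compute 8.0 / 2.0; odd lanes keep src's -1.0.
        let r = _mm512_mask_div_ps(src, 0b0101_0101_0101_0101, a, b);
        let mut out = [0.0f32; 16];
        _mm512_storeu_ps(out.as_mut_ptr(), r);
        assert_eq!(&out[..2], &[4.0, -1.0]);
    }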
- - - - - - - Divide packed single-precision (32-bit) floating-point elements in "a" by - packed elements in "b", and store the results in "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). - [round_note] - - FOR j := 0 to 15 - i := 32*j - IF k[j] - dst[i+31:i] := a[i+31:i] / b[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + + Divide packed single-precision (32-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := a[i+31:i] / b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - Divide packed single-precision (32-bit) floating-point elements in "a" by - packed elements in "b", and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := 32*j - IF k[j] - dst[i+31:i] := a[i+31:i] / b[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + Divide packed single-precision (32-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := a[i+31:i] / b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Divide packed single-precision (32-bit) floating-point elements in "a" by - packed elements in "b", and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - [round_note] - - FOR j := 0 to 15 - i := 32*j - IF k[j] - dst[i+31:i] := a[i+31:i] / b[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Divide packed single-precision (32-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := a[i+31:i] / b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - Divide the lower double-precision (64-bit) floating-point element in "a" by the - lower double-precision (64-bit) floating-point element in "b", store the result in the - lower element of "dst", and copy the upper element from "a" to the upper element of - "dst". - [round_note] - - dst[63:0] := a[63:0] / b[63:0] - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + Divide the lower double-precision (64-bit) floating-point element in "a" by the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + [round_note] + +dst[63:0] := a[63:0] / b[63:0] +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - - Divide the lower double-precision (64-bit) floating-point element in "a" by the - lower double-precision (64-bit) floating-point element in "b", store the result in the - lower element of "dst" using writemask "k" (the element is copied from "src" when mask - bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". - [round_note] - - IF k[0] - dst[63:0] := a[63:0] / b[63:0] - ELSE - dst[63:0] := src[63:0] - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + + Divide the lower double-precision (64-bit) floating-point element in "a" by the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + [round_note] + +IF k[0] + dst[63:0] := a[63:0] / b[63:0] +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Divide the lower double-precision (64-bit) floating-point element in "a" by the - lower double-precision (64-bit) floating-point element in "b", store the result in the - lower element of "dst" using writemask "k" (the element is copied from "src" when mask - bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". - - IF k[0] - dst[63:0] := a[63:0] / b[63:0] - ELSE - dst[63:0] := src[63:0] - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Divide the lower double-precision (64-bit) floating-point element in "a" by the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := a[63:0] / b[63:0] +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Divide the lower double-precision (64-bit) floating-point element in "a" by the - lower double-precision (64-bit) floating-point element in "b", store the result in the - lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is - not set), and copy the upper element from "a" to the upper element of "dst". - [round_note] - - IF k[0] - dst[63:0] := a[63:0] / b[63:0] - ELSE - dst[63:0] := 0 - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Divide the lower double-precision (64-bit) floating-point element in "a" by the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + [round_note] + +IF k[0] + dst[63:0] := a[63:0] / b[63:0] +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - Divide the lower double-precision (64-bit) floating-point element in "a" by the - lower double-precision (64-bit) floating-point element in "b", store the result in the - lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is - not set), and copy the upper element from "a" to the upper element of "dst". - - IF k[0] - dst[63:0] := a[63:0] / b[63:0] - ELSE - dst[63:0] := 0 - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + Divide the lower double-precision (64-bit) floating-point element in "a" by the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := a[63:0] / b[63:0] +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - Divide the lower single-precision (32-bit) floating-point element in "a" by the - lower single-precision (32-bit) floating-point element in "b", store the result in the - lower element of "dst", and copy the upper 3 packed elements from "a" to the upper - elements of "dst". - [round_note] - - dst[31:0] := a[31:0] / b[31:0] - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + Divide the lower single-precision (32-bit) floating-point element in "a" by the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + +dst[31:0] := a[31:0] / b[31:0] +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - - Divide the lower single-precision (32-bit) floating-point element in "a" by the - lower single-precision (32-bit) floating-point element in "b", store the result in the - lower element of "dst" using writemask "k" (the element is copied from "src" when mask - bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements - of "dst". - [round_note] - - IF k[0] - dst[31:0] := a[31:0] / b[31:0] - ELSE - dst[31:0] := src[31:0] - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + + Divide the lower single-precision (32-bit) floating-point element in "a" by the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + +IF k[0] + dst[31:0] := a[31:0] / b[31:0] +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Divide the lower single-precision (32-bit) floating-point element in "a" by the - lower single-precision (32-bit) floating-point element in "b", store the result in the - lower element of "dst" using writemask "k" (the element is copied from "src" when mask - bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements - of "dst". - - IF k[0] - dst[31:0] := a[31:0] / b[31:0] - ELSE - dst[31:0] := src[31:0] - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Divide the lower single-precision (32-bit) floating-point element in "a" by the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst[31:0] := a[31:0] / b[31:0] +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Divide the lower single-precision (32-bit) floating-point element in "a" by the - lower single-precision (32-bit) floating-point element in "b", store the result in the - lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is - not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". - [round_note] - - IF k[0] - dst[31:0] := a[31:0] / b[31:0] - ELSE - dst[31:0] := 0 - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Divide the lower single-precision (32-bit) floating-point element in "a" by the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + +IF k[0] + dst[31:0] := a[31:0] / b[31:0] +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - Divide the lower single-precision (32-bit) floating-point element in "a" by the - lower single-precision (32-bit) floating-point element in "b", store the result in the - lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is - not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". - - IF k[0] - dst[31:0] := a[31:0] / b[31:0] - ELSE - dst[31:0] := 0 - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + Divide the lower single-precision (32-bit) floating-point element in "a" by the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst[31:0] := a[31:0] / b[31:0] +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
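Note: closing out the divide block, the zero-masked scalar single form: with bit 0 of "k" clear the low lane becomes 0.0 while the upper three lanes still come from "a". Sketch:

    use core::arch::x86_64::*;

    #[target_feature(enable = "avx512f")]
    unsafe fn maskz_div_ss_demo() {
        let a = _mm_set_ps(4.0, 3.0, 2.0, 9.0); // lanes [9.0, 2.0, 3.0, 4.0]
        let b = _mm_set1_ps(3.0);
        let r = _mm_maskz_div_ss(0, a, b); // mask bit 0 clear
        let mut out = [0.0f32; 4];
        _mm_storeu_ps(out.as_mut_ptr(), r);
        assert_eq!(out, [0.0, 2.0, 3.0, 4.0]); // low lane zeroed, rest from a
    }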
- - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", add the intermediate result to packed elements in "c", and store the results in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
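Note: the fmadd entries begin here. Unlike a separate multiply followed by an add, the fused form rounds once, after the multiply-add. Usage sketch:

    use core::arch::x86_64::*;

    #[target_feature(enable = "avx512f")]
    unsafe fn maskz_fmadd_pd_demo() {
        let a = _mm512_set1_pd(2.0);
        let b = _mm512_set1_pd(3.0);
        let c = _mm512_set1_pd(1.0);
        // Lane 0 computes 2.0 * 3.0 + 1.0 = 7.0; lanes 1..=7 are zeroed.
        let r = _mm512_maskz_fmadd_pd(0b0000_0001, a, b, c);
        assert_eq!(_mm512_cvtsd_f64(r), 7.0);
    }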
- - - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", add the intermediate result to packed elements in "c", and store the results in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - [round_note] - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", add the intermediate result to packed elements in "c", and store the results in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", add the intermediate result to packed elements in "c", and store the results in "a" - using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - [round_note] - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] +
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Multiply the lower double-precision (64-bit) floating-point elements in "a" and - "b", and add the intermediate result to the lower element in "c". Store the result in - the lower element of "dst", and copy the upper element from "a" to the upper element of - "dst". - [round_note] - - dst[63:0] := (a[63:0] * b[63:0]) + c[63:0] - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + [round_note] + +dst[63:0] := (a[63:0] * b[63:0]) + c[63:0] +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - - Multiply the lower double-precision (64-bit) floating-point elements in "a" and - "b", and add the intermediate result to the lower element in "c". Store the result in - the lower element of "dst" using writemask "k" (the element is copied from "c" when mask - bit 0 is not set), and copy the upper element from "c" to the upper element of "dst". - [round_note] - - IF k[0] - dst[63:0] := (a[63:0] * b[63:0]) + c[63:0] - ELSE - dst[63:0] := c[63:0] - FI - dst[127:64] := c[127:64] - dst[MAX:128] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper element from "c" to the upper element of "dst". + [round_note] + +IF k[0] + dst[63:0] := (a[63:0] * b[63:0]) + c[63:0] +ELSE + dst[63:0] := c[63:0] +FI +dst[127:64] := c[127:64] +dst[MAX:128] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Multiply the lower double-precision (64-bit) floating-point elements in "a" and - "b", and add the intermediate result to the lower element in "c". Store the result in - the lower element of "dst" using writemask "k" (the element is copied from "c" when mask - bit 0 is not set), and copy the upper element from "c" to the upper element of "dst". - - IF k[0] - dst[63:0] := (a[63:0] * b[63:0]) + c[63:0] - ELSE - dst[63:0] := c[63:0] - FI - dst[127:64] := c[127:64] - dst[MAX:128] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper element from "c" to the upper element of "dst". + +IF k[0] + dst[63:0] := (a[63:0] * b[63:0]) + c[63:0] +ELSE + dst[63:0] := c[63:0] +FI +dst[127:64] := c[127:64] +dst[MAX:128] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - - Multiply the lower double-precision (64-bit) floating-point elements in "a" and - "b", and add the intermediate result to the lower element in "c". Store the result in - the lower element of "dst" using writemask "k" (the element is copied from "a" when mask - bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". - [round_note] - - IF k[0] - dst[63:0] := (a[63:0] * b[63:0]) + c[63:0] - ELSE - dst[63:0] := a[63:0] - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + [round_note] + +IF k[0] + dst[63:0] := (a[63:0] * b[63:0]) + c[63:0] +ELSE + dst[63:0] := a[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Multiply the lower double-precision (64-bit) floating-point elements in "a" and - "b", and add the intermediate result to the lower element in "c". Store the result in - the lower element of "dst" using writemask "k" (the element is copied from "a" when mask - bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". - - IF k[0] - dst[63:0] := (a[63:0] * b[63:0]) + c[63:0] - ELSE - dst[63:0] := a[63:0] - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := (a[63:0] * b[63:0]) + c[63:0] +ELSE + dst[63:0] := a[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
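Note: the difference between the scalar mask and mask3 fmadd forms above is only which operand survives a clear mask bit: the mask form falls back to "a" (and copies its upper element from "a"), while the mask3 form falls back to "c". Sketch:

    use core::arch::x86_64::*;

    #[target_feature(enable = "avx512f")]
    unsafe fn fmadd_sd_mask_variants(a: __m128d, b: __m128d, c: __m128d) {
        // Mask bit 0 clear: lane 0 and the upper lane both come from `a`.
        let merged_a = _mm_mask_fmadd_sd(a, 0, b, c);
        // Mask bit 0 clear: lane 0 and the upper lane both come from `c`.
        let merged_c = _mm_mask3_fmadd_sd(a, b, c, 0);
        assert_eq!(_mm_cvtsd_f64(merged_a), _mm_cvtsd_f64(a));
        assert_eq!(_mm_cvtsd_f64(merged_c), _mm_cvtsd_f64(c));
    }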
- - - - - - - Multiply the lower double-precision (64-bit) floating-point elements in "a" and - "b", and add the intermediate result to the lower element in "c". Store the result in - the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 - is not set), and copy the upper element from "a" to the upper element of "dst". - [round_note] - - IF k[0] - dst[63:0] := (a[63:0] * b[63:0]) + c[63:0] - ELSE - dst[63:0] := 0 - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + [round_note] + +IF k[0] + dst[63:0] := (a[63:0] * b[63:0]) + c[63:0] +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Multiply the lower double-precision (64-bit) floating-point elements in "a" and - "b", and add the intermediate result to the lower element in "c". Store the result in - the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 - is not set), and copy the upper element from "a" to the upper element of "dst". - - IF k[0] - dst[63:0] := (a[63:0] * b[63:0]) + c[63:0] - ELSE - dst[63:0] := 0 - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := (a[63:0] * b[63:0]) + c[63:0] +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - - Multiply the lower single-precision (32-bit) floating-point elements in "a" and - "b", and add the intermediate result to the lower element in "c". Store the result in - the lower element of "dst" using writemask "k" (the element is copied from "c" when mask - bit 0 is not set), and copy the upper 3 packed elements from "c" to the upper elements - of "dst". - [round_note] - - IF k[0] - dst[31:0] := (a[31:0] * b[31:0]) + c[31:0] - ELSE - dst[31:0] := c[31:0] - FI - dst[127:32] := c[127:32] - dst[MAX:128] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper 3 packed elements from "c" to the upper elements of "dst". + [round_note] + +IF k[0] + dst[31:0] := (a[31:0] * b[31:0]) + c[31:0] +ELSE + dst[31:0] := c[31:0] +FI +dst[127:32] := c[127:32] +dst[MAX:128] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Multiply the lower single-precision (32-bit) floating-point elements in "a" and - "b", and add the intermediate result to the lower element in "c". Store the result in - the lower element of "dst" using writemask "k" (the element is copied from "c" when mask - bit 0 is not set), and copy the upper 3 packed elements from "c" to the upper elements - of "dst". - - IF k[0] - dst[31:0] := (a[31:0] * b[31:0]) + c[31:0] - ELSE - dst[31:0] := c[31:0] - FI - dst[127:32] := c[127:32] - dst[MAX:128] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper 3 packed elements from "c" to the upper elements of "dst". + +IF k[0] + dst[31:0] := (a[31:0] * b[31:0]) + c[31:0] +ELSE + dst[31:0] := c[31:0] +FI +dst[127:32] := c[127:32] +dst[MAX:128] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Multiply the lower single-precision (32-bit) floating-point elements in "a" and - "b", and add the intermediate result to the lower element in "c". Store the result in - the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper - elements of "dst". - [round_note] - - dst[31:0] := (a[31:0] * b[31:0]) + c[31:0] - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + +dst[31:0] := (a[31:0] * b[31:0]) + c[31:0] +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - - Multiply the lower single-precision (32-bit) floating-point elements in "a" and - "b", and add the intermediate result to the lower element in "c". Store the result in - the lower element of "dst" using writemask "k" (the element is copied from "a" when mask - bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements - of "dst". - [round_note] - - IF k[0] - dst[31:0] := (a[31:0] * b[31:0]) + c[31:0] - ELSE - dst[31:0] := a[31:0] - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + +IF k[0] + dst[31:0] := (a[31:0] * b[31:0]) + c[31:0] +ELSE + dst[31:0] := a[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Multiply the lower single-precision (32-bit) floating-point elements in "a" and - "b", and add the intermediate result to the lower element in "c". Store the result in - the lower element of "dst" using writemask "k" (the element is copied from "a" when mask - bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements - of "dst". - - IF k[0] - dst[31:0] := (a[31:0] * b[31:0]) + c[31:0] - ELSE - dst[31:0] := a[31:0] - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst[31:0] := (a[31:0] * b[31:0]) + c[31:0] +ELSE + dst[31:0] := a[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - - Multiply the lower single-precision (32-bit) floating-point elements in "a" and - "b", and add the intermediate result to the lower element in "c". Store the result in - the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 - is not set), and copy the upper 3 packed elements from "a" to the upper elements of - "dst". - [round_note] - - IF k[0] - dst[31:0] := (a[31:0] * b[31:0]) + c[31:0] - ELSE - dst[31:0] := 0 - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + +IF k[0] + dst[31:0] := (a[31:0] * b[31:0]) + c[31:0] +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Multiply the lower single-precision (32-bit) floating-point elements in "a" and - "b", and add the intermediate result to the lower element in "c". Store the result in - the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 - is not set), and copy the upper 3 packed elements from "a" to the upper elements of - "dst". - - IF k[0] - dst[31:0] := (a[31:0] * b[31:0]) + c[31:0] - ELSE - dst[31:0] := 0 - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst[31:0] := (a[31:0] * b[31:0]) + c[31:0] +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", alternatively add and subtract packed elements in "c" to/from the intermediate - result, and store the results in "dst". - - FOR j := 0 to 7 - i := j*64 - IF ((j & 1) == 0) + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + IF ((j & 1) == 0) + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic +
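Note: fmaddsub subtracts "c" in the even lanes and adds it in the odd lanes. A plain-Rust reference model mirroring the pseudocode above (no intrinsics, so it can double as a test oracle):

    // Reference model for the 8-lane fmaddsub pseudocode.
    fn fmaddsub_pd_model(a: [f64; 8], b: [f64; 8], c: [f64; 8]) -> [f64; 8] {
        let mut dst = [0.0f64; 8];
        for j in 0..8 {
            dst[j] = if j % 2 == 0 {
                a[j] * b[j] - c[j] // even lane: subtract c
            } else {
                a[j] * b[j] + c[j] // odd lane: add c
            };
        }
        dst
    }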
+ + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst". + [round_note] + +FOR j := 0 to 7 + i := j*64 + IF ((j & 1) == 0) + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic +
+ + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF ((j & 1) == 0) dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] - ELSE + ELSE dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic -
- - - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", alternatively add and subtract packed elements in "c" to/from the intermediate - result, and store the results in "dst". - [round_note] - - FOR j := 0 to 7 - i := j*64 - IF ((j & 1) == 0) - dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] - ELSE - dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic -
- - - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", alternatively add and subtract packed elements in "c" to/from the intermediate - result, and store the results in "dst" using writemask "k" (elements are copied from "c" - when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - IF ((j & 1) == 0) - dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] - ELSE - dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] - FI - ELSE - dst[i+63:i] := c[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + FI + ELSE + dst[i+63:i] := c[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
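Note: as with fmadd, the mask3 variant merges unselected lanes from "c"; usage sketch (demo name and mask value illustrative):

    use core::arch::x86_64::*;

    #[target_feature(enable = "avx512f")]
    unsafe fn mask3_fmaddsub_pd_demo(a: __m512d, b: __m512d, c: __m512d) -> __m512d {
        // The low four lanes have clear mask bits and pass c through unchanged;
        // the high four lanes compute the alternating (a * b) -/+ c.
        _mm512_mask3_fmaddsub_pd(a, b, c, 0b1111_0000)
    }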
- - - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", alternatively add and subtract packed elements in "c" to/from the intermediate - result, and store the results in "dst" using writemask "k" (elements are copied from "c" - when the corresponding mask bit is not set). [round_note] - - FOR j := 0 to 7 - i := j*64 - IF k[j] - IF ((j & 1) == 0) + + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). [round_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF ((j & 1) == 0) dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] - ELSE + ELSE dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] - FI - ELSE - dst[i+63:i] := c[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + FI + ELSE + dst[i+63:i] := c[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 +
+ + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", alternatively add and subtract packed elements in "c" to/from the intermediate - result, and store the results in "dst" using writemask "k" (elements are copied from "a" - when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - IF ((j & 1) == 0) + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF ((j & 1) == 0) dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] - ELSE + ELSE dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] - FI - ELSE - dst[i+63:i] := a[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + FI + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 +
+ + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", alternatively add and subtract packed elements in "c" to/from the intermediate - result, and store the results in "dst" using writemask "k" (elements are copied from "a" - when the corresponding mask bit is not set). [round_note] - - FOR j := 0 to 7 - i := j*64 - IF k[j] - IF ((j & 1) == 0) + + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). [round_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF ((j & 1) == 0) dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] - ELSE + ELSE dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] - FI - ELSE - dst[i+63:i] := a[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + FI + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 +
+ + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", alternatively add and subtract packed elements in "c" to/from the intermediate - result, and store the results in "dst" using zeromask "k" (elements are zeroed out when - the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - IF ((j & 1) == 0) + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF ((j & 1) == 0) dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] - ELSE + ELSE dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] - FI - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 +
+ + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", alternatively add and subtract packed elements in "c" to/from the intermediate - result, and store the results in "dst" using zeromask "k" (elements are zeroed out when - the corresponding mask bit is not set). - [round_note] - - FOR j := 0 to 7 - i := j*64 - IF k[j] - IF ((j & 1) == 0) + + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF ((j & 1) == 0) dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] - ELSE + ELSE dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] - FI - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 +
+ + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", alternatively add and subtract packed elements in "c" to/from the intermediate - result, and store the results in "dst". - - FOR j := 0 to 15 - i := j*32 - IF ((j & 1) == 0) - dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] - ELSE - dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + IF ((j & 1) == 0) + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", alternatively add and subtract packed elements in "c" to/from the intermediate - result, and store the results in "dst". - [round_note] - - FOR j := 0 to 15 - i := j*32 - IF ((j & 1) == 0) - dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] - ELSE - dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst". + [round_note] + +FOR j := 0 to 15 + i := j*32 + IF ((j & 1) == 0) + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
+    Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).
+
+    FOR j := 0 to 15
+        i := j*32
+        IF k[j]
+            IF ((j & 1) == 0)
+                dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+            ELSE
+                dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+            FI
+        ELSE
+            dst[i+31:i] := c[i+31:i]
+        FI
+    ENDFOR
+    dst[MAX:512] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic

+    Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). [round_note]
+
+    FOR j := 0 to 15
+        i := j*32
+        IF k[j]
+            IF ((j & 1) == 0)
+                dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+            ELSE
+                dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+            FI
+        ELSE
+            dst[i+31:i] := c[i+31:i]
+        FI
+    ENDFOR
+    dst[MAX:512] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic
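
The writemask variant differs only in what happens to unselected lanes: they are passed through from "c". A hedged sketch under the same assumptions as above, with the mask `k` modeled as the low bits of a `u16`:

    // Hypothetical model of the merge-masked ("copied from c") FMADDSUB:
    // lanes whose mask bit is clear keep the value from `c`.
    fn mask3_fmaddsub_ps_model(a: [f32; 16], b: [f32; 16], c: [f32; 16], k: u16) -> [f32; 16] {
        let mut dst = c; // unselected lanes keep `c`
        for j in 0..16 {
            if (k >> j) & 1 == 1 {
                dst[j] = if j % 2 == 0 {
                    a[j].mul_add(b[j], -c[j])
                } else {
                    a[j].mul_add(b[j], c[j])
                };
            }
        }
        dst
    }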
+    Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+
+    FOR j := 0 to 15
+        i := j*32
+        IF k[j]
+            IF ((j & 1) == 0)
+                dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+            ELSE
+                dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+            FI
+        ELSE
+            dst[i+31:i] := a[i+31:i]
+        FI
+    ENDFOR
+    dst[MAX:512] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic

+    Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). [round_note]
+
+    FOR j := 0 to 15
+        i := j*32
+        IF k[j]
+            IF ((j & 1) == 0)
+                dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+            ELSE
+                dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+            FI
+        ELSE
+            dst[i+31:i] := a[i+31:i]
+        FI
+    ENDFOR
+    dst[MAX:512] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic

+    Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+    FOR j := 0 to 15
+        i := j*32
+        IF k[j]
+            IF ((j & 1) == 0)
+                dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+            ELSE
+                dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+            FI
+        ELSE
+            dst[i+31:i] := 0
+        FI
+    ENDFOR
+    dst[MAX:512] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic

+    Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+    [round_note]
+
+    FOR j := 0 to 15
+        i := j*32
+        IF k[j]
+            IF ((j & 1) == 0)
+                dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+            ELSE
+                dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+            FI
+        ELSE
+            dst[i+31:i] := 0
+        FI
+    ENDFOR
+    dst[MAX:512] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic
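
The zeromask variant instead forces unselected lanes to zero. A sketch under the same assumptions (hypothetical name, `u16` mask, rounding controls not modeled):

    // Hypothetical model of the zero-masked FMADDSUB:
    // lanes whose mask bit is clear become 0.0.
    fn maskz_fmaddsub_ps_model(a: [f32; 16], b: [f32; 16], c: [f32; 16], k: u16) -> [f32; 16] {
        let mut dst = [0.0f32; 16]; // unselected lanes are zeroed
        for j in 0..16 {
            if (k >> j) & 1 == 1 {
                dst[j] = if j % 2 == 0 {
                    a[j].mul_add(b[j], -c[j])
                } else {
                    a[j].mul_add(b[j], c[j])
                };
            }
        }
        dst
    }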
+    Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+    FOR j := 0 to 7
+        i := j*64
+        IF k[j]
+            dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+        ELSE
+            dst[i+63:i] := 0
+        FI
+    ENDFOR
+    dst[MAX:512] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic

+    Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+    [round_note]
+
+    FOR j := 0 to 7
+        i := j*64
+        IF k[j]
+            dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+        ELSE
+            dst[i+63:i] := 0
+        FI
+    ENDFOR
+    dst[MAX:512] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic

+    Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+    FOR j := 0 to 15
+        i := j*32
+        IF k[j]
+            dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+        ELSE
+            dst[i+31:i] := 0
+        FI
+    ENDFOR
+    dst[MAX:512] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic

+    Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+    [round_note]
+
+    FOR j := 0 to 15
+        i := j*32
+        IF k[j]
+            dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+        ELSE
+            dst[i+31:i] := 0
+        FI
+    ENDFOR
+    dst[MAX:512] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic

+    Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".
+    [round_note]
+
+    dst[63:0] := (a[63:0] * b[63:0]) - c[63:0]
+    dst[127:64] := a[127:64]
+    dst[MAX:128] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic
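
The scalar (lower-element) forms touch only the low lane; the rest of the register is copied through. A minimal sketch, assuming the 128-bit vector is modeled as `[f64; 2]` and ignoring the [round_note] rounding control (the name is hypothetical):

    // Hypothetical model of scalar FMSUB: compute the low lane,
    // copy the upper lane from `a`.
    fn fmsub_sd_model(a: [f64; 2], b: [f64; 2], c: [f64; 2]) -> [f64; 2] {
        [a[0].mul_add(b[0], -c[0]), a[1]]
    }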
+    Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper element from "c" to the upper element of "dst".
+    [round_note]
+
+    IF k[0]
+        dst[63:0] := (a[63:0] * b[63:0]) - c[63:0]
+    ELSE
+        dst[63:0] := c[63:0]
+    FI
+    dst[127:64] := c[127:64]
+    dst[MAX:128] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic

+    Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper element from "c" to the upper element of "dst".
+
+    IF k[0]
+        dst[63:0] := (a[63:0] * b[63:0]) - c[63:0]
+    ELSE
+        dst[63:0] := c[63:0]
+    FI
+    dst[127:64] := c[127:64]
+    dst[MAX:128] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic

+    Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".
+    [round_note]
+
+    IF k[0]
+        dst[63:0] := (a[63:0] * b[63:0]) - c[63:0]
+    ELSE
+        dst[63:0] := a[63:0]
+    FI
+    dst[127:64] := a[127:64]
+    dst[MAX:128] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic

+    Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".
+
+    IF k[0]
+        dst[63:0] := (a[63:0] * b[63:0]) - c[63:0]
+    ELSE
+        dst[63:0] := a[63:0]
+    FI
+    dst[127:64] := a[127:64]
+    dst[MAX:128] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic
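
For the merge-masked scalar forms only mask bit 0 matters. A sketch of the "copied from a" variant under the same assumptions as before:

    // Hypothetical model of merge-masked scalar FMSUB: mask bit 0 selects
    // between the computed result and a pass-through from `a`.
    fn mask_fmsub_sd_model(a: [f64; 2], k: u8, b: [f64; 2], c: [f64; 2]) -> [f64; 2] {
        let low = if k & 1 == 1 { a[0].mul_add(b[0], -c[0]) } else { a[0] };
        [low, a[1]] // the upper lane always comes from `a` in this variant
    }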
+    Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".
+    [round_note]
+
+    IF k[0]
+        dst[63:0] := (a[63:0] * b[63:0]) - c[63:0]
+    ELSE
+        dst[63:0] := 0
+    FI
+    dst[127:64] := a[127:64]
+    dst[MAX:128] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic

+    Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".
+
+    IF k[0]
+        dst[63:0] := (a[63:0] * b[63:0]) - c[63:0]
+    ELSE
+        dst[63:0] := 0
+    FI
+    dst[127:64] := a[127:64]
+    dst[MAX:128] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic

+    Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+    [round_note]
+
+    dst[31:0] := (a[31:0] * b[31:0]) - c[31:0]
+    dst[127:32] := a[127:32]
+    dst[MAX:128] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic

+    Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper 3 packed elements from "c" to the upper elements of "dst".
+    [round_note]
+
+    IF k[0]
+        dst[31:0] := (a[31:0] * b[31:0]) - c[31:0]
+    ELSE
+        dst[31:0] := c[31:0]
+    FI
+    dst[127:32] := c[127:32]
+    dst[MAX:128] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic

+    Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper 3 packed elements from "c" to the upper elements of "dst".
+
+    IF k[0]
+        dst[31:0] := (a[31:0] * b[31:0]) - c[31:0]
+    ELSE
+        dst[31:0] := c[31:0]
+    FI
+    dst[127:32] := c[127:32]
+    dst[MAX:128] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic

+    Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+    [round_note]
+
+    IF k[0]
+        dst[31:0] := (a[31:0] * b[31:0]) - c[31:0]
+    ELSE
+        dst[31:0] := a[31:0]
+    FI
+    dst[127:32] := a[127:32]
+    dst[MAX:128] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic

+    Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+
+    IF k[0]
+        dst[31:0] := (a[31:0] * b[31:0]) - c[31:0]
+    ELSE
+        dst[31:0] := a[31:0]
+    FI
+    dst[127:32] := a[127:32]
+    dst[MAX:128] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic

+    Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+    [round_note]
+
+    IF k[0]
+        dst[31:0] := (a[31:0] * b[31:0]) - c[31:0]
+    ELSE
+        dst[31:0] := 0
+    FI
+    dst[127:32] := a[127:32]
+    dst[MAX:128] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic

+    Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+
+    IF k[0]
+        dst[31:0] := (a[31:0] * b[31:0]) - c[31:0]
+    ELSE
+        dst[31:0] := 0
+    FI
+    dst[127:32] := a[127:32]
+    dst[MAX:128] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic

+    Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst".
+
+    FOR j := 0 to 7
+        i := j*64
+        IF ((j & 1) == 0)
+            dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+        ELSE
+            dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+        FI
+    ENDFOR
+    dst[MAX:512] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic

+    Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst".
+    [round_note]
+
+    FOR j := 0 to 7
+        i := j*64
+        IF ((j & 1) == 0)
+            dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+        ELSE
+            dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+        FI
+    ENDFOR
+    dst[MAX:512] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic
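
Note that FMSUBADD's lane parity is the mirror image of FMADDSUB: even lanes add "c", odd lanes subtract it. A sketch on 8 `f64` lanes (hypothetical name, rounding controls not modeled):

    // Hypothetical model of FMSUBADD on 8 f64 lanes:
    // even lanes compute a*b + c, odd lanes compute a*b - c.
    fn fmsubadd_pd_model(a: [f64; 8], b: [f64; 8], c: [f64; 8]) -> [f64; 8] {
        let mut dst = [0.0f64; 8];
        for j in 0..8 {
            dst[j] = if j % 2 == 0 {
                a[j].mul_add(b[j], c[j]) // even lane: add c
            } else {
                a[j].mul_add(b[j], -c[j]) // odd lane: subtract c
            };
        }
        dst
    }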
+    Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).
+
+    FOR j := 0 to 7
+        i := j*64
+        IF k[j]
+            IF ((j & 1) == 0)
+                dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+            ELSE
+                dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+            FI
+        ELSE
+            dst[i+63:i] := c[i+63:i]
+        FI
+    ENDFOR
+    dst[MAX:512] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic

+    Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). [round_note]
+
+    FOR j := 0 to 7
+        i := j*64
+        IF k[j]
+            IF ((j & 1) == 0)
+                dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+            ELSE
+                dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+            FI
+        ELSE
+            dst[i+63:i] := c[i+63:i]
+        FI
+    ENDFOR
+    dst[MAX:512] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic

+    Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+
+    FOR j := 0 to 7
+        i := j*64
+        IF k[j]
+            IF ((j & 1) == 0)
+                dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+            ELSE
+                dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+            FI
+        ELSE
+            dst[i+63:i] := a[i+63:i]
+        FI
+    ENDFOR
+    dst[MAX:512] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic

+    Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). [round_note]
+
+    FOR j := 0 to 7
+        i := j*64
+        IF k[j]
+            IF ((j & 1) == 0)
+                dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+            ELSE
+                dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+            FI
+        ELSE
+            dst[i+63:i] := a[i+63:i]
+        FI
+    ENDFOR
+    dst[MAX:512] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic

+    Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+    FOR j := 0 to 7
+        i := j*64
+        IF k[j]
+            IF ((j & 1) == 0)
+                dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+            ELSE
+                dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+            FI
+        ELSE
+            dst[i+63:i] := 0
+        FI
+    ENDFOR
+    dst[MAX:512] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic

+    Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+    [round_note]
+
+    FOR j := 0 to 7
+        i := j*64
+        IF k[j]
+            IF ((j & 1) == 0)
+                dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+            ELSE
+                dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+            FI
+        ELSE
+            dst[i+63:i] := 0
+        FI
+    ENDFOR
+    dst[MAX:512] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic

+    Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst".
+
+    FOR j := 0 to 15
+        i := j*32
+        IF ((j & 1) == 0)
+            dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+        ELSE
+            dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+        FI
+    ENDFOR
+    dst[MAX:512] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic

+    Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst".
+    [round_note]
+
+    FOR j := 0 to 15
+        i := j*32
+        IF ((j & 1) == 0)
+            dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+        ELSE
+            dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+        FI
+    ENDFOR
+    dst[MAX:512] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic

+    Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).
+
+    FOR j := 0 to 15
+        i := j*32
+        IF k[j]
+            IF ((j & 1) == 0)
+                dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+            ELSE
+                dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+            FI
+        ELSE
+            dst[i+31:i] := c[i+31:i]
+        FI
+    ENDFOR
+    dst[MAX:512] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic

+    Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). [round_note]
+
+    FOR j := 0 to 15
+        i := j*32
+        IF k[j]
+            IF ((j & 1) == 0)
+                dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+            ELSE
+                dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+            FI
+        ELSE
+            dst[i+31:i] := c[i+31:i]
+        FI
+    ENDFOR
+    dst[MAX:512] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic

+    Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+
+    FOR j := 0 to 15
+        i := j*32
+        IF k[j]
+            IF ((j & 1) == 0)
+                dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+            ELSE
+                dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+            FI
+        ELSE
+            dst[i+31:i] := a[i+31:i]
+        FI
+    ENDFOR
+    dst[MAX:512] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic

+    Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+    [round_note]
+
+    FOR j := 0 to 15
+        i := j*32
+        IF k[j]
+            IF ((j & 1) == 0)
+                dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+            ELSE
+                dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+            FI
+        ELSE
+            dst[i+31:i] := a[i+31:i]
+        FI
+    ENDFOR
+    dst[MAX:512] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic

+    Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+    FOR j := 0 to 15
+        i := j*32
+        IF k[j]
+            IF ((j & 1) == 0)
+                dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+            ELSE
+                dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+            FI
+        ELSE
+            dst[i+31:i] := 0
+        FI
+    ENDFOR
+    dst[MAX:512] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic

+    Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+    [round_note]
+
+    FOR j := 0 to 15
+        i := j*32
+        IF k[j]
+            IF ((j & 1) == 0)
+                dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+            ELSE
+                dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+            FI
+        ELSE
+            dst[i+31:i] := 0
+        FI
+    ENDFOR
+    dst[MAX:512] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic

+    Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+    FOR j := 0 to 7
+        i := j*64
+        IF k[j]
+            dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i]
+        ELSE
+            dst[i+63:i] := 0
+        FI
+    ENDFOR
+    dst[MAX:512] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic

+    Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_note]
+
+    FOR j := 0 to 7
+        i := j*64
+        IF k[j]
+            dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i]
+        ELSE
+            dst[i+63:i] := 0
+        FI
+    ENDFOR
+    dst[MAX:512] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic

+    Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+    FOR j := 0 to 15
+        i := j*32
+        IF k[j]
+            dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i]
+        ELSE
+            dst[i+31:i] := 0
+        FI
+    ENDFOR
+    dst[MAX:512] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic

+    Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_note]
+
+    FOR j := 0 to 15
+        i := j*32
+        IF k[j]
+            dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i]
+        ELSE
+            dst[i+31:i] := 0
+        FI
+    ENDFOR
+    dst[MAX:512] := 0
+
+    AVX512F
+    immintrin.h
+    Arithmetic
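
In the FNMADD family the product is negated before "c" is added. A sketch of the zero-masked packed form, under the same assumptions as the earlier models:

    // Hypothetical model of zero-masked FNMADD: dst = -(a*b) + c for
    // selected lanes, 0.0 otherwise.
    fn maskz_fnmadd_ps_model(a: [f32; 16], b: [f32; 16], c: [f32; 16], k: u16) -> [f32; 16] {
        let mut dst = [0.0f32; 16];
        for j in 0..16 {
            if (k >> j) & 1 == 1 {
                dst[j] = (-a[j]).mul_add(b[j], c[j]); // -(a*b) + c, one rounding
            }
        }
        dst
    }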
- - - - - - Multiply the lower double-precision (64-bit) floating-point elements in "a" and - "b", and add the negated intermediate result to the lower element in "c". Store the - result in the lower element of "dst", and copy the upper element from "a" to the upper - element of "dst". - [round_note] - - dst[63:0] := -(a[63:0] * b[63:0]) + c[63:0] - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + [round_note] + +dst[63:0] := -(a[63:0] * b[63:0]) + c[63:0] +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - - Multiply the lower double-precision (64-bit) floating-point elements in "a" and - "b", and add the negated intermediate result to the lower element in "c". Store the - result in the lower element of "dst" using writemask "k" (the element is copied from "c" - when mask bit 0 is not set), and copy the upper element from "c" to the upper element of - "dst". - [round_note] - - IF k[0] - dst[63:0] := -(a[63:0] * b[63:0]) + c[63:0] - ELSE - dst[63:0] := c[63:0] - FI - dst[127:64] := c[127:64] - dst[MAX:128] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper element from "c" to the upper element of "dst". + [round_note] + +IF k[0] + dst[63:0] := -(a[63:0] * b[63:0]) + c[63:0] +ELSE + dst[63:0] := c[63:0] +FI +dst[127:64] := c[127:64] +dst[MAX:128] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Multiply the lower double-precision (64-bit) floating-point elements in "a" and - "b", and add the negated intermediate result to the lower element in "c". Store the - result in the lower element of "dst" using writemask "k" (the element is copied from "c" - when mask bit 0 is not set), and copy the upper element from "c" to the upper element of - "dst". - - IF k[0] - dst[63:0] := -(a[63:0] * b[63:0]) + c[63:0] - ELSE - dst[63:0] := c[63:0] - FI - dst[127:64] := c[127:64] - dst[MAX:128] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper element from "c" to the upper element of "dst". + +IF k[0] + dst[63:0] := -(a[63:0] * b[63:0]) + c[63:0] +ELSE + dst[63:0] := c[63:0] +FI +dst[127:64] := c[127:64] +dst[MAX:128] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - - Multiply the lower double-precision (64-bit) floating-point elements in "a" and - "b", and add the negated intermediate result to the lower element in "c". Store the - result in the lower element of "dst" using writemask "k" (the element is copied from "a" - when mask bit 0 is not set), and copy the upper element from "a" to the upper element of - "dst". - [round_note] - - IF k[0] - dst[63:0] := -(a[63:0] * b[63:0]) + c[63:0] - ELSE - dst[63:0] := a[63:0] - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + [round_note] + +IF k[0] + dst[63:0] := -(a[63:0] * b[63:0]) + c[63:0] +ELSE + dst[63:0] := a[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Multiply the lower double-precision (64-bit) floating-point elements in "a" and - "b", and add the negated intermediate result to the lower element in "c". Store the - result in the lower element of "dst" using writemask "k" (the element is copied from "a" - when mask bit 0 is not set), and copy the upper element from "a" to the upper element of - "dst". - - IF k[0] - dst[63:0] := -(a[63:0] * b[63:0]) + c[63:0] - ELSE - dst[63:0] := a[63:0] - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := -(a[63:0] * b[63:0]) + c[63:0] +ELSE + dst[63:0] := a[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - - Multiply the lower double-precision (64-bit) floating-point elements in "a" and - "b", and add the negated intermediate result to the lower element in "c". Store the - result in the lower element of "dst" using zeromask "k" (the element is zeroed out when - mask bit 0 is not set), and copy the upper element from "a" to the upper element of - "dst". - [round_note] - - IF k[0] - dst[63:0] := -(a[63:0] * b[63:0]) + c[63:0] - ELSE - dst[63:0] := 0 - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + [round_note] + +IF k[0] + dst[63:0] := -(a[63:0] * b[63:0]) + c[63:0] +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Multiply the lower double-precision (64-bit) floating-point elements in "a" and - "b", and add the negated intermediate result to the lower element in "c". Store the - result in the lower element of "dst" using zeromask "k" (the element is zeroed out when - mask bit 0 is not set), and copy the upper element from "a" to the upper element of - "dst". - - IF k[0] - dst[63:0] := -(a[63:0] * b[63:0]) + c[63:0] - ELSE - dst[63:0] := 0 - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := -(a[63:0] * b[63:0]) + c[63:0] +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Multiply the lower single-precision (32-bit) floating-point elements in "a" and - "b", and add the negated intermediate result to the lower element in "c". Store the - result in the lower element of "dst", and copy the upper 3 packed elements from "a" to - the upper elements of "dst". - [round_note] - - dst[31:0] := -(a[31:0] * b[31:0]) + c[31:0] - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + +dst[31:0] := -(a[31:0] * b[31:0]) + c[31:0] +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - - Multiply the lower single-precision (32-bit) floating-point elements in "a" and - "b", and add the negated intermediate result to the lower element in "c". Store the - result in the lower element of "dst" using writemask "k" (the element is copied from "c" - when mask bit 0 is not set), and copy the upper 3 packed elements from "c" to the upper - elements of "dst". - [round_note] - - IF k[0] - dst[31:0] := -(a[31:0] * b[31:0]) + c[31:0] - ELSE - dst[31:0] := c[31:0] - FI - dst[127:32] := c[127:32] - dst[MAX:128] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper 3 packed elements from "c" to the upper elements of "dst". + [round_note] + +IF k[0] + dst[31:0] := -(a[31:0] * b[31:0]) + c[31:0] +ELSE + dst[31:0] := c[31:0] +FI +dst[127:32] := c[127:32] +dst[MAX:128] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
[AVX512F | immintrin.h | Arithmetic]
Identical to the preceding entry, but without [round_note] (the current MXCSR rounding mode is used).

[AVX512F | immintrin.h | Arithmetic]
Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". [round_note]
Operation:
  IF k[0]
    dst[31:0] := -(a[31:0] * b[31:0]) + c[31:0]
  ELSE
    dst[31:0] := a[31:0]
  FI
  dst[127:32] := a[127:32]
  dst[MAX:128] := 0

[AVX512F | immintrin.h | Arithmetic]
Identical to the preceding entry, but without [round_note].

[AVX512F | immintrin.h | Arithmetic]
Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". [round_note]
Operation:
  IF k[0]
    dst[31:0] := -(a[31:0] * b[31:0]) + c[31:0]
  ELSE
    dst[31:0] := 0
  FI
  dst[127:32] := a[127:32]
  dst[MAX:128] := 0

[AVX512F | immintrin.h | Arithmetic]
Identical to the preceding entry, but without [round_note].

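The six scalar FNMADD entries above differ only in where lane 0 falls back when mask bit 0 is clear ("c" for the mask3 forms, "a" for the mask forms, zero for the maskz forms) and in which operand supplies the untouched upper lanes. A minimal Rust sketch of two of the forms, using plain arrays instead of the real __m128 type (the helper names are hypothetical, not stdarch APIs):

    // mask3 form: lane 0 falls back to c[0]; upper 3 lanes always come from "c".
    fn fnmadd_mask3_ss(a: [f32; 4], b: [f32; 4], c: [f32; 4], k: u8) -> [f32; 4] {
        let mut dst = c;
        if k & 1 != 0 {
            dst[0] = -(a[0] * b[0]) + c[0];
        }
        dst
    }

    // maskz form: lane 0 is zeroed when inactive; upper 3 lanes come from "a".
    fn fnmadd_maskz_ss(a: [f32; 4], b: [f32; 4], c: [f32; 4], k: u8) -> [f32; 4] {
        let mut dst = a;
        dst[0] = if k & 1 != 0 { -(a[0] * b[0]) + c[0] } else { 0.0 };
        dst
    }
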
[AVX512F | immintrin.h | Arithmetic]
Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
Operation:
  FOR j := 0 to 7
    i := j*64
    IF k[j]
      dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i]
    ELSE
      dst[i+63:i] := 0
    FI
  ENDFOR
  dst[MAX:512] := 0

[AVX512F | immintrin.h | Arithmetic]
Identical to the preceding entry, but with [round_note] (an explicit rounding-control immediate).

[AVX512F | immintrin.h | Arithmetic]
Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
Operation:
  FOR j := 0 to 15
    i := j*32
    IF k[j]
      dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i]
    ELSE
      dst[i+31:i] := 0
    FI
  ENDFOR
  dst[MAX:512] := 0

[AVX512F | immintrin.h | Arithmetic]
Identical to the preceding entry, but with [round_note].

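All four packed maskz entries above share the same per-lane shape; a plain-array sketch of the double-precision case (hypothetical helper, standing in for the real __m512d intrinsic):

    fn fnmsub_maskz_pd(a: [f64; 8], b: [f64; 8], c: [f64; 8], k: u8) -> [f64; 8] {
        let mut dst = [0.0; 8];
        for j in 0..8 {
            if k & (1 << j) != 0 {
                dst[j] = -(a[j] * b[j]) - c[j]; // active lane
            } // inactive lanes stay zero (zeromask semantics)
        }
        dst
    }
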
[AVX512F | immintrin.h | Arithmetic]
Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". [round_note]
Operation:
  dst[63:0] := -(a[63:0] * b[63:0]) - c[63:0]
  dst[127:64] := a[127:64]
  dst[MAX:128] := 0

[AVX512F | immintrin.h | Arithmetic]
Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper element from "c" to the upper element of "dst". [round_note]
Operation:
  IF k[0]
    dst[63:0] := -(a[63:0] * b[63:0]) - c[63:0]
  ELSE
    dst[63:0] := c[63:0]
  FI
  dst[127:64] := c[127:64]
  dst[MAX:128] := 0

[AVX512F | immintrin.h | Arithmetic]
Identical to the preceding entry, but without [round_note].

[AVX512F | immintrin.h | Arithmetic]
Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". [round_note]
Operation:
  IF k[0]
    dst[63:0] := -(a[63:0] * b[63:0]) - c[63:0]
  ELSE
    dst[63:0] := a[63:0]
  FI
  dst[127:64] := a[127:64]
  dst[MAX:128] := 0

[AVX512F | immintrin.h | Arithmetic]
Identical to the preceding entry, but without [round_note].

[AVX512F | immintrin.h | Arithmetic]
Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". [round_note]
Operation:
  IF k[0]
    dst[63:0] := -(a[63:0] * b[63:0]) - c[63:0]
  ELSE
    dst[63:0] := 0
  FI
  dst[127:64] := a[127:64]
  dst[MAX:128] := 0

[AVX512F | immintrin.h | Arithmetic]
Identical to the preceding entry, but without [round_note].

[AVX512F | immintrin.h | Arithmetic]
Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". [round_note]
Operation:
  dst[31:0] := -(a[31:0] * b[31:0]) - c[31:0]
  dst[127:32] := a[127:32]
  dst[MAX:128] := 0

[AVX512F | immintrin.h | Arithmetic]
Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper 3 packed elements from "c" to the upper elements of "dst". [round_note]
Operation:
  IF k[0]
    dst[31:0] := -(a[31:0] * b[31:0]) - c[31:0]
  ELSE
    dst[31:0] := c[31:0]
  FI
  dst[127:32] := c[127:32]
  dst[MAX:128] := 0

[AVX512F | immintrin.h | Arithmetic]
Identical to the preceding entry, but without [round_note].

[AVX512F | immintrin.h | Arithmetic]
Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". [round_note]
Operation:
  IF k[0]
    dst[31:0] := -(a[31:0] * b[31:0]) - c[31:0]
  ELSE
    dst[31:0] := a[31:0]
  FI
  dst[127:32] := a[127:32]
  dst[MAX:128] := 0

[AVX512F | immintrin.h | Arithmetic]
Identical to the preceding entry, but without [round_note].

[AVX512F | immintrin.h | Arithmetic]
Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". [round_note]
Operation:
  IF k[0]
    dst[31:0] := -(a[31:0] * b[31:0]) - c[31:0]
  ELSE
    dst[31:0] := 0
  FI
  dst[127:32] := a[127:32]
  dst[MAX:128] := 0

[AVX512F | immintrin.h | Arithmetic]
Identical to the preceding entry, but without [round_note].

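Across the scalar FNMSUB family the only moving parts are the lane-0 fallback and the source of the upper lanes; one hedged Rust sketch covering all three masking kinds (the enum and helper are hypothetical, on plain arrays rather than __m128):

    #[derive(Clone, Copy)]
    enum MaskKind { Mask, Mask3, Maskz }

    fn fnmsub_ss(kind: MaskKind, a: [f32; 4], b: [f32; 4], c: [f32; 4], k: u8) -> [f32; 4] {
        // mask and maskz copy the upper 3 lanes from "a"; mask3 copies them from "c"
        let mut dst = match kind { MaskKind::Mask3 => c, _ => a };
        dst[0] = if k & 1 != 0 {
            -(a[0] * b[0]) - c[0]
        } else {
            match kind {
                MaskKind::Mask => a[0],  // writemask: keep "a"
                MaskKind::Mask3 => c[0], // writemask: keep "c"
                MaskKind::Maskz => 0.0,  // zeromask: zero the lane
            }
        };
        dst
    }
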
[AVX512F | immintrin.h | Arithmetic]
Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
Operation:
  FOR j := 0 to 7
    i := j*64
    IF k[j]
      dst[i+63:i] := a[i+63:i] * b[i+63:i]
    ELSE
      dst[i+63:i] := 0
    FI
  ENDFOR
  dst[MAX:512] := 0

[AVX512F | immintrin.h | Arithmetic]
Identical to the preceding entry, but with [round_note].

[AVX512F | immintrin.h | Arithmetic]
Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
Operation:
  FOR j := 0 to 15
    i := j*32
    IF k[j]
      dst[i+31:i] := a[i+31:i] * b[i+31:i]
    ELSE
      dst[i+31:i] := 0
    FI
  ENDFOR
  dst[MAX:512] := 0

[AVX512F | immintrin.h | Arithmetic]
Identical to the preceding entry, but with [round_note].

[AVX512F | immintrin.h | Arithmetic]
Multiply the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". [round_note]
Operation:
  IF k[0]
    dst[63:0] := a[63:0] * b[63:0]
  ELSE
    dst[63:0] := src[63:0]
  FI
  dst[127:64] := a[127:64]
  dst[MAX:128] := 0

[AVX512F | immintrin.h | Arithmetic]
Identical to the preceding entry, but without [round_note].

[AVX512F | immintrin.h | Arithmetic]
Multiply the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". [round_note]
Operation:
  IF k[0]
    dst[63:0] := a[63:0] * b[63:0]
  ELSE
    dst[63:0] := 0
  FI
  dst[127:64] := a[127:64]
  dst[MAX:128] := 0

[AVX512F | immintrin.h | Arithmetic]
Identical to the preceding entry, but without [round_note].

[AVX512F | immintrin.h | Arithmetic]
Multiply the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". [round_note]
Operation:
  dst[63:0] := a[63:0] * b[63:0]
  dst[127:64] := a[127:64]
  dst[MAX:128] := 0

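The [round_note] marker flags variants that take an explicit rounding-control immediate instead of using the current MXCSR rounding mode. In stdarch such immediates are const generic parameters; a hedged sketch of a call site, assuming the AVX512F binding _mm_mul_round_sd and the _MM_FROUND_* constants keep their usual stdarch shape:

    #[cfg(target_arch = "x86_64")]
    use core::arch::x86_64::*;

    // Round to nearest and suppress floating-point exceptions for this one
    // operation, regardless of the MXCSR state.
    #[cfg(target_arch = "x86_64")]
    #[target_feature(enable = "avx512f")]
    unsafe fn mul_sd_to_nearest(a: __m128d, b: __m128d) -> __m128d {
        _mm_mul_round_sd::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b)
    }
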
[AVX512F | immintrin.h | Arithmetic]
Multiply the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". [round_note]
Operation:
  IF k[0]
    dst[31:0] := a[31:0] * b[31:0]
  ELSE
    dst[31:0] := src[31:0]
  FI
  dst[127:32] := a[127:32]
  dst[MAX:128] := 0

[AVX512F | immintrin.h | Arithmetic]
Identical to the preceding entry, but without [round_note].

[AVX512F | immintrin.h | Arithmetic]
Multiply the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". [round_note]
Operation:
  IF k[0]
    dst[31:0] := a[31:0] * b[31:0]
  ELSE
    dst[31:0] := 0
  FI
  dst[127:32] := a[127:32]
  dst[MAX:128] := 0

[AVX512F | immintrin.h | Arithmetic]
Identical to the preceding entry, but without [round_note].

[AVX512F | immintrin.h | Arithmetic]
Multiply the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". [round_note]
Operation:
  dst[31:0] := a[31:0] * b[31:0]
  dst[127:32] := a[127:32]
  dst[MAX:128] := 0

[AVX512F | immintrin.h | Arithmetic]
Add packed 32-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
Operation:
  FOR j := 0 to 15
    i := j*32
    IF k[j]
      dst[i+31:i] := a[i+31:i] + b[i+31:i]
    ELSE
      dst[i+31:i] := 0
    FI
  ENDFOR
  dst[MAX:512] := 0

[AVX512F | immintrin.h | Arithmetic]
Add packed 64-bit integers in "a" and "b", and store the results in "dst".
Operation:
  FOR j := 0 to 7
    i := j*64
    dst[i+63:i] := a[i+63:i] + b[i+63:i]
  ENDFOR
  dst[MAX:512] := 0

[AVX512F | immintrin.h | Arithmetic]
Add packed 64-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
Operation:
  FOR j := 0 to 7
    i := j*64
    IF k[j]
      dst[i+63:i] := a[i+63:i] + b[i+63:i]
    ELSE
      dst[i+63:i] := src[i+63:i]
    FI
  ENDFOR
  dst[MAX:512] := 0

[AVX512F | immintrin.h | Arithmetic]
Identical to the preceding entry, but with zeromask "k" (inactive elements are zeroed out instead of copied from "src").

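The masked integer adds follow the same writemask/zeromask split as the floating-point entries; a plain-array model of the writemask form (hypothetical helper; SIMD integer add wraps on overflow, hence wrapping_add):

    fn add_epi64_mask(src: [i64; 8], k: u8, a: [i64; 8], b: [i64; 8]) -> [i64; 8] {
        let mut dst = src; // inactive lanes keep "src"
        for j in 0..8 {
            if k & (1 << j) != 0 {
                dst[j] = a[j].wrapping_add(b[j]);
            }
        }
        dst
    }
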
[AVX512F | immintrin.h | Arithmetic]
Multiply the low signed 32-bit integers from each packed 64-bit element in "a" and "b", and store the signed 64-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
Operation:
  FOR j := 0 to 7
    i := j*64
    IF k[j]
      dst[i+63:i] := SignExtend64(a[i+31:i]) * SignExtend64(b[i+31:i])
    ELSE
      dst[i+63:i] := src[i+63:i]
    FI
  ENDFOR
  dst[MAX:512] := 0

[AVX512F | immintrin.h | Arithmetic]
Identical to the preceding entry, but with zeromask "k" (inactive elements are zeroed out instead of copied from "src").

[AVX512F | immintrin.h | Arithmetic]
Multiply the low signed 32-bit integers from each packed 64-bit element in "a" and "b", and store the signed 64-bit results in "dst".
Operation:
  FOR j := 0 to 7
    i := j*64
    dst[i+63:i] := SignExtend64(a[i+31:i]) * SignExtend64(b[i+31:i])
  ENDFOR
  dst[MAX:512] := 0

[AVX512F | immintrin.h | Arithmetic]
Multiply the low unsigned 32-bit integers from each packed 64-bit element in "a" and "b", and store the unsigned 64-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
Operation:
  FOR j := 0 to 7
    i := j*64
    IF k[j]
      dst[i+63:i] := a[i+31:i] * b[i+31:i]
    ELSE
      dst[i+63:i] := src[i+63:i]
    FI
  ENDFOR
  dst[MAX:512] := 0

[AVX512F | immintrin.h | Arithmetic]
Identical to the preceding entry, but with zeromask "k" (inactive elements are zeroed out instead of copied from "src").

[AVX512F | immintrin.h | Arithmetic]
Multiply the low unsigned 32-bit integers from each packed 64-bit element in "a" and "b", and store the unsigned 64-bit results in "dst".
Operation:
  FOR j := 0 to 7
    i := j*64
    dst[i+63:i] := a[i+31:i] * b[i+31:i]
  ENDFOR
  dst[MAX:512] := 0

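These multiplies read only the low 32 bits of each 64-bit lane and widen to a full 64-bit product, so the signed and unsigned entries differ purely in the extension applied; per-lane models (hypothetical helpers):

    fn mul_epi32_lane(a: i64, b: i64) -> i64 {
        // SignExtend64(a[31:0]) * SignExtend64(b[31:0]); the product of two
        // 32-bit values cannot overflow an i64
        (a as i32 as i64) * (b as i32 as i64)
    }

    fn mul_epu32_lane(a: u64, b: u64) -> u64 {
        // zero-extended low halves
        (a as u32 as u64) * (b as u32 as u64)
    }
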
[AVX512F | immintrin.h | Arithmetic]
Subtract packed 32-bit integers in "b" from packed 32-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
Operation:
  FOR j := 0 to 15
    i := j*32
    IF k[j]
      dst[i+31:i] := a[i+31:i] - b[i+31:i]
    ELSE
      dst[i+31:i] := 0
    FI
  ENDFOR
  dst[MAX:512] := 0

[AVX512F | immintrin.h | Arithmetic]
Subtract packed 64-bit integers in "b" from packed 64-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
Operation:
  FOR j := 0 to 7
    i := j*64
    IF k[j]
      dst[i+63:i] := a[i+63:i] - b[i+63:i]
    ELSE
      dst[i+63:i] := src[i+63:i]
    FI
  ENDFOR
  dst[MAX:512] := 0

[AVX512F | immintrin.h | Arithmetic]
Identical to the preceding entry, but with zeromask "k" (inactive elements are zeroed out instead of copied from "src").

[AVX512F | immintrin.h | Arithmetic]
Subtract packed 64-bit integers in "b" from packed 64-bit integers in "a", and store the results in "dst".
Operation:
  FOR j := 0 to 7
    i := j*64
    dst[i+63:i] := a[i+63:i] - b[i+63:i]
  ENDFOR
  dst[MAX:512] := 0

[AVX512F | immintrin.h | Arithmetic]
Subtract packed double-precision (64-bit) floating-point elements in "b" from packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
Operation:
  FOR j := 0 to 7
    i := j*64
    IF k[j]
      dst[i+63:i] := a[i+63:i] - b[i+63:i]
    ELSE
      dst[i+63:i] := 0
    FI
  ENDFOR
  dst[MAX:512] := 0

[AVX512F | immintrin.h | Arithmetic]
Identical to the preceding entry, but with [round_note].

[AVX512F | immintrin.h | Arithmetic]
Subtract packed single-precision (32-bit) floating-point elements in "b" from packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
Operation:
  FOR j := 0 to 15
    i := j*32
    IF k[j]
      dst[i+31:i] := a[i+31:i] - b[i+31:i]
    ELSE
      dst[i+31:i] := 0
    FI
  ENDFOR
  dst[MAX:512] := 0

[AVX512F | immintrin.h | Arithmetic]
Identical to the preceding entry, but with [round_note].

[AVX512F | immintrin.h | Arithmetic]
Subtract the lower double-precision (64-bit) floating-point element in "b" from the lower double-precision (64-bit) floating-point element in "a", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". [round_note]
Operation:
  IF k[0]
    dst[63:0] := a[63:0] - b[63:0]
  ELSE
    dst[63:0] := src[63:0]
  FI
  dst[127:64] := a[127:64]
  dst[MAX:128] := 0

[AVX512F | immintrin.h | Arithmetic]
Identical to the preceding entry, but without [round_note].

[AVX512F | immintrin.h | Arithmetic]
Subtract the lower double-precision (64-bit) floating-point element in "b" from the lower double-precision (64-bit) floating-point element in "a", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". [round_note]
Operation:
  IF k[0]
    dst[63:0] := a[63:0] - b[63:0]
  ELSE
    dst[63:0] := 0
  FI
  dst[127:64] := a[127:64]
  dst[MAX:128] := 0

[AVX512F | immintrin.h | Arithmetic]
Identical to the preceding entry, but without [round_note].

[AVX512F | immintrin.h | Arithmetic]
Subtract the lower double-precision (64-bit) floating-point element in "b" from the lower double-precision (64-bit) floating-point element in "a", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". [round_note]
Operation:
  dst[63:0] := a[63:0] - b[63:0]
  dst[127:64] := a[127:64]
  dst[MAX:128] := 0

- - - - - - - Subtract the lower single-precision (32-bit) floating-point element in "b" from - the lower single-precision (32-bit) floating-point element in "a", store the result in - the lower element of "dst" using writemask "k" (the element is copied from "src" when - mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper - elements of "dst". - [round_note] - - IF k[0] - dst[31:0] := a[31:0] - b[31:0] - ELSE - dst[31:0] := src[31:0] - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + + Subtract the lower single-precision (32-bit) floating-point element in "b" from the lower single-precision (32-bit) floating-point element in "a", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + +IF k[0] + dst[31:0] := a[31:0] - b[31:0] +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
Description: Subtract the lower single-precision (32-bit) floating-point element in "b" from the lower single-precision (32-bit) floating-point element in "a", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".
Operation:
    IF k[0]
        dst[31:0] := a[31:0] - b[31:0]
    ELSE
        dst[31:0] := src[31:0]
    FI
    dst[127:32] := a[127:32]
    dst[MAX:128] := 0
CPUID: AVX512F | Header: immintrin.h | Category: Arithmetic

Description: Subtract the lower single-precision (32-bit) floating-point element in "b" from the lower single-precision (32-bit) floating-point element in "a", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". [round_note]
Operation:
    IF k[0]
        dst[31:0] := a[31:0] - b[31:0]
    ELSE
        dst[31:0] := 0
    FI
    dst[127:32] := a[127:32]
    dst[MAX:128] := 0
CPUID: AVX512F | Header: immintrin.h | Category: Arithmetic

Description: Subtract the lower single-precision (32-bit) floating-point element in "b" from the lower single-precision (32-bit) floating-point element in "a", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".
Operation:
    IF k[0]
        dst[31:0] := a[31:0] - b[31:0]
    ELSE
        dst[31:0] := 0
    FI
    dst[127:32] := a[127:32]
    dst[MAX:128] := 0
CPUID: AVX512F | Header: immintrin.h | Category: Arithmetic

Description: Subtract the lower single-precision (32-bit) floating-point element in "b" from the lower single-precision (32-bit) floating-point element in "a", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". [round_note]
Operation:
    dst[31:0] := a[31:0] - b[31:0]
    dst[127:32] := a[127:32]
    dst[MAX:128] := 0
CPUID: AVX512F | Header: immintrin.h | Category: Arithmetic
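Judging by their descriptions, these four entries are the AVX-512 scalar subtract family from immintrin.h (presumably _mm_mask_sub_ss, _mm_maskz_sub_round_ss, _mm_maskz_sub_ss and _mm_sub_round_ss; the intrinsic names themselves did not survive extraction, so the mapping is an assumption). A minimal C sketch of the merge- and zero-masking forms, assuming a compiler with AVX512F enabled (e.g. -mavx512f):

    #include <immintrin.h>

    /* Lane 0 of the result is a0 - b0 only when mask bit 0 is set; lanes
       1..3 always come from `a`, exactly as in the pseudocode above. */
    __m128 sub_lane0_merge(__m128 src, __mmask8 k, __m128 a, __m128 b) {
        return _mm_mask_sub_ss(src, k, a, b); /* unselected lane <- src */
    }

    __m128 sub_lane0_zero(__mmask8 k, __m128 a, __m128 b) {
        return _mm_maskz_sub_ss(k, a, b);     /* unselected lane <- 0 */
    }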
Description: Store 512-bits (composed of 8 packed 64-bit integers) from "a" into memory. "mem_addr" does not need to be aligned on any particular boundary.
Operation:
    MEM[mem_addr+511:mem_addr] := a[511:0]
CPUID: AVX512F | Header: immintrin.h | Category: Store

Description: Store 512-bits (composed of 16 packed 32-bit integers) from "a" into memory. "mem_addr" does not need to be aligned on any particular boundary.
Operation:
    MEM[mem_addr+511:mem_addr] := a[511:0]
CPUID: AVX512F | Header: immintrin.h | Category: Store

Description: Store 16-bit mask from "a" into memory.
Operation:
    MEM[mem_addr+15:mem_addr] := a[15:0]
CPUID: AVX512F | Header: immintrin.h | Category: Store
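Assuming these entries correspond to _mm512_storeu_epi64/_mm512_storeu_epi32 and _store_mask16 (the names are not preserved above), a short C sketch of an unaligned 512-bit store followed by spilling a mask register:

    #include <immintrin.h>

    /* `buf` needs no particular alignment; the mask store writes the low
       16 bits of k, per MEM[mem_addr+15:mem_addr] := a[15:0] above. */
    void store_vec_and_mask(long long *buf, __mmask16 *mask_out,
                            __m512i v, __mmask16 k) {
        _mm512_storeu_epi64(buf, v);
        _store_mask16(mask_out, k);
    }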
Description: Contiguously store the active double-precision (64-bit) floating-point elements in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".
Operation:
    size := 64
    m := base_addr
    FOR j := 0 to 7
        i := j*64
        IF k[j]
            MEM[m+size-1:m] := a[i+63:i]
            m := m + size
        FI
    ENDFOR
CPUID: AVX512F | Header: immintrin.h | Category: Swizzle, Store

Description: Contiguously store the active single-precision (32-bit) floating-point elements in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".
Operation:
    size := 32
    m := base_addr
    FOR j := 0 to 15
        i := j*32
        IF k[j]
            MEM[m+size-1:m] := a[i+31:i]
            m := m + size
        FI
    ENDFOR
CPUID: AVX512F | Header: immintrin.h | Category: Swizzle, Store
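The two entries above describe compress-stores (presumably _mm512_mask_compressstoreu_pd and _mm512_mask_compressstoreu_ps). The key point of the pseudocode is that "m" only advances for set mask bits, so the selected elements land contiguously. A hedged C sketch:

    #include <immintrin.h>

    /* With k = 0b00001010, out[0] receives a[1] and out[1] receives a[3];
       popcount(k) elements are written, the rest of `out` is untouched. */
    void compress_selected(double *out, __mmask8 k, __m512d a) {
        _mm512_mask_compressstoreu_pd(out, k, a);
    }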
Description: Store packed 32-bit integers from "a" into memory using writemask "k". "mem_addr" does not need to be aligned on any particular boundary.
Operation:
    FOR j := 0 to 15
        i := j*32
        IF k[j]
            MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i]
        FI
    ENDFOR
CPUID: AVX512F | Header: immintrin.h | Category: Store

Description: Store 512-bits of integer data from "a" into memory. "mem_addr" does not need to be aligned on any particular boundary.
Operation:
    MEM[mem_addr+511:mem_addr] := a[511:0]
CPUID: AVX512F | Header: immintrin.h | Category: Store

Description: Store packed 64-bit integers from "a" into memory using writemask "k". "mem_addr" does not need to be aligned on any particular boundary.
Operation:
    FOR j := 0 to 7
        i := j*64
        IF k[j]
            MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i]
        FI
    ENDFOR
CPUID: AVX512F | Header: immintrin.h | Category: Store

Description: Store 512-bits of integer data from "a" into memory using a non-temporal memory hint. "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.
Operation:
    MEM[mem_addr+511:mem_addr] := a[511:0]
CPUID: AVX512F | Header: immintrin.h | Category: Store
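A sketch contrasting the masked unaligned store with the non-temporal store described above, assuming the usual immintrin.h names _mm512_mask_storeu_epi32 and _mm512_stream_si512 (note that some headers type the streaming destination as __m512i* rather than void*):

    #include <immintrin.h>

    void masked_then_stream(int *dst, __mmask16 k, __m512i a,
                            void *dst64 /* must be 64-byte aligned */) {
        _mm512_mask_storeu_epi32(dst, k, a); /* untouched where k[j] = 0 */
        _mm512_stream_si512(dst64, a);       /* cache-bypassing hint */
    }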
Description: Store 512-bits (composed of 8 packed double-precision (64-bit) floating-point elements) from "a" into memory using a non-temporal memory hint. "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.
Operation:
    MEM[mem_addr+511:mem_addr] := a[511:0]
CPUID: AVX512F | Header: immintrin.h | Category: Store

Description: Store 512-bits (composed of 16 packed single-precision (32-bit) floating-point elements) from "a" into memory using a non-temporal memory hint. "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.
Operation:
    MEM[mem_addr+511:mem_addr] := a[511:0]
CPUID: AVX512F | Header: immintrin.h | Category: Store

Description: Store the lower double-precision (64-bit) floating-point element from "a" into memory using writemask "k". "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.
Operation:
    IF k[0]
        MEM[mem_addr+63:mem_addr] := a[63:0]
    FI
CPUID: AVX512F | Header: immintrin.h | Category: Store

Description: Store the lower single-precision (32-bit) floating-point element from "a" into memory using writemask "k". "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.
Operation:
    IF k[0]
        MEM[mem_addr+31:mem_addr] := a[31:0]
    FI
CPUID: AVX512F | Header: immintrin.h | Category: Store
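For the scalar masked stores (presumably _mm_mask_store_sd and _mm_mask_store_ss), nothing at all is written when mask bit 0 is clear, which makes them usable as a branch-free conditional one-element store. A minimal sketch, assuming a 16-byte-aligned destination as the entries require:

    #include <immintrin.h>

    void store_low_double_if(double *dst /* 16-byte aligned */,
                             __mmask8 k, __m128d a) {
        _mm_mask_store_sd(dst, k, a); /* writes a[63:0] only if k[0] */
    }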
Description: Store packed double-precision (64-bit) floating-point elements from "a" into memory using writemask "k". "mem_addr" does not need to be aligned on any particular boundary.
Operation:
    FOR j := 0 to 7
        i := j*64
        IF k[j]
            MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i]
        FI
    ENDFOR
CPUID: AVX512F | Header: immintrin.h | Category: Store

Description: Store 512-bits (composed of 8 packed double-precision (64-bit) floating-point elements) from "a" into memory. "mem_addr" does not need to be aligned on any particular boundary.
Operation:
    MEM[mem_addr+511:mem_addr] := a[511:0]
CPUID: AVX512F | Header: immintrin.h | Category: Store

Description: Store packed single-precision (32-bit) floating-point elements from "a" into memory using writemask "k". "mem_addr" does not need to be aligned on any particular boundary.
Operation:
    FOR j := 0 to 15
        i := j*32
        IF k[j]
            MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i]
        FI
    ENDFOR
CPUID: AVX512F | Header: immintrin.h | Category: Store

Description: Store 512-bits (composed of 16 packed single-precision (32-bit) floating-point elements) from "a" into memory. "mem_addr" does not need to be aligned on any particular boundary.
Operation:
    MEM[mem_addr+511:mem_addr] := a[511:0]
CPUID: AVX512F | Header: immintrin.h | Category: Store
Description: Contiguously store the active 32-bit integers in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".
Operation:
    size := 32
    m := base_addr
    FOR j := 0 to 15
        i := j*32
        IF k[j]
            MEM[m+size-1:m] := a[i+31:i]
            m := m + size
        FI
    ENDFOR
CPUID: AVX512F | Header: immintrin.h | Category: Swizzle, Store

Description: Contiguously store the active 64-bit integers in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".
Operation:
    size := 64
    m := base_addr
    FOR j := 0 to 7
        i := j*64
        IF k[j]
            MEM[m+size-1:m] := a[i+63:i]
            m := m + size
        FI
    ENDFOR
CPUID: AVX512F | Header: immintrin.h | Category: Swizzle, Store
Description: Scatter 64-bit integers from "a" into memory using 32-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.
Operation:
    FOR j := 0 to 7
        i := j*64
        m := j*32
        addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
        MEM[addr+63:addr] := a[i+63:i]
    ENDFOR
CPUID: AVX512F | Header: immintrin.h | Category: Store

Description: Scatter 64-bit integers from "a" into memory using 32-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.
Operation:
    FOR j := 0 to 7
        i := j*64
        m := j*32
        IF k[j]
            addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
            MEM[addr+63:addr] := a[i+63:i]
        FI
    ENDFOR
CPUID: AVX512F | Header: immintrin.h | Category: Store

Description: Scatter 32-bit integers from "a" into memory using 64-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.
Operation:
    FOR j := 0 to 7
        i := j*32
        m := j*64
        addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
        MEM[addr+31:addr] := a[i+31:i]
    ENDFOR
CPUID: AVX512F | Header: immintrin.h | Category: Store

Description: Scatter 32-bit integers from "a" into memory using 64-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.
Operation:
    FOR j := 0 to 7
        i := j*32
        m := j*64
        IF k[j]
            addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
            MEM[addr+31:addr] := a[i+31:i]
        FI
    ENDFOR
CPUID: AVX512F | Header: immintrin.h | Category: Store

Description: Scatter 64-bit integers from "a" into memory using 64-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.
Operation:
    FOR j := 0 to 7
        i := j*64
        m := j*64
        addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
        MEM[addr+63:addr] := a[i+63:i]
    ENDFOR
CPUID: AVX512F | Header: immintrin.h | Category: Store

Description: Scatter 64-bit integers from "a" into memory using 64-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.
Operation:
    FOR j := 0 to 7
        i := j*64
        m := j*64
        IF k[j]
            addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
            MEM[addr+63:addr] := a[i+63:i]
        FI
    ENDFOR
CPUID: AVX512F | Header: immintrin.h | Category: Store
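A sketch of the unmasked and masked scatter with 32-bit indices (assumed to be _mm512_i32scatter_epi64 and _mm512_mask_i32scatter_epi64; the names were lost above). "scale" must be a compile-time constant of 1, 2, 4 or 8; with scale = 8 each index counts in whole 64-bit elements:

    #include <immintrin.h>

    /* Element j of `a` goes to base[vindex[j]] (scale 8 = element stride). */
    void scatter8(long long *base, __m256i vindex, __m512i a) {
        _mm512_i32scatter_epi64(base, vindex, a, 8);
    }

    /* Masked form: lanes with k[j] = 0 are not written at all. */
    void scatter8_masked(long long *base, __mmask8 k,
                         __m256i vindex, __m512i a) {
        _mm512_mask_i32scatter_epi64(base, k, vindex, a, 8);
    }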
Description: Scatter double-precision (64-bit) floating-point elements from "a" into memory using 32-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.
Operation:
    FOR j := 0 to 7
        i := j*64
        m := j*32
        addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
        MEM[addr+63:addr] := a[i+63:i]
    ENDFOR
CPUID: AVX512F | Header: immintrin.h | Category: Store

Description: Scatter double-precision (64-bit) floating-point elements from "a" into memory using 32-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.
Operation:
    FOR j := 0 to 7
        i := j*64
        m := j*32
        IF k[j]
            addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
            MEM[addr+63:addr] := a[i+63:i]
        FI
    ENDFOR
CPUID: AVX512F | Header: immintrin.h | Category: Store

Description: Scatter double-precision (64-bit) floating-point elements from "a" into memory using 64-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.
Operation:
    FOR j := 0 to 7
        i := j*64
        m := j*64
        addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
        MEM[addr+63:addr] := a[i+63:i]
    ENDFOR
CPUID: AVX512F | Header: immintrin.h | Category: Store

Description: Scatter double-precision (64-bit) floating-point elements from "a" into memory using 64-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.
Operation:
    FOR j := 0 to 7
        i := j*64
        m := j*64
        IF k[j]
            addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
            MEM[addr+63:addr] := a[i+63:i]
        FI
    ENDFOR
CPUID: AVX512F | Header: immintrin.h | Category: Store
Description: Scatter single-precision (32-bit) floating-point elements from "a" into memory using 64-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.
Operation:
    FOR j := 0 to 7
        i := j*32
        m := j*64
        addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
        MEM[addr+31:addr] := a[i+31:i]
    ENDFOR
CPUID: AVX512F | Header: immintrin.h | Category: Store

Description: Scatter single-precision (32-bit) floating-point elements from "a" into memory using 64-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.
Operation:
    FOR j := 0 to 7
        i := j*32
        m := j*64
        IF k[j]
            addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
            MEM[addr+31:addr] := a[i+31:i]
        FI
    ENDFOR
CPUID: AVX512F | Header: immintrin.h | Category: Store
Description: Multiplies elements in packed 64-bit integer vectors "a" and "b" together, storing the lower 64 bits of the result in "dst".
Operation:
    FOR j := 0 to 7
        i := j*64
        dst[i+63:i] := a[i+63:i] * b[i+63:i]
    ENDFOR
    dst[MAX:512] := 0
CPUID: AVX512F | Header: immintrin.h | Category: Store

Description: Multiplies elements in packed 64-bit integer vectors "a" and "b" together, storing the lower 64 bits of the result in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
Operation:
    FOR j := 0 to 7
        i := j*64
        IF k[j]
            dst[i+63:i] := a[i+63:i] * b[i+63:i]
        ELSE
            dst[i+63:i] := src[i+63:i]
        FI
    ENDFOR
    dst[MAX:512] := 0
CPUID: AVX512F | Header: immintrin.h | Category: Store
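These two entries describe the full 64-bit lane-wise multiply that AVX512F lacks as a single instruction (AVX512DQ's VPMULLQ provides it in hardware). The intrinsic is presumably _mm512_mullox_epi64, which compilers expand to a short 32x32-bit multiply/shift/add sequence; the name is an assumption, since it was lost above. A sketch:

    #include <immintrin.h>

    /* Low 64 bits of each 64x64-bit product, as in the pseudocode. */
    __m512i mul_low64(__m512i a, __m512i b) {
        return _mm512_mullox_epi64(a, b);
    }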
Description: Load 512-bits (composed of 8 packed 64-bit integers) from memory into "dst". "mem_addr" does not need to be aligned on any particular boundary.
Operation:
    dst[511:0] := MEM[mem_addr+511:mem_addr]
    dst[MAX:512] := 0
CPUID: AVX512F | Header: immintrin.h | Category: Load

Description: Load 512-bits (composed of 16 packed 32-bit integers) from memory into "dst". "mem_addr" does not need to be aligned on any particular boundary.
Operation:
    dst[511:0] := MEM[mem_addr+511:mem_addr]
    dst[MAX:512] := 0
CPUID: AVX512F | Header: immintrin.h | Category: Load

Description: Load 16-bit mask from memory into "k".
Operation:
    k[15:0] := MEM[mem_addr+15:mem_addr]
CPUID: AVX512F | Header: immintrin.h | Category: Load
Description: Load contiguous active double-precision (64-bit) floating-point elements from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
Operation:
    m := 0
    FOR j := 0 to 7
        i := j*64
        IF k[j]
            dst[i+63:i] := MEM[mem_addr+m+63:mem_addr+m]
            m := m + 64
        ELSE
            dst[i+63:i] := src[i+63:i]
        FI
    ENDFOR
    dst[MAX:512] := 0
CPUID: AVX512F | Header: immintrin.h | Category: Swizzle, Load

Description: Load contiguous active double-precision (64-bit) floating-point elements from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
Operation:
    m := 0
    FOR j := 0 to 7
        i := j*64
        IF k[j]
            dst[i+63:i] := MEM[mem_addr+m+63:mem_addr+m]
            m := m + 64
        ELSE
            dst[i+63:i] := 0
        FI
    ENDFOR
    dst[MAX:512] := 0
CPUID: AVX512F | Header: immintrin.h | Category: Swizzle, Load

Description: Load contiguous active single-precision (32-bit) floating-point elements from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
Operation:
    m := 0
    FOR j := 0 to 15
        i := j*32
        IF k[j]
            dst[i+31:i] := MEM[mem_addr+m+31:mem_addr+m]
            m := m + 32
        ELSE
            dst[i+31:i] := src[i+31:i]
        FI
    ENDFOR
    dst[MAX:512] := 0
CPUID: AVX512F | Header: immintrin.h | Category: Swizzle, Load

Description: Load contiguous active single-precision (32-bit) floating-point elements from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
Operation:
    m := 0
    FOR j := 0 to 15
        i := j*32
        IF k[j]
            dst[i+31:i] := MEM[mem_addr+m+31:mem_addr+m]
            m := m + 32
        ELSE
            dst[i+31:i] := 0
        FI
    ENDFOR
    dst[MAX:512] := 0
CPUID: AVX512F | Header: immintrin.h | Category: Swizzle, Load
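The expand-loads above are the inverse of the compress-stores earlier in this hunk: popcount(k) contiguous elements are read and spread into the lanes selected by "k". A hedged C sketch, assuming the zero-masking form is _mm512_maskz_expandloadu_pd:

    #include <immintrin.h>

    /* Reads popcount(k) doubles from `mem`; unselected lanes become 0. */
    __m512d expand_selected(__mmask8 k, const double *mem) {
        return _mm512_maskz_expandloadu_pd(k, mem);
    }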
Description: Gather double-precision (64-bit) floating-point elements from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.
Operation:
    FOR j := 0 to 7
        i := j*64
        m := j*32
        addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
        dst[i+63:i] := MEM[addr+63:addr]
    ENDFOR
    dst[MAX:512] := 0
CPUID: AVX512F | Header: immintrin.h | Category: Load

Description: Gather double-precision (64-bit) floating-point elements from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.
Operation:
    FOR j := 0 to 7
        i := j*64
        m := j*32
        IF k[j]
            addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
            dst[i+63:i] := MEM[addr+63:addr]
        ELSE
            dst[i+63:i] := src[i+63:i]
        FI
    ENDFOR
    dst[MAX:512] := 0
CPUID: AVX512F | Header: immintrin.h | Category: Load

Description: Gather double-precision (64-bit) floating-point elements from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.
Operation:
    FOR j := 0 to 7
        i := j*64
        m := j*64
        addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
        dst[i+63:i] := MEM[addr+63:addr]
    ENDFOR
    dst[MAX:512] := 0
CPUID: AVX512F | Header: immintrin.h | Category: Load

Description: Gather double-precision (64-bit) floating-point elements from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.
Operation:
    FOR j := 0 to 7
        i := j*64
        m := j*64
        IF k[j]
            addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
            dst[i+63:i] := MEM[addr+63:addr]
        ELSE
            dst[i+63:i] := src[i+63:i]
        FI
    ENDFOR
    dst[MAX:512] := 0
CPUID: AVX512F | Header: immintrin.h | Category: Load

Description: Gather single-precision (32-bit) floating-point elements from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.
Operation:
    FOR j := 0 to 7
        i := j*32
        m := j*64
        addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
        dst[i+31:i] := MEM[addr+31:addr]
    ENDFOR
    dst[MAX:256] := 0
CPUID: AVX512F | Header: immintrin.h | Category: Load

Description: Gather single-precision (32-bit) floating-point elements from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.
Operation:
    FOR j := 0 to 7
        i := j*32
        m := j*64
        IF k[j]
            addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
            dst[i+31:i] := MEM[addr+31:addr]
        ELSE
            dst[i+31:i] := src[i+31:i]
        FI
    ENDFOR
    dst[MAX:256] := 0
CPUID: AVX512F | Header: immintrin.h | Category: Load
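A sketch of the double-precision gather with 32-bit indices (assumed to be _mm512_i32gather_pd). Unlike the AVX2 gathers, the AVX-512 forms take "vindex" before "base_addr":

    #include <immintrin.h>

    /* Lane j of the result is base[vindex[j]] when scale = 8. */
    __m512d gather8(__m256i vindex, const double *base) {
        return _mm512_i32gather_pd(vindex, base, 8);
    }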
Description: Load packed double-precision (64-bit) floating-point elements from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.
Operation:
    FOR j := 0 to 7
        i := j*64
        IF k[j]
            dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
        ELSE
            dst[i+63:i] := 0
        FI
    ENDFOR
    dst[MAX:512] := 0
CPUID: AVX512F | Header: immintrin.h | Category: Load

Description: Load packed single-precision (32-bit) floating-point elements from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.
Operation:
    FOR j := 0 to 15
        i := j*32
        IF k[j]
            dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
        ELSE
            dst[i+31:i] := 0
        FI
    ENDFOR
    dst[MAX:512] := 0
CPUID: AVX512F | Header: immintrin.h | Category: Load

Description: Load packed 32-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.
Operation:
    FOR j := 0 to 15
        i := j*32
        IF k[j]
            dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
        ELSE
            dst[i+31:i] := 0
        FI
    ENDFOR
    dst[MAX:512] := 0
CPUID: AVX512F | Header: immintrin.h | Category: Load

Description: Load packed 64-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.
Operation:
    FOR j := 0 to 7
        i := j*64
        IF k[j]
            dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
        ELSE
            dst[i+63:i] := 0
        FI
    ENDFOR
    dst[MAX:512] := 0
CPUID: AVX512F | Header: immintrin.h | Category: Load
Description: Load 512-bits of integer data from memory into "dst". "mem_addr" does not need to be aligned on any particular boundary.
Operation:
    dst[511:0] := MEM[mem_addr+511:mem_addr]
    dst[MAX:512] := 0
CPUID: AVX512F | Header: immintrin.h | Category: Load

Description: Load packed 32-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "mem_addr" does not need to be aligned on any particular boundary.
Operation:
    FOR j := 0 to 15
        i := j*32
        IF k[j]
            dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
        ELSE
            dst[i+31:i] := src[i+31:i]
        FI
    ENDFOR
    dst[MAX:512] := 0
CPUID: AVX512F | Header: immintrin.h | Category: Load

Description: Load packed 32-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "mem_addr" does not need to be aligned on any particular boundary.
Operation:
    FOR j := 0 to 15
        i := j*32
        IF k[j]
            dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
        ELSE
            dst[i+31:i] := 0
        FI
    ENDFOR
    dst[MAX:512] := 0
CPUID: AVX512F | Header: immintrin.h | Category: Load

Description: Load packed 64-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "mem_addr" does not need to be aligned on any particular boundary.
Operation:
    FOR j := 0 to 7
        i := j*64
        IF k[j]
            dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
        ELSE
            dst[i+63:i] := src[i+63:i]
        FI
    ENDFOR
    dst[MAX:512] := 0
CPUID: AVX512F | Header: immintrin.h | Category: Load

Description: Load packed 64-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "mem_addr" does not need to be aligned on any particular boundary.
Operation:
    FOR j := 0 to 7
        i := j*64
        IF k[j]
            dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
        ELSE
            dst[i+63:i] := 0
        FI
    ENDFOR
    dst[MAX:512] := 0
CPUID: AVX512F | Header: immintrin.h | Category: Load
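The masked unaligned loads suppress faults for masked-off lanes, so they are the standard way to read a partial tail of an array without overrunning it. A sketch of the zero-masking form (assumed name _mm512_maskz_loadu_epi32):

    #include <immintrin.h>

    /* Read only the first n (< 16) ints at p; remaining lanes are 0.
       Lanes with a clear mask bit are not touched in memory. */
    __m512i load_tail(const int *p, unsigned n) {
        __mmask16 k = (__mmask16)((1u << n) - 1);
        return _mm512_maskz_loadu_epi32(k, p);
    }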
- - - Load 512-bits of integer data from memory into "dst" using a non-temporal - memory hint. - "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may - be generated. - - dst[511:0] := MEM[mem_addr+511:mem_addr] - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Load + + + Load 512-bits of integer data from memory into "dst" using a non-temporal memory hint. + "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated. + +dst[511:0] := MEM[mem_addr+511:mem_addr] +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Load
- - - - - Load a double-precision (64-bit) floating-point element from memory into the - lower element of "dst" using writemask "k" (the element is copied from "src" when mask - bit 0 is not set), and set the upper element of "dst" to zero. "mem_addr" must be - aligned on a 16-byte boundary or a general-protection exception may be generated. - - IF k[0] - dst[63:0] := MEM[mem_addr+63:mem_addr] - ELSE - dst[63:0] := src[63:0] - FI - dst[MAX:64] := 0 - - - AVX512F -
immintrin.h
- Load + + + + + Load a double-precision (64-bit) floating-point element from memory into the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and set the upper element of "dst" to zero. "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +IF k[0] + dst[63:0] := MEM[mem_addr+63:mem_addr] +ELSE + dst[63:0] := src[63:0] +FI +dst[MAX:64] := 0 + + + AVX512F +
immintrin.h
+ Load
- - - - Load a double-precision (64-bit) floating-point element from memory into the - lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is - not set), and set the upper element of "dst" to zero. "mem_addr" must be aligned on a - 16-byte boundary or a general-protection exception may be generated. - - IF k[0] - dst[63:0] := MEM[mem_addr+63:mem_addr] - ELSE - dst[63:0] := 0 - FI - dst[MAX:64] := 0 - - - AVX512F -
immintrin.h
- Load + + + + Load a double-precision (64-bit) floating-point element from memory into the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and set the upper element of "dst" to zero. "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +IF k[0] + dst[63:0] := MEM[mem_addr+63:mem_addr] +ELSE + dst[63:0] := 0 +FI +dst[MAX:64] := 0 + + + AVX512F +
immintrin.h
+ Load
- - - - - Load a single-precision (32-bit) floating-point element from memory into the - lower element of "dst" using writemask "k" (the element is copied from "src" when mask - bit 0 is not set), and set the upper elements of "dst" to zero. "mem_addr" must be - aligned on a 16-byte boundary or a general-protection exception may be generated. - - IF k[0] - dst[31:0] := MEM[mem_addr+31:mem_addr] - ELSE - dst[31:0] := src[31:0] - FI - dst[MAX:32] := 0 - - - AVX512F -
immintrin.h
- Load + + + + + Load a single-precision (32-bit) floating-point element from memory into the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and set the upper elements of "dst" to zero. "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +IF k[0] + dst[31:0] := MEM[mem_addr+31:mem_addr] +ELSE + dst[31:0] := src[31:0] +FI +dst[MAX:32] := 0 + + + AVX512F +
immintrin.h
+ Load
- - - - Load a single-precision (32-bit) floating-point element from memory into the - lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is - not set), and set the upper elements of "dst" to zero. "mem_addr" must be aligned on a - 16-byte boundary or a general-protection exception may be generated. - - IF k[0] - dst[31:0] := MEM[mem_addr+31:mem_addr] - ELSE - dst[31:0] := 0 - FI - dst[MAX:32] := 0 - - - AVX512F -
immintrin.h
- Load + + + + Load a single-precision (32-bit) floating-point element from memory into the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and set the upper elements of "dst" to zero. "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +IF k[0] + dst[31:0] := MEM[mem_addr+31:mem_addr] +ELSE + dst[31:0] := 0 +FI +dst[MAX:32] := 0 + + + AVX512F +
immintrin.h
+ Load
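For the scalar masked loads (presumably _mm_mask_load_sd and friends; the names were lost above), the upper part of the destination is always zeroed regardless of the mask. A minimal sketch honoring the stated 16-byte alignment requirement:

    #include <immintrin.h>

    __m128d load_low_double_if(__m128d src, __mmask8 k,
                               const double *mem /* 16-byte aligned */) {
        return _mm_mask_load_sd(src, k, mem); /* lane 1 is zeroed */
    }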
Description: Load 512-bits (composed of 8 packed double-precision (64-bit) floating-point elements) from memory into "dst". "mem_addr" does not need to be aligned on any particular boundary.
Operation:
    dst[511:0] := MEM[mem_addr+511:mem_addr]
    dst[MAX:512] := 0
CPUID: AVX512F | Header: immintrin.h | Category: Load

Description: Load packed double-precision (64-bit) floating-point elements from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "mem_addr" does not need to be aligned on any particular boundary.
Operation:
    FOR j := 0 to 7
        i := j*64
        IF k[j]
            dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
        ELSE
            dst[i+63:i] := src[i+63:i]
        FI
    ENDFOR
    dst[MAX:512] := 0
CPUID: AVX512F | Header: immintrin.h | Category: Load

Description: Load packed double-precision (64-bit) floating-point elements from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "mem_addr" does not need to be aligned on any particular boundary.
Operation:
    FOR j := 0 to 7
        i := j*64
        IF k[j]
            dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
        ELSE
            dst[i+63:i] := 0
        FI
    ENDFOR
    dst[MAX:512] := 0
CPUID: AVX512F | Header: immintrin.h | Category: Load

Description: Load 512-bits (composed of 16 packed single-precision (32-bit) floating-point elements) from memory into "dst". "mem_addr" does not need to be aligned on any particular boundary.
Operation:
    dst[511:0] := MEM[mem_addr+511:mem_addr]
    dst[MAX:512] := 0
CPUID: AVX512F | Header: immintrin.h | Category: Load

Description: Load packed single-precision (32-bit) floating-point elements from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "mem_addr" does not need to be aligned on any particular boundary.
Operation:
    FOR j := 0 to 15
        i := j*32
        IF k[j]
            dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
        ELSE
            dst[i+31:i] := src[i+31:i]
        FI
    ENDFOR
    dst[MAX:512] := 0
CPUID: AVX512F | Header: immintrin.h | Category: Load

Description: Load packed single-precision (32-bit) floating-point elements from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "mem_addr" does not need to be aligned on any particular boundary.
Operation:
    FOR j := 0 to 15
        i := j*32
        IF k[j]
            dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
        ELSE
            dst[i+31:i] := 0
        FI
    ENDFOR
    dst[MAX:512] := 0
CPUID: AVX512F | Header: immintrin.h | Category: Load
- Swizzle - - - - - Load contiguous active 32-bit integers from unaligned memory at "mem_addr" - (those with their respective bit set in mask "k"), and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - m := 0 - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := MEM[mem_addr+m+31:mem_addr+m] - m := m + 32 - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Load + Swizzle + + + + + Load contiguous active 32-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[mem_addr+m+31:mem_addr+m] + m := m + 32 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Load
- Swizzle - - - - Load contiguous active 32-bit integers from unaligned memory at "mem_addr" - (those with their respective bit set in mask "k"), and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - m := 0 - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := MEM[mem_addr+m+31:mem_addr+m] - m := m + 32 - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Load + Swizzle + + + + Load contiguous active 32-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := MEM[mem_addr+m+31:mem_addr+m] + m := m + 32 + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Load
- Swizzle - - - - - Load contiguous active 64-bit integers from unaligned memory at "mem_addr" - (those with their respective bit set in mask "k"), and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - m := 0 - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := MEM[mem_addr+m+63:mem_addr+m] - m := m + 64 - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Load + Swizzle + + + + + Load contiguous active 64-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[mem_addr+m+63:mem_addr+m] + m := m + 64 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Load
- Swizzle - - - - Load contiguous active 64-bit integers from unaligned memory at "mem_addr" - (those with their respective bit set in mask "k"), and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - m := 0 - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := MEM[mem_addr+m+63:mem_addr+m] - m := m + 64 - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Load + Swizzle + + + + Load contiguous active 64-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := MEM[mem_addr+m+63:mem_addr+m] + m := m + 64 + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Load
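
The four expand-load entries above (`_mm512_mask_expandloadu_epi32`/`_epi64` and their `maskz` forms) differ from the plain masked loads in that memory is read contiguously: the cursor `m` in the pseudocode only advances for set mask bits. A sketch under the same nightly-feature assumption:

    #![feature(stdarch_x86_avx512)]
    use std::arch::x86_64::*;

    #[target_feature(enable = "avx512f")]
    unsafe fn expand_load(mem: &[i32; 2]) -> __m512i {
        // Lanes 0 and 2 are active, yet they are filled from mem[0] and
        // mem[1]: only popcount(k) elements are read from memory.
        let k: __mmask16 = 0b0101;
        _mm512_maskz_expandloadu_epi32(k, mem.as_ptr())
    }
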
- - - - - Gather 64-bit integers from memory using 32-bit indices. 64-bit elements are - loaded from addresses starting at "base_addr" and offset by each 32-bit element in - "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged - into "dst". "scale" should be 1, 2, 4 or 8. - - FOR j := 0 to 7 - i := j*64 - m := j*32 - addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 - dst[i+63:i] := MEM[addr+63:addr] - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Load + + + + + Gather 64-bit integers from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 7 + i := j*64 + m := j*32 + addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 + dst[i+63:i] := MEM[addr+63:addr] +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Load
- - - - - - - Gather 64-bit integers from memory using 32-bit indices. 64-bit elements are - loaded from addresses starting at "base_addr" and offset by each 32-bit element in - "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged - into "dst" using writemask "k" (elements are copied from "src" when the corresponding - mask bit is not set). "scale" should be 1, 2, 4 or 8. - - FOR j := 0 to 7 - i := j*64 - m := j*32 - IF k[j] - addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 - dst[i+63:i] := MEM[addr+63:addr] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Load + + + + + + + Gather 64-bit integers from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 7 + i := j*64 + m := j*32 + IF k[j] + addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 + dst[i+63:i] := MEM[addr+63:addr] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Load
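
The two gather entries above are `_mm512_i32gather_epi64` and `_mm512_mask_i32gather_epi64`: eight 32-bit indices, each scaled by `scale`, select eight 64-bit elements. A sketch, assuming the const-generic `SCALE` parameter and `*const u8` base pointer this crate uses for gathers:

    #![feature(stdarch_x86_avx512)]
    use std::arch::x86_64::*;

    #[target_feature(enable = "avx512f")]
    unsafe fn gather_even(table: &[i64; 16]) -> __m512i {
        // SCALE = 8 because the indices count i64 elements (8 bytes each).
        let idx = _mm256_setr_epi32(0, 2, 4, 6, 8, 10, 12, 14);
        _mm512_i32gather_epi64::<8>(idx, table.as_ptr() as *const u8)
    }
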
- - - - - Gather 32-bit integers from memory using 64-bit indices. 32-bit elements are - loaded from addresses starting at "base_addr" and offset by each 64-bit element in - "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged - into "dst". "scale" should be 1, 2, 4 or 8. - - FOR j := 0 to 7 - i := j*32 - m := j*64 - addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8 - dst[i+31:i] := MEM[addr+31:addr] - ENDFOR - dst[MAX:256] := 0 - - - AVX512F -
immintrin.h
- Load + + + + + Gather 32-bit integers from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 7 + i := j*32 + m := j*64 + addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8 + dst[i+31:i] := MEM[addr+31:addr] +ENDFOR +dst[MAX:256] := 0 + + + AVX512F +
immintrin.h
+ Load
- - - - - - - Gather 32-bit integers from memory using 64-bit indices. 32-bit elements are - loaded from addresses starting at "base_addr" and offset by each 64-bit element in - "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged - into "dst" using writemask "k" (elements are copied from "src" when the corresponding - mask bit is not set). "scale" should be 1, 2, 4 or 8. - - FOR j := 0 to 7 - i := j*32 - m := j*64 - IF k[j] - addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8 - dst[i+31:i] := MEM[addr+31:addr] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F -
immintrin.h
- Load + + + + + + + Gather 32-bit integers from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 7 + i := j*32 + m := j*64 + IF k[j] + addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8 + dst[i+31:i] := MEM[addr+31:addr] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F +
immintrin.h
+ Load
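
Note the `dst[MAX:256] := 0` in the two entries above: with 64-bit indices only eight 32-bit elements can be gathered, so `_mm512_i64gather_epi32` returns a 256-bit vector. A sketch under the same assumptions:

    #![feature(stdarch_x86_avx512)]
    use std::arch::x86_64::*;

    #[target_feature(enable = "avx512f")]
    unsafe fn gather_narrow(table: &[i32; 8]) -> __m256i {
        // Eight i64 indices -> eight i32 results in a __m256i.
        let idx = _mm512_setr_epi64(7, 6, 5, 4, 3, 2, 1, 0);
        _mm512_i64gather_epi32::<4>(idx, table.as_ptr() as *const u8)
    }
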
- - - - - Gather 64-bit integers from memory using 64-bit indices. 64-bit elements are - loaded from addresses starting at "base_addr" and offset by each 64-bit element in - "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged - into "dst". "scale" should be 1, 2, 4 or 8. - - FOR j := 0 to 7 - i := j*64 - m := j*64 - addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8 - dst[i+63:i] := MEM[addr+63:addr] - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Load + + + + + Gather 64-bit integers from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 7 + i := j*64 + m := j*64 + addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8 + dst[i+63:i] := MEM[addr+63:addr] +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Load
- - - - - - - Gather 64-bit integers from memory using 64-bit indices. 64-bit elements are - loaded from addresses starting at "base_addr" and offset by each 64-bit element in - "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged - into "dst" using writemask "k" (elements are copied from "src" when the corresponding - mask bit is not set). "scale" should be 1, 2, 4 or 8. - - FOR j := 0 to 7 - i := j*64 - m := j*64 - IF k[j] - addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8 - dst[i+63:i] := MEM[addr+63:addr] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Load + + + + + + + Gather 64-bit integers from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8. + +FOR j := 0 to 7 + i := j*64 + m := j*64 + IF k[j] + addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8 + dst[i+63:i] := MEM[addr+63:addr] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Load
- - - - Compute the bitwise AND of 16-bit masks "a" and "b", and store the result in - "k". - - k[15:0] := a[15:0] AND b[15:0] - k[MAX:16] := 0 - - - AVX512F -
immintrin.h
- Mask + + + + Compute the bitwise AND of 16-bit masks "a" and "b", and store the result in "k". + +k[15:0] := a[15:0] AND b[15:0] +k[MAX:16] := 0 + + + AVX512F +
immintrin.h
+ Mask
- - - - Compute the bitwise NOT of 16-bit masks "a" and then AND with "b", and store - the result in "k". - - k[15:0] := (NOT a[15:0]) AND b[15:0] - k[MAX:16] := 0 - - - AVX512F -
immintrin.h
- Mask + + + + Compute the bitwise NOT of 16-bit masks "a" and then AND with "b", and store the result in "k". + +k[15:0] := (NOT a[15:0]) AND b[15:0] +k[MAX:16] := 0 + + + AVX512F +
immintrin.h
+ Mask
- - - Compute the bitwise NOT of 16-bit mask "a", and store the result in "k". - - k[15:0] := NOT a[15:0] - k[MAX:16] := 0 - - - AVX512F -
immintrin.h
- Mask + + + Compute the bitwise NOT of 16-bit mask "a", and store the result in "k". + +k[15:0] := NOT a[15:0] +k[MAX:16] := 0 + + + AVX512F +
immintrin.h
+ Mask
- - - - Compute the bitwise OR of 16-bit masks "a" and "b", and store the result in - "k". - - k[15:0] := a[15:0] OR b[15:0] - k[MAX:16] := 0 - - - AVX512F -
immintrin.h
- Mask + + + + Compute the bitwise OR of 16-bit masks "a" and "b", and store the result in "k". + +k[15:0] := a[15:0] OR b[15:0] +k[MAX:16] := 0 + + + AVX512F +
immintrin.h
+ Mask
- - - - Compute the bitwise XNOR of 16-bit masks "a" and "b", and store the result in - "k". - - k[15:0] := NOT (a[15:0] XOR b[15:0]) - k[MAX:16] := 0 - - - AVX512F -
immintrin.h
- Mask + + + + Compute the bitwise XNOR of 16-bit masks "a" and "b", and store the result in "k". + +k[15:0] := NOT (a[15:0] XOR b[15:0]) +k[MAX:16] := 0 + + + AVX512F +
immintrin.h
+ Mask
- - - - Compute the bitwise XOR of 16-bit masks "a" and "b", and store the result in - "k". - - k[15:0] := a[15:0] XOR b[15:0] - k[MAX:16] := 0 - - - AVX512F -
immintrin.h
- Mask + + + + Compute the bitwise XOR of 16-bit masks "a" and "b", and store the result in "k". + +k[15:0] := a[15:0] XOR b[15:0] +k[MAX:16] := 0 + + + AVX512F +
immintrin.h
+ Mask
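
The six entries above are the 16-bit mask logic ops `_kand_mask16`, `_kandn_mask16`, `_knot_mask16`, `_kor_mask16`, `_kxnor_mask16` and `_kxor_mask16`. Since `__mmask16` is a plain `u16`, they behave like ordinary bitwise logic; the one trap is the ANDN operand order, `(NOT a) AND b`. A sketch:

    #![feature(stdarch_x86_avx512)]
    use std::arch::x86_64::*;

    #[target_feature(enable = "avx512f")]
    unsafe fn mask_logic(a: __mmask16, b: __mmask16) -> __mmask16 {
        let only_b = _kandn_mask16(a, b); // (NOT a) AND b, not (NOT b) AND a
        let both = _kand_mask16(a, b);
        // (a & b) | (!a & b) == b, bit for bit.
        _kor_mask16(both, only_b)
    }
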
- - - - Shift the bits of 16-bit mask "a" left by "count" while shifting in zeros, and - store the least significant 16 bits of the result in "k". - - k[MAX:0] := 0 - IF count[7:0] <= 15 - k[15:0] := a[15:0] << count[7:0] - FI - - - AVX512F -
immintrin.h
- Mask + + + + Shift the bits of 16-bit mask "a" left by "count" while shifting in zeros, and store the least significant 16 bits of the result in "k". + +k[MAX:0] := 0 +IF count[7:0] <= 15 + k[15:0] := a[15:0] << count[7:0] +FI + + + AVX512F +
immintrin.h
+ Mask
- - - - Shift the bits of 16-bit mask "a" right by "count" while shifting in zeros, and - store the least significant 16 bits of the result in "k". - - k[MAX:0] := 0 - IF count[7:0] <= 15 - k[15:0] := a[15:0] >> count[7:0] - FI - - - AVX512F -
immintrin.h
- Mask + + + + Shift the bits of 16-bit mask "a" right by "count" while shifting in zeros, and store the least significant 16 bits of the result in "k". + +k[MAX:0] := 0 +IF count[7:0] <= 15 + k[15:0] := a[15:0] >> count[7:0] +FI + + + AVX512F +
immintrin.h
+ Mask
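
The two shift entries correspond to `_kshiftli_mask16` and `_kshiftri_mask16`; note the guard in the pseudocode: any count above 15 produces an all-zero mask rather than being taken modulo 16. A sketch, assuming the crate's const-generic `COUNT` parameter:

    #![feature(stdarch_x86_avx512)]
    use std::arch::x86_64::*;

    #[target_feature(enable = "avx512f")]
    unsafe fn clear_top3(a: __mmask16) -> __mmask16 {
        // Shift left then right by 3: bits 13..15 fall off the top and
        // come back as zeros, so the result is a & 0x1FFF.
        _kshiftri_mask16::<3>(_kshiftli_mask16::<3>(a))
    }
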
- - - - - Compute the bitwise OR of 16-bit masks "a" and "b". If the result is all zeros, - store 1 in "dst", otherwise store 0 in "dst". If the result is all ones, store 1 in - "all_ones", otherwise store 0 in "all_ones". - - tmp[15:0] := a[15:0] OR b[15:0] - IF tmp[15:0] == 0x0 - dst := 1 - ELSE - dst := 0 - FI - IF tmp[15:0] == 0xFFFF - MEM[all_ones+7:all_ones] := 1 - ELSE - MEM[all_ones+7:all_ones] := 0 - FI - - - AVX512F -
immintrin.h
- Mask + + + + + Compute the bitwise OR of 16-bit masks "a" and "b". If the result is all zeros, store 1 in "dst", otherwise store 0 in "dst". If the result is all ones, store 1 in "all_ones", otherwise store 0 in "all_ones". + +tmp[15:0] := a[15:0] OR b[15:0] +IF tmp[15:0] == 0x0 + dst := 1 +ELSE + dst := 0 +FI +IF tmp[15:0] == 0xFFFF + MEM[all_ones+7:all_ones] := 1 +ELSE + MEM[all_ones+7:all_ones] := 0 +FI + + + AVX512F +
immintrin.h
+ Mask
- - - - Compute the bitwise OR of 16-bit masks "a" and "b". If the result is all - zeroes, store 1 in "dst", otherwise store 0 in "dst". - - tmp[15:0] := a[15:0] OR b[15:0] - IF tmp[15:0] == 0x0 - dst := 1 - ELSE - dst := 0 - FI - - - AVX512F -
immintrin.h
- Mask + + + + Compute the bitwise OR of 16-bit masks "a" and "b". If the result is all zeroes, store 1 in "dst", otherwise store 0 in "dst". + +tmp[15:0] := a[15:0] OR b[15:0] +IF tmp[15:0] == 0x0 + dst := 1 +ELSE + dst := 0 +FI + + + AVX512F +
immintrin.h
+ Mask
- - - - Compute the bitwise OR of 16-bit masks "a" and "b". If the result is all ones, - store 1 in "dst", otherwise store 0 in "dst". - - tmp[15:0] := a[15:0] OR b[15:0] - IF tmp[15:0] == 0xFFFF - dst := 1 - ELSE - dst := 0 - FI - - - AVX512F -
immintrin.h
- Mask + + + + Compute the bitwise OR of 16-bit masks "a" and "b". If the result is all ones, store 1 in "dst", otherwise store 0 in "dst". + +tmp[15:0] := a[15:0] OR b[15:0] +IF tmp[15:0] == 0xFFFF + dst := 1 +ELSE + dst := 0 +FI + + + AVX512F +
immintrin.h
+ Mask
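
The three `kortest` entries above map to `_kortest_mask16_u8`, `_kortestz_mask16_u8` and `_kortestc_mask16_u8`: one OR of the two masks sets a ZF-style "all zeros" flag and a CF-style "all ones" flag. A sketch (the three-argument form reports both at once):

    #![feature(stdarch_x86_avx512)]
    use std::arch::x86_64::*;

    #[target_feature(enable = "avx512f")]
    unsafe fn or_tests(a: __mmask16, b: __mmask16) -> (u8, u8) {
        let mut all_ones = 0u8;
        // Returns 1 iff (a | b) == 0; stores 1 in `all_ones` iff it is 0xFFFF.
        let all_zeros = _kortest_mask16_u8(a, b, &mut all_ones);
        debug_assert_eq!(all_zeros, _kortestz_mask16_u8(a, b));
        debug_assert_eq!(all_ones, _kortestc_mask16_u8(a, b));
        (all_zeros, all_ones)
    }
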
- - - Convert 16-bit mask "a" into an integer value, and store the result in "dst". - - dst := ZeroExtend32(a[15:0]) - - - AVX512F -
immintrin.h
- Mask + + + Convert 16-bit mask "a" into an integer value, and store the result in "dst". + +dst := ZeroExtend32(a[15:0]) + + + AVX512F +
immintrin.h
+ Mask
- - - Convert integer value "a" into an 16-bit mask, and store the result in "k". - - k := ZeroExtend16(a[15:0]) - - - AVX512F -
immintrin.h
- Mask
 + 
 + 
 + Convert integer value "a" into a 16-bit mask, and store the result in "k". 
 + 
+k := ZeroExtend16(a[15:0]) 
 + 
 + 
 + AVX512F 
+
immintrin.h
+ Mask
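
`_cvtmask16_u32` and `_cvtu32_mask16` (the two entries above) are pure zero-extensions; only the low 16 bits of the integer survive a round trip. A sketch:

    #![feature(stdarch_x86_avx512)]
    use std::arch::x86_64::*;

    #[target_feature(enable = "avx512f")]
    unsafe fn roundtrip(x: u32) -> u32 {
        let k: __mmask16 = _cvtu32_mask16(x); // k := ZeroExtend16(x[15:0])
        _cvtmask16_u32(k) // dst := ZeroExtend32(k[15:0]); high 16 bits of x lost
    }
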
- - - - Compute the bitwise NOT of 16-bit masks "a" and then AND with "b", and store - the result in "k". - - k[15:0] := (NOT a[15:0]) AND b[15:0] - k[MAX:16] := 0 - - - AVX512F -
immintrin.h
- Mask + + + + Compute the bitwise NOT of 16-bit masks "a" and then AND with "b", and store the result in "k". + +k[15:0] := (NOT a[15:0]) AND b[15:0] +k[MAX:16] := 0 + + + AVX512F +
immintrin.h
+ Mask
- - - - Compute the bitwise AND of 16-bit masks "a" and "b", and store the result in - "k". - - k[15:0] := a[15:0] AND b[15:0] - k[MAX:16] := 0 - - - AVX512F -
immintrin.h
- Mask + + + + Compute the bitwise AND of 16-bit masks "a" and "b", and store the result in "k". + +k[15:0] := a[15:0] AND b[15:0] +k[MAX:16] := 0 + + + AVX512F +
immintrin.h
+ Mask
- - - Copy 16-bit mask "a" to "k". - - k[15:0] := a[15:0] - k[MAX:16] := 0 - - - AVX512F -
immintrin.h
- Mask + + + Copy 16-bit mask "a" to "k". + +k[15:0] := a[15:0] +k[MAX:16] := 0 + + + AVX512F +
immintrin.h
+ Mask
- - - Compute the bitwise NOT of 16-bit mask "a", and store the result in "k". - - k[15:0] := NOT a[15:0] - k[MAX:16] := 0 - - - AVX512F -
immintrin.h
- Mask + + + Compute the bitwise NOT of 16-bit mask "a", and store the result in "k". + +k[15:0] := NOT a[15:0] +k[MAX:16] := 0 + + + AVX512F +
immintrin.h
+ Mask
- - - - Compute the bitwise OR of 16-bit masks "a" and "b", and store the result in - "k". - - k[15:0] := a[15:0] OR b[15:0] - k[MAX:16] := 0 - - - AVX512F -
immintrin.h
- Mask + + + + Compute the bitwise OR of 16-bit masks "a" and "b", and store the result in "k". + +k[15:0] := a[15:0] OR b[15:0] +k[MAX:16] := 0 + + + AVX512F +
immintrin.h
+ Mask
- - - - Unpack and interleave 8 bits from masks "a" and "b", and store the 16-bit - result in "k". - - k[7:0] := b[7:0] - k[15:8] := a[7:0] - k[MAX:16] := 0 - - - AVX512F -
immintrin.h
- Mask + + + + Unpack and interleave 8 bits from masks "a" and "b", and store the 16-bit result in "k". + +k[7:0] := b[7:0] +k[15:8] := a[7:0] +k[MAX:16] := 0 + + + AVX512F +
immintrin.h
+ Mask
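
`_mm512_kunpackb` (above) interleaves the low bytes of its two masks, and the operand order is easy to get backwards: `b` supplies the low byte, `a` the high byte. A sketch:

    #![feature(stdarch_x86_avx512)]
    use std::arch::x86_64::*;

    #[target_feature(enable = "avx512f")]
    unsafe fn unpack() -> __mmask16 {
        // k[7:0] := b[7:0], k[15:8] := a[7:0]; high input bytes are dropped.
        let a: __mmask16 = 0x12FF;
        let b: __mmask16 = 0x340F;
        _mm512_kunpackb(a, b) // == 0xFF0F
    }
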
- - - - Compute the bitwise XNOR of 16-bit masks "a" and "b", and store the result in - "k". - - k[15:0] := NOT (a[15:0] XOR b[15:0]) - k[MAX:16] := 0 - - - AVX512F -
immintrin.h
- Mask + + + + Compute the bitwise XNOR of 16-bit masks "a" and "b", and store the result in "k". + +k[15:0] := NOT (a[15:0] XOR b[15:0]) +k[MAX:16] := 0 + + + AVX512F +
immintrin.h
+ Mask
- - - - Compute the bitwise XOR of 16-bit masks "a" and "b", and store the result in - "k". - - k[15:0] := a[15:0] XOR b[15:0] - k[MAX:16] := 0 - - - AVX512F -
immintrin.h
- Mask + + + + Compute the bitwise XOR of 16-bit masks "a" and "b", and store the result in "k". + +k[15:0] := a[15:0] XOR b[15:0] +k[MAX:16] := 0 + + + AVX512F +
immintrin.h
+ Mask
- - - - Performs bitwise OR between "k1" and "k2", storing the result in "dst". ZF flag - is set if "dst" is 0. - dst[15:0] := k1[15:0] | k2[15:0] - IF dst == 0 - SetZF() - FI - - - AVX512F -
immintrin.h
- Mask + + + + Performs bitwise OR between "k1" and "k2", storing the result in "dst". ZF flag is set if "dst" is 0. + dst[15:0] := k1[15:0] | k2[15:0] +IF dst == 0 + SetZF() +FI + + + AVX512F +
immintrin.h
+ Mask
- - - - Performs bitwise OR between "k1" and "k2", storing the result in "dst". CF flag - is set if "dst" consists of all 1's. - dst[15:0] := k1[15:0] | k2[15:0] - IF PopCount(dst[15:0]) == 16 - SetCF() - FI - - - AVX512F -
immintrin.h
- Mask + + + + Performs bitwise OR between "k1" and "k2", storing the result in "dst". CF flag is set if "dst" consists of all 1's. + dst[15:0] := k1[15:0] | k2[15:0] +IF PopCount(dst[15:0]) == 16 + SetCF() +FI + + + AVX512F +
immintrin.h
+ Mask
- - - Converts bit mask "k1" into an integer value, storing the results in "dst". - - dst := ZeroExtend32(k1) - - - AVX512F -
immintrin.h
- Mask
 + 
 + 
 + Converts bit mask "k1" into an integer value, storing the result in "dst". 
 + 
+dst := ZeroExtend32(k1) 
 + 
 + 
 + AVX512F 
+
immintrin.h
+ Mask
- - - Converts integer "mask" into bitmask, storing the result in "dst". - - dst := mask[15:0] - - - AVX512F -
immintrin.h
- Mask
 + 
 + 
 + Converts integer "mask" into a bitmask, storing the result in "dst". 
 + 
+dst := mask[15:0] 
 + 
 + 
 + AVX512F 
+
immintrin.h
+ Mask
- - - - - - Concatenate "a" and "b" into a 128-byte immediate result, shift the result - right by "imm8" 32-bit elements, and stores the low 64 bytes (16 elements) in "dst" - using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - temp[1023:512] := a[511:0] - temp[511:0] := b[511:0] - temp[1023:0] := temp[1023:0] >> (32*imm8[3:0]) - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := temp[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + Concatenate "a" and "b" into a 128-byte immediate result, shift the result right by "imm8" 32-bit elements, and stores the low 64 bytes (16 elements) in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +temp[1023:512] := a[511:0] +temp[511:0] := b[511:0] +temp[1023:0] := temp[1023:0] >> (32*imm8[3:0]) +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := temp[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - Concatenate "a" and "b" into a 128-byte immediate result, shift the result - right by "imm8" 64-bit elements, and store the low 64 bytes (8 elements) in "dst". - - temp[1023:512] := a[511:0] - temp[511:0] := b[511:0] - temp[1023:0] := temp[1023:0] >> (64*imm8[2:0]) - dst[511:0] := temp[511:0] - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + Concatenate "a" and "b" into a 128-byte immediate result, shift the result right by "imm8" 64-bit elements, and store the low 64 bytes (8 elements) in "dst". + +temp[1023:512] := a[511:0] +temp[511:0] := b[511:0] +temp[1023:0] := temp[1023:0] >> (64*imm8[2:0]) +dst[511:0] := temp[511:0] +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - - Concatenate "a" and "b" into a 128-byte immediate result, shift the result - right by "imm8" 64-bit elements, and store the low 64 bytes (8 elements) in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - temp[1023:512] := a[511:0] - temp[511:0] := b[511:0] - temp[1023:0] := temp[1023:0] >> (64*imm8[2:0]) - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := temp[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + + Concatenate "a" and "b" into a 128-byte immediate result, shift the result right by "imm8" 64-bit elements, and store the low 64 bytes (8 elements) in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +temp[1023:512] := a[511:0] +temp[511:0] := b[511:0] +temp[1023:0] := temp[1023:0] >> (64*imm8[2:0]) +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := temp[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - Concatenate "a" and "b" into a 128-byte immediate result, shift the result - right by "imm8" 64-bit elements, and stores the low 64 bytes (8 elements) in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - temp[1023:512] := a[511:0] - temp[511:0] := b[511:0] - temp[1023:0] := temp[1023:0] >> (64*imm8[2:0]) - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := temp[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + Concatenate "a" and "b" into a 128-byte immediate result, shift the result right by "imm8" 64-bit elements, and stores the low 64 bytes (8 elements) in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +temp[1023:512] := a[511:0] +temp[511:0] := b[511:0] +temp[1023:0] := temp[1023:0] >> (64*imm8[2:0]) +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := temp[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
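
The four `valign` entries above are `_mm512_alignr_epi32`/`_mm512_alignr_epi64` with their mask/maskz forms: `a:b` is treated as one double-width vector, shifted right by whole elements, with the low half kept. A sketch of the unmasked 64-bit form, assuming the crate's const-generic `IMM8`:

    #![feature(stdarch_x86_avx512)]
    use std::arch::x86_64::*;

    #[target_feature(enable = "avx512f")]
    unsafe fn align_by_two() -> __m512i {
        let a = _mm512_setr_epi64(8, 9, 10, 11, 12, 13, 14, 15);
        let b = _mm512_setr_epi64(0, 1, 2, 3, 4, 5, 6, 7);
        // temp = a:b shifted right by 2 elements; the low 8 lanes are kept,
        // so the result is 2, 3, 4, 5, 6, 7, 8, 9.
        _mm512_alignr_epi64::<2>(a, b)
    }
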
- - - - - - Fix up packed double-precision (64-bit) floating-point elements in "a" and "b" - using packed 64-bit integers in "c", and store the results in "dst". "imm8" is used to - set the required flags reporting. - enum TOKEN_TYPE { - QNAN_TOKEN := 0, \ - SNAN_TOKEN := 1, \ - ZERO_VALUE_TOKEN := 2, \ - ONE_VALUE_TOKEN := 3, \ - NEG_INF_TOKEN := 4, \ - POS_INF_TOKEN := 5, \ - NEG_VALUE_TOKEN := 6, \ - POS_VALUE_TOKEN := 7 - } - DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) { - tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0] - CASE(tsrc[63:0]) OF - QNAN_TOKEN:j := 0 - SNAN_TOKEN:j := 1 - ZERO_VALUE_TOKEN: j := 2 - ONE_VALUE_TOKEN: j := 3 - NEG_INF_TOKEN: j := 4 - POS_INF_TOKEN: j := 5 - NEG_VALUE_TOKEN: j := 6 - POS_VALUE_TOKEN: j := 7 - ESAC - - token_response[3:0] := src3[3+4*j:4*j] - - CASE(token_response[3:0]) OF - 0 : dest[63:0] := src1[63:0] - 1 : dest[63:0] := tsrc[63:0] - 2 : dest[63:0] := QNaN(tsrc[63:0]) - 3 : dest[63:0] := QNAN_Indefinite - 4 : dest[63:0] := -INF - 5 : dest[63:0] := +INF - 6 : dest[63:0] := tsrc.sign? -INF : +INF - 7 : dest[63:0] := -0 - 8 : dest[63:0] := +0 - 9 : dest[63:0] := -1 - 10: dest[63:0] := +1 - 11: dest[63:0] := 1/2 - 12: dest[63:0] := 90.0 - 13: dest[63:0] := PI/2 - 14: dest[63:0] := MAX_FLOAT - 15: dest[63:0] := -MAX_FLOAT - ESAC - - CASE(tsrc[31:0]) OF - ZERO_VALUE_TOKEN: - IF (imm8[0]) #ZE; FI - ZERO_VALUE_TOKEN: - IF (imm8[1]) #IE; FI - ONE_VALUE_TOKEN: - IF (imm8[2]) #ZE; FI - ONE_VALUE_TOKEN: - IF (imm8[3]) #IE; FI - SNAN_TOKEN: - IF (imm8[4]) #IE; FI - NEG_INF_TOKEN: - IF (imm8[5]) #IE; FI - NEG_VALUE_TOKEN: - IF (imm8[6]) #IE; FI - POS_INF_TOKEN: - IF (imm8[7]) #IE; FI - ESAC - RETURN dest[63:0] - } - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + Fix up packed double-precision (64-bit) floating-point elements in "a" and "b" using packed 64-bit integers in "c", and store the results in "dst". "imm8" is used to set the required flags reporting. + enum TOKEN_TYPE { + QNAN_TOKEN := 0, \ + SNAN_TOKEN := 1, \ + ZERO_VALUE_TOKEN := 2, \ + ONE_VALUE_TOKEN := 3, \ + NEG_INF_TOKEN := 4, \ + POS_INF_TOKEN := 5, \ + NEG_VALUE_TOKEN := 6, \ + POS_VALUE_TOKEN := 7 +} +DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) { + tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0] + CASE(tsrc[63:0]) OF + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) OF + 0 : dest[63:0] := src1[63:0] + 1 : dest[63:0] := tsrc[63:0] + 2 : dest[63:0] := QNaN(tsrc[63:0]) + 3 : dest[63:0] := QNAN_Indefinite + 4 : dest[63:0] := -INF + 5 : dest[63:0] := +INF + 6 : dest[63:0] := tsrc.sign? -INF : +INF + 7 : dest[63:0] := -0 + 8 : dest[63:0] := +0 + 9 : dest[63:0] := -1 + 10: dest[63:0] := +1 + 11: dest[63:0] := 1/2 + 12: dest[63:0] := 90.0 + 13: dest[63:0] := PI/2 + 14: dest[63:0] := MAX_FLOAT + 15: dest[63:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0]) OF + ZERO_VALUE_TOKEN: + IF (imm8[0]) #ZE; FI + ZERO_VALUE_TOKEN: + IF (imm8[1]) #IE; FI + ONE_VALUE_TOKEN: + IF (imm8[2]) #ZE; FI + ONE_VALUE_TOKEN: + IF (imm8[3]) #IE; FI + SNAN_TOKEN: + IF (imm8[4]) #IE; FI + NEG_INF_TOKEN: + IF (imm8[5]) #IE; FI + NEG_VALUE_TOKEN: + IF (imm8[6]) #IE; FI + POS_INF_TOKEN: + IF (imm8[7]) #IE; FI + ESAC + RETURN dest[63:0] +} +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - - Fix up packed double-precision (64-bit) floating-point elements in "a" and "b" - using packed 64-bit integers in "c", and store the results in "dst". "imm8" is used to - set the required flags reporting. - [sae_note] - enum TOKEN_TYPE { - QNAN_TOKEN := 0, \ - SNAN_TOKEN := 1, \ - ZERO_VALUE_TOKEN := 2, \ - ONE_VALUE_TOKEN := 3, \ - NEG_INF_TOKEN := 4, \ - POS_INF_TOKEN := 5, \ - NEG_VALUE_TOKEN := 6, \ - POS_VALUE_TOKEN := 7 - } - DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) { - tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0] - CASE(tsrc[63:0]) OF - QNAN_TOKEN:j := 0 - SNAN_TOKEN:j := 1 - ZERO_VALUE_TOKEN: j := 2 - ONE_VALUE_TOKEN: j := 3 - NEG_INF_TOKEN: j := 4 - POS_INF_TOKEN: j := 5 - NEG_VALUE_TOKEN: j := 6 - POS_VALUE_TOKEN: j := 7 - ESAC - - token_response[3:0] := src3[3+4*j:4*j] - - CASE(token_response[3:0]) OF - 0 : dest[63:0] := src1[63:0] - 1 : dest[63:0] := tsrc[63:0] - 2 : dest[63:0] := QNaN(tsrc[63:0]) - 3 : dest[63:0] := QNAN_Indefinite - 4 : dest[63:0] := -INF - 5 : dest[63:0] := +INF - 6 : dest[63:0] := tsrc.sign? -INF : +INF - 7 : dest[63:0] := -0 - 8 : dest[63:0] := +0 - 9 : dest[63:0] := -1 - 10: dest[63:0] := +1 - 11: dest[63:0] := 1/2 - 12: dest[63:0] := 90.0 - 13: dest[63:0] := PI/2 - 14: dest[63:0] := MAX_FLOAT - 15: dest[63:0] := -MAX_FLOAT - ESAC - - CASE(tsrc[31:0]) OF - ZERO_VALUE_TOKEN: - IF (imm8[0]) #ZE; FI - ZERO_VALUE_TOKEN: - IF (imm8[1]) #IE; FI - ONE_VALUE_TOKEN: - IF (imm8[2]) #ZE; FI - ONE_VALUE_TOKEN: - IF (imm8[3]) #IE; FI - SNAN_TOKEN: - IF (imm8[4]) #IE; FI - NEG_INF_TOKEN: - IF (imm8[5]) #IE; FI - NEG_VALUE_TOKEN: - IF (imm8[6]) #IE; FI - POS_INF_TOKEN: - IF (imm8[7]) #IE; FI - ESAC - RETURN dest[63:0] - } - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + + Fix up packed double-precision (64-bit) floating-point elements in "a" and "b" using packed 64-bit integers in "c", and store the results in "dst". "imm8" is used to set the required flags reporting. + [sae_note] + enum TOKEN_TYPE { + QNAN_TOKEN := 0, \ + SNAN_TOKEN := 1, \ + ZERO_VALUE_TOKEN := 2, \ + ONE_VALUE_TOKEN := 3, \ + NEG_INF_TOKEN := 4, \ + POS_INF_TOKEN := 5, \ + NEG_VALUE_TOKEN := 6, \ + POS_VALUE_TOKEN := 7 +} +DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) { + tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0] + CASE(tsrc[63:0]) OF + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) OF + 0 : dest[63:0] := src1[63:0] + 1 : dest[63:0] := tsrc[63:0] + 2 : dest[63:0] := QNaN(tsrc[63:0]) + 3 : dest[63:0] := QNAN_Indefinite + 4 : dest[63:0] := -INF + 5 : dest[63:0] := +INF + 6 : dest[63:0] := tsrc.sign? -INF : +INF + 7 : dest[63:0] := -0 + 8 : dest[63:0] := +0 + 9 : dest[63:0] := -1 + 10: dest[63:0] := +1 + 11: dest[63:0] := 1/2 + 12: dest[63:0] := 90.0 + 13: dest[63:0] := PI/2 + 14: dest[63:0] := MAX_FLOAT + 15: dest[63:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0]) OF + ZERO_VALUE_TOKEN: + IF (imm8[0]) #ZE; FI + ZERO_VALUE_TOKEN: + IF (imm8[1]) #IE; FI + ONE_VALUE_TOKEN: + IF (imm8[2]) #ZE; FI + ONE_VALUE_TOKEN: + IF (imm8[3]) #IE; FI + SNAN_TOKEN: + IF (imm8[4]) #IE; FI + NEG_INF_TOKEN: + IF (imm8[5]) #IE; FI + NEG_VALUE_TOKEN: + IF (imm8[6]) #IE; FI + POS_INF_TOKEN: + IF (imm8[7]) #IE; FI + ESAC + RETURN dest[63:0] +} +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - - Fix up packed double-precision (64-bit) floating-point elements in "a" and "b" - using packed 64-bit integers in "c", and store the results in "dst" using writemask "k" - (elements are copied from "a" when the corresponding mask bit is not set). "imm8" is - used to set the required flags reporting. - enum TOKEN_TYPE { - QNAN_TOKEN := 0, \ - SNAN_TOKEN := 1, \ - ZERO_VALUE_TOKEN := 2, \ - ONE_VALUE_TOKEN := 3, \ - NEG_INF_TOKEN := 4, \ - POS_INF_TOKEN := 5, \ - NEG_VALUE_TOKEN := 6, \ - POS_VALUE_TOKEN := 7 - } - DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) { - tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0] - CASE(tsrc[63:0]) OF - QNAN_TOKEN:j := 0 - SNAN_TOKEN:j := 1 - ZERO_VALUE_TOKEN: j := 2 - ONE_VALUE_TOKEN: j := 3 - NEG_INF_TOKEN: j := 4 - POS_INF_TOKEN: j := 5 - NEG_VALUE_TOKEN: j := 6 - POS_VALUE_TOKEN: j := 7 - ESAC - - token_response[3:0] := src3[3+4*j:4*j] - - CASE(token_response[3:0]) OF - 0 : dest[63:0] := src1[63:0] - 1 : dest[63:0] := tsrc[63:0] - 2 : dest[63:0] := QNaN(tsrc[63:0]) - 3 : dest[63:0] := QNAN_Indefinite - 4 : dest[63:0] := -INF - 5 : dest[63:0] := +INF - 6 : dest[63:0] := tsrc.sign? -INF : +INF - 7 : dest[63:0] := -0 - 8 : dest[63:0] := +0 - 9 : dest[63:0] := -1 - 10: dest[63:0] := +1 - 11: dest[63:0] := 1/2 - 12: dest[63:0] := 90.0 - 13: dest[63:0] := PI/2 - 14: dest[63:0] := MAX_FLOAT - 15: dest[63:0] := -MAX_FLOAT - ESAC - - CASE(tsrc[31:0]) OF - ZERO_VALUE_TOKEN: - IF (imm8[0]) #ZE; FI - ZERO_VALUE_TOKEN: - IF (imm8[1]) #IE; FI - ONE_VALUE_TOKEN: - IF (imm8[2]) #ZE; FI - ONE_VALUE_TOKEN: - IF (imm8[3]) #IE; FI - SNAN_TOKEN: - IF (imm8[4]) #IE; FI - NEG_INF_TOKEN: - IF (imm8[5]) #IE; FI - NEG_VALUE_TOKEN: - IF (imm8[6]) #IE; FI - POS_INF_TOKEN: - IF (imm8[7]) #IE; FI - ESAC - RETURN dest[63:0] - } - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0]) - ELSE - dst[i+63:i] := a[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + + Fix up packed double-precision (64-bit) floating-point elements in "a" and "b" using packed 64-bit integers in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting. + enum TOKEN_TYPE { + QNAN_TOKEN := 0, \ + SNAN_TOKEN := 1, \ + ZERO_VALUE_TOKEN := 2, \ + ONE_VALUE_TOKEN := 3, \ + NEG_INF_TOKEN := 4, \ + POS_INF_TOKEN := 5, \ + NEG_VALUE_TOKEN := 6, \ + POS_VALUE_TOKEN := 7 +} +DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) { + tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0] + CASE(tsrc[63:0]) OF + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) OF + 0 : dest[63:0] := src1[63:0] + 1 : dest[63:0] := tsrc[63:0] + 2 : dest[63:0] := QNaN(tsrc[63:0]) + 3 : dest[63:0] := QNAN_Indefinite + 4 : dest[63:0] := -INF + 5 : dest[63:0] := +INF + 6 : dest[63:0] := tsrc.sign? -INF : +INF + 7 : dest[63:0] := -0 + 8 : dest[63:0] := +0 + 9 : dest[63:0] := -1 + 10: dest[63:0] := +1 + 11: dest[63:0] := 1/2 + 12: dest[63:0] := 90.0 + 13: dest[63:0] := PI/2 + 14: dest[63:0] := MAX_FLOAT + 15: dest[63:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0]) OF + ZERO_VALUE_TOKEN: + IF (imm8[0]) #ZE; FI + ZERO_VALUE_TOKEN: + IF (imm8[1]) #IE; FI + ONE_VALUE_TOKEN: + IF (imm8[2]) #ZE; FI + ONE_VALUE_TOKEN: + IF (imm8[3]) #IE; FI + SNAN_TOKEN: + IF (imm8[4]) #IE; FI + NEG_INF_TOKEN: + IF (imm8[5]) #IE; FI + NEG_VALUE_TOKEN: + IF (imm8[6]) #IE; FI + POS_INF_TOKEN: + IF (imm8[7]) #IE; FI + ESAC + RETURN dest[63:0] +} +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - - - Fix up packed double-precision (64-bit) floating-point elements in "a" and "b" - using packed 64-bit integers in "c", and store the results in "dst" using writemask "k" - (elements are copied from "a" when the corresponding mask bit is not set). "imm8" is - used to set the required flags reporting. - [sae_note] - enum TOKEN_TYPE { - QNAN_TOKEN := 0, \ - SNAN_TOKEN := 1, \ - ZERO_VALUE_TOKEN := 2, \ - ONE_VALUE_TOKEN := 3, \ - NEG_INF_TOKEN := 4, \ - POS_INF_TOKEN := 5, \ - NEG_VALUE_TOKEN := 6, \ - POS_VALUE_TOKEN := 7 - } - DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) { - tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0] - CASE(tsrc[63:0]) OF - QNAN_TOKEN:j := 0 - SNAN_TOKEN:j := 1 - ZERO_VALUE_TOKEN: j := 2 - ONE_VALUE_TOKEN: j := 3 - NEG_INF_TOKEN: j := 4 - POS_INF_TOKEN: j := 5 - NEG_VALUE_TOKEN: j := 6 - POS_VALUE_TOKEN: j := 7 - ESAC - - token_response[3:0] := src3[3+4*j:4*j] - - CASE(token_response[3:0]) OF - 0 : dest[63:0] := src1[63:0] - 1 : dest[63:0] := tsrc[63:0] - 2 : dest[63:0] := QNaN(tsrc[63:0]) - 3 : dest[63:0] := QNAN_Indefinite - 4 : dest[63:0] := -INF - 5 : dest[63:0] := +INF - 6 : dest[63:0] := tsrc.sign? -INF : +INF - 7 : dest[63:0] := -0 - 8 : dest[63:0] := +0 - 9 : dest[63:0] := -1 - 10: dest[63:0] := +1 - 11: dest[63:0] := 1/2 - 12: dest[63:0] := 90.0 - 13: dest[63:0] := PI/2 - 14: dest[63:0] := MAX_FLOAT - 15: dest[63:0] := -MAX_FLOAT - ESAC - - CASE(tsrc[31:0]) OF - ZERO_VALUE_TOKEN: - IF (imm8[0]) #ZE; FI - ZERO_VALUE_TOKEN: - IF (imm8[1]) #IE; FI - ONE_VALUE_TOKEN: - IF (imm8[2]) #ZE; FI - ONE_VALUE_TOKEN: - IF (imm8[3]) #IE; FI - SNAN_TOKEN: - IF (imm8[4]) #IE; FI - NEG_INF_TOKEN: - IF (imm8[5]) #IE; FI - NEG_VALUE_TOKEN: - IF (imm8[6]) #IE; FI - POS_INF_TOKEN: - IF (imm8[7]) #IE; FI - ESAC - RETURN dest[63:0] - } - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0]) - ELSE - dst[i+63:i] := a[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + + + Fix up packed double-precision (64-bit) floating-point elements in "a" and "b" using packed 64-bit integers in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting. + [sae_note] + enum TOKEN_TYPE { + QNAN_TOKEN := 0, \ + SNAN_TOKEN := 1, \ + ZERO_VALUE_TOKEN := 2, \ + ONE_VALUE_TOKEN := 3, \ + NEG_INF_TOKEN := 4, \ + POS_INF_TOKEN := 5, \ + NEG_VALUE_TOKEN := 6, \ + POS_VALUE_TOKEN := 7 +} +DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) { + tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0] + CASE(tsrc[63:0]) OF + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) OF + 0 : dest[63:0] := src1[63:0] + 1 : dest[63:0] := tsrc[63:0] + 2 : dest[63:0] := QNaN(tsrc[63:0]) + 3 : dest[63:0] := QNAN_Indefinite + 4 : dest[63:0] := -INF + 5 : dest[63:0] := +INF + 6 : dest[63:0] := tsrc.sign? -INF : +INF + 7 : dest[63:0] := -0 + 8 : dest[63:0] := +0 + 9 : dest[63:0] := -1 + 10: dest[63:0] := +1 + 11: dest[63:0] := 1/2 + 12: dest[63:0] := 90.0 + 13: dest[63:0] := PI/2 + 14: dest[63:0] := MAX_FLOAT + 15: dest[63:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0]) OF + ZERO_VALUE_TOKEN: + IF (imm8[0]) #ZE; FI + ZERO_VALUE_TOKEN: + IF (imm8[1]) #IE; FI + ONE_VALUE_TOKEN: + IF (imm8[2]) #ZE; FI + ONE_VALUE_TOKEN: + IF (imm8[3]) #IE; FI + SNAN_TOKEN: + IF (imm8[4]) #IE; FI + NEG_INF_TOKEN: + IF (imm8[5]) #IE; FI + NEG_VALUE_TOKEN: + IF (imm8[6]) #IE; FI + POS_INF_TOKEN: + IF (imm8[7]) #IE; FI + ESAC + RETURN dest[63:0] +} +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - - Fix up packed double-precision (64-bit) floating-point elements in "a" and "b" - using packed 64-bit integers in "c", and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). "imm8" is used to - set the required flags reporting. - enum TOKEN_TYPE { - QNAN_TOKEN := 0, \ - SNAN_TOKEN := 1, \ - ZERO_VALUE_TOKEN := 2, \ - ONE_VALUE_TOKEN := 3, \ - NEG_INF_TOKEN := 4, \ - POS_INF_TOKEN := 5, \ - NEG_VALUE_TOKEN := 6, \ - POS_VALUE_TOKEN := 7 - } - DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) { - tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0] - CASE(tsrc[63:0]) OF - QNAN_TOKEN:j := 0 - SNAN_TOKEN:j := 1 - ZERO_VALUE_TOKEN: j := 2 - ONE_VALUE_TOKEN: j := 3 - NEG_INF_TOKEN: j := 4 - POS_INF_TOKEN: j := 5 - NEG_VALUE_TOKEN: j := 6 - POS_VALUE_TOKEN: j := 7 - ESAC - - token_response[3:0] := src3[3+4*j:4*j] - - CASE(token_response[3:0]) OF - 0 : dest[63:0] := src1[63:0] - 1 : dest[63:0] := tsrc[63:0] - 2 : dest[63:0] := QNaN(tsrc[63:0]) - 3 : dest[63:0] := QNAN_Indefinite - 4 : dest[63:0] := -INF - 5 : dest[63:0] := +INF - 6 : dest[63:0] := tsrc.sign? -INF : +INF - 7 : dest[63:0] := -0 - 8 : dest[63:0] := +0 - 9 : dest[63:0] := -1 - 10: dest[63:0] := +1 - 11: dest[63:0] := 1/2 - 12: dest[63:0] := 90.0 - 13: dest[63:0] := PI/2 - 14: dest[63:0] := MAX_FLOAT - 15: dest[63:0] := -MAX_FLOAT - ESAC - - CASE(tsrc[31:0]) OF - ZERO_VALUE_TOKEN: - IF (imm8[0]) #ZE; FI - ZERO_VALUE_TOKEN: - IF (imm8[1]) #IE; FI - ONE_VALUE_TOKEN: - IF (imm8[2]) #ZE; FI - ONE_VALUE_TOKEN: - IF (imm8[3]) #IE; FI - SNAN_TOKEN: - IF (imm8[4]) #IE; FI - NEG_INF_TOKEN: - IF (imm8[5]) #IE; FI - NEG_VALUE_TOKEN: - IF (imm8[6]) #IE; FI - POS_INF_TOKEN: - IF (imm8[7]) #IE; FI - ESAC - RETURN dest[63:0] - } - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + + Fix up packed double-precision (64-bit) floating-point elements in "a" and "b" using packed 64-bit integers in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting. + enum TOKEN_TYPE { + QNAN_TOKEN := 0, \ + SNAN_TOKEN := 1, \ + ZERO_VALUE_TOKEN := 2, \ + ONE_VALUE_TOKEN := 3, \ + NEG_INF_TOKEN := 4, \ + POS_INF_TOKEN := 5, \ + NEG_VALUE_TOKEN := 6, \ + POS_VALUE_TOKEN := 7 +} +DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) { + tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0] + CASE(tsrc[63:0]) OF + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) OF + 0 : dest[63:0] := src1[63:0] + 1 : dest[63:0] := tsrc[63:0] + 2 : dest[63:0] := QNaN(tsrc[63:0]) + 3 : dest[63:0] := QNAN_Indefinite + 4 : dest[63:0] := -INF + 5 : dest[63:0] := +INF + 6 : dest[63:0] := tsrc.sign? -INF : +INF + 7 : dest[63:0] := -0 + 8 : dest[63:0] := +0 + 9 : dest[63:0] := -1 + 10: dest[63:0] := +1 + 11: dest[63:0] := 1/2 + 12: dest[63:0] := 90.0 + 13: dest[63:0] := PI/2 + 14: dest[63:0] := MAX_FLOAT + 15: dest[63:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0]) OF + ZERO_VALUE_TOKEN: + IF (imm8[0]) #ZE; FI + ZERO_VALUE_TOKEN: + IF (imm8[1]) #IE; FI + ONE_VALUE_TOKEN: + IF (imm8[2]) #ZE; FI + ONE_VALUE_TOKEN: + IF (imm8[3]) #IE; FI + SNAN_TOKEN: + IF (imm8[4]) #IE; FI + NEG_INF_TOKEN: + IF (imm8[5]) #IE; FI + NEG_VALUE_TOKEN: + IF (imm8[6]) #IE; FI + POS_INF_TOKEN: + IF (imm8[7]) #IE; FI + ESAC + RETURN dest[63:0] +} +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - - - Fix up packed double-precision (64-bit) floating-point elements in "a" and "b" - using packed 64-bit integers in "c", and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). "imm8" is used to - set the required flags reporting. - [sae_note] - enum TOKEN_TYPE { - QNAN_TOKEN := 0, \ - SNAN_TOKEN := 1, \ - ZERO_VALUE_TOKEN := 2, \ - ONE_VALUE_TOKEN := 3, \ - NEG_INF_TOKEN := 4, \ - POS_INF_TOKEN := 5, \ - NEG_VALUE_TOKEN := 6, \ - POS_VALUE_TOKEN := 7 - } - DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) { - tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0] - CASE(tsrc[63:0]) OF - QNAN_TOKEN:j := 0 - SNAN_TOKEN:j := 1 - ZERO_VALUE_TOKEN: j := 2 - ONE_VALUE_TOKEN: j := 3 - NEG_INF_TOKEN: j := 4 - POS_INF_TOKEN: j := 5 - NEG_VALUE_TOKEN: j := 6 - POS_VALUE_TOKEN: j := 7 - ESAC - - token_response[3:0] := src3[3+4*j:4*j] - - CASE(token_response[3:0]) OF - 0 : dest[63:0] := src1[63:0] - 1 : dest[63:0] := tsrc[63:0] - 2 : dest[63:0] := QNaN(tsrc[63:0]) - 3 : dest[63:0] := QNAN_Indefinite - 4 : dest[63:0] := -INF - 5 : dest[63:0] := +INF - 6 : dest[63:0] := tsrc.sign? -INF : +INF - 7 : dest[63:0] := -0 - 8 : dest[63:0] := +0 - 9 : dest[63:0] := -1 - 10: dest[63:0] := +1 - 11: dest[63:0] := 1/2 - 12: dest[63:0] := 90.0 - 13: dest[63:0] := PI/2 - 14: dest[63:0] := MAX_FLOAT - 15: dest[63:0] := -MAX_FLOAT - ESAC - - CASE(tsrc[31:0]) OF - ZERO_VALUE_TOKEN: - IF (imm8[0]) #ZE; FI - ZERO_VALUE_TOKEN: - IF (imm8[1]) #IE; FI - ONE_VALUE_TOKEN: - IF (imm8[2]) #ZE; FI - ONE_VALUE_TOKEN: - IF (imm8[3]) #IE; FI - SNAN_TOKEN: - IF (imm8[4]) #IE; FI - NEG_INF_TOKEN: - IF (imm8[5]) #IE; FI - NEG_VALUE_TOKEN: - IF (imm8[6]) #IE; FI - POS_INF_TOKEN: - IF (imm8[7]) #IE; FI - ESAC - RETURN dest[63:0] - } - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + + + Fix up packed double-precision (64-bit) floating-point elements in "a" and "b" using packed 64-bit integers in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting. + [sae_note] + enum TOKEN_TYPE { + QNAN_TOKEN := 0, \ + SNAN_TOKEN := 1, \ + ZERO_VALUE_TOKEN := 2, \ + ONE_VALUE_TOKEN := 3, \ + NEG_INF_TOKEN := 4, \ + POS_INF_TOKEN := 5, \ + NEG_VALUE_TOKEN := 6, \ + POS_VALUE_TOKEN := 7 +} +DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) { + tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0] + CASE(tsrc[63:0]) OF + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) OF + 0 : dest[63:0] := src1[63:0] + 1 : dest[63:0] := tsrc[63:0] + 2 : dest[63:0] := QNaN(tsrc[63:0]) + 3 : dest[63:0] := QNAN_Indefinite + 4 : dest[63:0] := -INF + 5 : dest[63:0] := +INF + 6 : dest[63:0] := tsrc.sign? -INF : +INF + 7 : dest[63:0] := -0 + 8 : dest[63:0] := +0 + 9 : dest[63:0] := -1 + 10: dest[63:0] := +1 + 11: dest[63:0] := 1/2 + 12: dest[63:0] := 90.0 + 13: dest[63:0] := PI/2 + 14: dest[63:0] := MAX_FLOAT + 15: dest[63:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0]) OF + ZERO_VALUE_TOKEN: + IF (imm8[0]) #ZE; FI + ZERO_VALUE_TOKEN: + IF (imm8[1]) #IE; FI + ONE_VALUE_TOKEN: + IF (imm8[2]) #ZE; FI + ONE_VALUE_TOKEN: + IF (imm8[3]) #IE; FI + SNAN_TOKEN: + IF (imm8[4]) #IE; FI + NEG_INF_TOKEN: + IF (imm8[5]) #IE; FI + NEG_VALUE_TOKEN: + IF (imm8[6]) #IE; FI + POS_INF_TOKEN: + IF (imm8[7]) #IE; FI + ESAC + RETURN dest[63:0] +} +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
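
The six entries above are the `_mm512_fixupimm_pd` family (plain, `_round` with [sae_note], and the mask/maskz forms). The third operand is a per-lane rule table: each input in "b" is classified into one of eight tokens, and that token's nibble in "c" selects a replacement. A sketch that rewrites NaN lanes to +0.0 and passes everything else through (`imm8 = 0`, so no exception flags are requested), under the same nightly-feature assumption:

    #![feature(stdarch_x86_avx512)]
    use std::arch::x86_64::*;

    #[target_feature(enable = "avx512f")]
    unsafe fn nan_to_zero(v: __m512d) -> __m512d {
        // One nibble per TOKEN_TYPE: tokens 0 (QNaN) and 1 (SNaN) get
        // response 8 (+0.0); tokens 2..7 get response 1 (pass tsrc through).
        let rules = _mm512_set1_epi64(0x1111_1188);
        _mm512_fixupimm_pd::<0>(v, v, rules)
    }
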
- - - - - - Fix up packed single-precision (32-bit) floating-point elements in "a" and "b" - using packed 32-bit integers in "c", and store the results in "dst". "imm8" is used to - set the required flags reporting. - enum TOKEN_TYPE { - QNAN_TOKEN := 0, \ - SNAN_TOKEN := 1, \ - ZERO_VALUE_TOKEN := 2, \ - ONE_VALUE_TOKEN := 3, \ - NEG_INF_TOKEN := 4, \ - POS_INF_TOKEN := 5, \ - NEG_VALUE_TOKEN := 6, \ - POS_VALUE_TOKEN := 7 - } - DEFINE FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]) { - tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0] - CASE(tsrc[31:0]) OF - QNAN_TOKEN:j := 0 - SNAN_TOKEN:j := 1 - ZERO_VALUE_TOKEN: j := 2 - ONE_VALUE_TOKEN: j := 3 - NEG_INF_TOKEN: j := 4 - POS_INF_TOKEN: j := 5 - NEG_VALUE_TOKEN: j := 6 - POS_VALUE_TOKEN: j := 7 - ESAC - - token_response[3:0] := src3[3+4*j:4*j] - - CASE(token_response[3:0]) OF - 0 : dest[31:0] := src1[31:0] - 1 : dest[31:0] := tsrc[31:0] - 2 : dest[31:0] := QNaN(tsrc[31:0]) - 3 : dest[31:0] := QNAN_Indefinite - 4 : dest[31:0] := -INF - 5 : dest[31:0] := +INF - 6 : dest[31:0] := tsrc.sign? -INF : +INF - 7 : dest[31:0] := -0 - 8 : dest[31:0] := +0 - 9 : dest[31:0] := -1 - 10: dest[31:0] := +1 - 11: dest[31:0] := 1/2 - 12: dest[31:0] := 90.0 - 13: dest[31:0] := PI/2 - 14: dest[31:0] := MAX_FLOAT - 15: dest[31:0] := -MAX_FLOAT - ESAC - - CASE(tsrc[31:0]) OF - ZERO_VALUE_TOKEN: - IF (imm8[0]) #ZE; FI - ZERO_VALUE_TOKEN: - IF (imm8[1]) #IE; FI - ONE_VALUE_TOKEN: - IF (imm8[2]) #ZE; FI - ONE_VALUE_TOKEN: - IF (imm8[3]) #IE; FI - SNAN_TOKEN: - IF (imm8[4]) #IE; FI - NEG_INF_TOKEN: - IF (imm8[5]) #IE; FI - NEG_VALUE_TOKEN: - IF (imm8[6]) #IE; FI - POS_INF_TOKEN: - IF (imm8[7]) #IE; FI - ESAC - RETURN dest[31:0] - } - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := FIXUPIMMPD(a[i+31:i], b[i+31:i], c[i+31:i], imm8[7:0]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + Fix up packed single-precision (32-bit) floating-point elements in "a" and "b" using packed 32-bit integers in "c", and store the results in "dst". "imm8" is used to set the required flags reporting. + enum TOKEN_TYPE { + QNAN_TOKEN := 0, \ + SNAN_TOKEN := 1, \ + ZERO_VALUE_TOKEN := 2, \ + ONE_VALUE_TOKEN := 3, \ + NEG_INF_TOKEN := 4, \ + POS_INF_TOKEN := 5, \ + NEG_VALUE_TOKEN := 6, \ + POS_VALUE_TOKEN := 7 +} +DEFINE FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]) { + tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0] + CASE(tsrc[31:0]) OF + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) OF + 0 : dest[31:0] := src1[31:0] + 1 : dest[31:0] := tsrc[31:0] + 2 : dest[31:0] := QNaN(tsrc[31:0]) + 3 : dest[31:0] := QNAN_Indefinite + 4 : dest[31:0] := -INF + 5 : dest[31:0] := +INF + 6 : dest[31:0] := tsrc.sign? -INF : +INF + 7 : dest[31:0] := -0 + 8 : dest[31:0] := +0 + 9 : dest[31:0] := -1 + 10: dest[31:0] := +1 + 11: dest[31:0] := 1/2 + 12: dest[31:0] := 90.0 + 13: dest[31:0] := PI/2 + 14: dest[31:0] := MAX_FLOAT + 15: dest[31:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0]) OF + ZERO_VALUE_TOKEN: + IF (imm8[0]) #ZE; FI + ZERO_VALUE_TOKEN: + IF (imm8[1]) #IE; FI + ONE_VALUE_TOKEN: + IF (imm8[2]) #ZE; FI + ONE_VALUE_TOKEN: + IF (imm8[3]) #IE; FI + SNAN_TOKEN: + IF (imm8[4]) #IE; FI + NEG_INF_TOKEN: + IF (imm8[5]) #IE; FI + NEG_VALUE_TOKEN: + IF (imm8[6]) #IE; FI + POS_INF_TOKEN: + IF (imm8[7]) #IE; FI + ESAC + RETURN dest[31:0] +} +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := FIXUPIMMPD(a[i+31:i], b[i+31:i], c[i+31:i], imm8[7:0]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - - Fix up packed single-precision (32-bit) floating-point elements in "a" and "b" - using packed 32-bit integers in "c", and store the results in "dst". "imm8" is used to - set the required flags reporting. - [sae_note] - enum TOKEN_TYPE { - QNAN_TOKEN := 0, \ - SNAN_TOKEN := 1, \ - ZERO_VALUE_TOKEN := 2, \ - ONE_VALUE_TOKEN := 3, \ - NEG_INF_TOKEN := 4, \ - POS_INF_TOKEN := 5, \ - NEG_VALUE_TOKEN := 6, \ - POS_VALUE_TOKEN := 7 - } - DEFINE FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]) { - tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0] - CASE(tsrc[31:0]) OF - QNAN_TOKEN:j := 0 - SNAN_TOKEN:j := 1 - ZERO_VALUE_TOKEN: j := 2 - ONE_VALUE_TOKEN: j := 3 - NEG_INF_TOKEN: j := 4 - POS_INF_TOKEN: j := 5 - NEG_VALUE_TOKEN: j := 6 - POS_VALUE_TOKEN: j := 7 - ESAC - - token_response[3:0] := src3[3+4*j:4*j] - - CASE(token_response[3:0]) OF - 0 : dest[31:0] := src1[31:0] - 1 : dest[31:0] := tsrc[31:0] - 2 : dest[31:0] := QNaN(tsrc[31:0]) - 3 : dest[31:0] := QNAN_Indefinite - 4 : dest[31:0] := -INF - 5 : dest[31:0] := +INF - 6 : dest[31:0] := tsrc.sign? -INF : +INF - 7 : dest[31:0] := -0 - 8 : dest[31:0] := +0 - 9 : dest[31:0] := -1 - 10: dest[31:0] := +1 - 11: dest[31:0] := 1/2 - 12: dest[31:0] := 90.0 - 13: dest[31:0] := PI/2 - 14: dest[31:0] := MAX_FLOAT - 15: dest[31:0] := -MAX_FLOAT - ESAC - - CASE(tsrc[31:0]) OF - ZERO_VALUE_TOKEN: - IF (imm8[0]) #ZE; FI - ZERO_VALUE_TOKEN: - IF (imm8[1]) #IE; FI - ONE_VALUE_TOKEN: - IF (imm8[2]) #ZE; FI - ONE_VALUE_TOKEN: - IF (imm8[3]) #IE; FI - SNAN_TOKEN: - IF (imm8[4]) #IE; FI - NEG_INF_TOKEN: - IF (imm8[5]) #IE; FI - NEG_VALUE_TOKEN: - IF (imm8[6]) #IE; FI - POS_INF_TOKEN: - IF (imm8[7]) #IE; FI - ESAC - RETURN dest[31:0] - } - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := FIXUPIMMPD(a[i+31:i], b[i+31:i], c[i+31:i], imm8[7:0]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + + Fix up packed single-precision (32-bit) floating-point elements in "a" and "b" using packed 32-bit integers in "c", and store the results in "dst". "imm8" is used to set the required flags reporting. + [sae_note] + enum TOKEN_TYPE { + QNAN_TOKEN := 0, \ + SNAN_TOKEN := 1, \ + ZERO_VALUE_TOKEN := 2, \ + ONE_VALUE_TOKEN := 3, \ + NEG_INF_TOKEN := 4, \ + POS_INF_TOKEN := 5, \ + NEG_VALUE_TOKEN := 6, \ + POS_VALUE_TOKEN := 7 +} +DEFINE FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]) { + tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0] + CASE(tsrc[31:0]) OF + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) OF + 0 : dest[31:0] := src1[31:0] + 1 : dest[31:0] := tsrc[31:0] + 2 : dest[31:0] := QNaN(tsrc[31:0]) + 3 : dest[31:0] := QNAN_Indefinite + 4 : dest[31:0] := -INF + 5 : dest[31:0] := +INF + 6 : dest[31:0] := tsrc.sign? -INF : +INF + 7 : dest[31:0] := -0 + 8 : dest[31:0] := +0 + 9 : dest[31:0] := -1 + 10: dest[31:0] := +1 + 11: dest[31:0] := 1/2 + 12: dest[31:0] := 90.0 + 13: dest[31:0] := PI/2 + 14: dest[31:0] := MAX_FLOAT + 15: dest[31:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0]) OF + ZERO_VALUE_TOKEN: + IF (imm8[0]) #ZE; FI + ZERO_VALUE_TOKEN: + IF (imm8[1]) #IE; FI + ONE_VALUE_TOKEN: + IF (imm8[2]) #ZE; FI + ONE_VALUE_TOKEN: + IF (imm8[3]) #IE; FI + SNAN_TOKEN: + IF (imm8[4]) #IE; FI + NEG_INF_TOKEN: + IF (imm8[5]) #IE; FI + NEG_VALUE_TOKEN: + IF (imm8[6]) #IE; FI + POS_INF_TOKEN: + IF (imm8[7]) #IE; FI + ESAC + RETURN dest[31:0] +} +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := FIXUPIMMPD(a[i+31:i], b[i+31:i], c[i+31:i], imm8[7:0]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
-[previously hard-wrapped copy of the description and operation shown below]
-AVX512F
-immintrin.h
-Miscellaneous
+Fix up packed single-precision (32-bit) floating-point elements in "a" and "b" using packed 32-bit integers in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting.
+enum TOKEN_TYPE {
+    QNAN_TOKEN := 0, \
+    SNAN_TOKEN := 1, \
+    ZERO_VALUE_TOKEN := 2, \
+    ONE_VALUE_TOKEN := 3, \
+    NEG_INF_TOKEN := 4, \
+    POS_INF_TOKEN := 5, \
+    NEG_VALUE_TOKEN := 6, \
+    POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]) {
+    tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0]
+    CASE(tsrc[31:0]) OF
+    QNAN_TOKEN: j := 0
+    SNAN_TOKEN: j := 1
+    ZERO_VALUE_TOKEN: j := 2
+    ONE_VALUE_TOKEN: j := 3
+    NEG_INF_TOKEN: j := 4
+    POS_INF_TOKEN: j := 5
+    NEG_VALUE_TOKEN: j := 6
+    POS_VALUE_TOKEN: j := 7
+    ESAC
+
+    token_response[3:0] := src3[3+4*j:4*j]
+
+    CASE(token_response[3:0]) OF
+    0 : dest[31:0] := src1[31:0]
+    1 : dest[31:0] := tsrc[31:0]
+    2 : dest[31:0] := QNaN(tsrc[31:0])
+    3 : dest[31:0] := QNAN_Indefinite
+    4 : dest[31:0] := -INF
+    5 : dest[31:0] := +INF
+    6 : dest[31:0] := tsrc.sign? -INF : +INF
+    7 : dest[31:0] := -0
+    8 : dest[31:0] := +0
+    9 : dest[31:0] := -1
+    10: dest[31:0] := +1
+    11: dest[31:0] := 1/2
+    12: dest[31:0] := 90.0
+    13: dest[31:0] := PI/2
+    14: dest[31:0] := MAX_FLOAT
+    15: dest[31:0] := -MAX_FLOAT
+    ESAC
+
+    CASE(tsrc[31:0]) OF
+    ZERO_VALUE_TOKEN:
+        IF (imm8[0]) #ZE; FI
+    ZERO_VALUE_TOKEN:
+        IF (imm8[1]) #IE; FI
+    ONE_VALUE_TOKEN:
+        IF (imm8[2]) #ZE; FI
+    ONE_VALUE_TOKEN:
+        IF (imm8[3]) #IE; FI
+    SNAN_TOKEN:
+        IF (imm8[4]) #IE; FI
+    NEG_INF_TOKEN:
+        IF (imm8[5]) #IE; FI
+    NEG_VALUE_TOKEN:
+        IF (imm8[6]) #IE; FI
+    POS_INF_TOKEN:
+        IF (imm8[7]) #IE; FI
+    ESAC
+    RETURN dest[31:0]
+}
+FOR j := 0 to 15
+    i := j*32
+    IF k[j]
+        dst[i+31:i] := FIXUPIMMPD(a[i+31:i], b[i+31:i], c[i+31:i], imm8[7:0])
+    ELSE
+        dst[i+31:i] := a[i+31:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+AVX512F
+immintrin.h
+Miscellaneous
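The operation above is easier to follow in ordinary code. Below is a minimal, illustrative Rust sketch of the per-element FIXUPIMM step for f32, assuming the token numbering and response table from the pseudocode; the name fixupimm_f32 is hypothetical (not part of stdarch), signaling NaNs are not distinguished from quiet ones, and the MXCSR.DAZ handling and the #ZE/#IE flag reporting controlled by "imm8" are omitted.

    // Illustrative model only, not the stdarch implementation.
    fn fixupimm_f32(src1: f32, src2: f32, table: u32) -> f32 {
        // Classify src2 into one of the eight TOKEN_TYPE values (0..=7).
        let j: u32 = if src2.is_nan() {
            0 // QNAN_TOKEN (SNAN_TOKEN = 1 not distinguished in this sketch)
        } else if src2 == 0.0 {
            2 // ZERO_VALUE_TOKEN (matches +0.0 and -0.0)
        } else if src2 == 1.0 {
            3 // ONE_VALUE_TOKEN
        } else if src2 == f32::NEG_INFINITY {
            4 // NEG_INF_TOKEN
        } else if src2 == f32::INFINITY {
            5 // POS_INF_TOKEN
        } else if src2 < 0.0 {
            6 // NEG_VALUE_TOKEN
        } else {
            7 // POS_VALUE_TOKEN
        };
        // The token selects a 4-bit response from the nibble table in `table`.
        let response = (table >> (4 * j)) & 0xF;
        match response {
            0 => src1,
            1 => src2,
            2 => f32::NAN, // QNaN(src2)
            3 => f32::NAN, // QNaN_Indefinite
            4 => f32::NEG_INFINITY,
            5 => f32::INFINITY,
            6 => if src2.is_sign_negative() { f32::NEG_INFINITY } else { f32::INFINITY },
            7 => -0.0,
            8 => 0.0,
            9 => -1.0,
            10 => 1.0,
            11 => 0.5,
            12 => 90.0,
            13 => std::f32::consts::FRAC_PI_2,
            14 => f32::MAX,
            15 => f32::MIN, // -MAX_FLOAT
            _ => unreachable!(),
        }
    }

The essential point is that "c" is not data but a lookup table: the classified input picks one of eight nibbles, and that nibble picks the replacement value.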
-[previously hard-wrapped copy of the description and operation shown below]
-AVX512F
-immintrin.h
-Miscellaneous
+Fix up packed single-precision (32-bit) floating-point elements in "a" and "b" using packed 32-bit integers in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting.
+[sae_note]
+enum TOKEN_TYPE {
+    QNAN_TOKEN := 0, \
+    SNAN_TOKEN := 1, \
+    ZERO_VALUE_TOKEN := 2, \
+    ONE_VALUE_TOKEN := 3, \
+    NEG_INF_TOKEN := 4, \
+    POS_INF_TOKEN := 5, \
+    NEG_VALUE_TOKEN := 6, \
+    POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]) {
+    tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0]
+    CASE(tsrc[31:0]) OF
+    QNAN_TOKEN: j := 0
+    SNAN_TOKEN: j := 1
+    ZERO_VALUE_TOKEN: j := 2
+    ONE_VALUE_TOKEN: j := 3
+    NEG_INF_TOKEN: j := 4
+    POS_INF_TOKEN: j := 5
+    NEG_VALUE_TOKEN: j := 6
+    POS_VALUE_TOKEN: j := 7
+    ESAC
+
+    token_response[3:0] := src3[3+4*j:4*j]
+
+    CASE(token_response[3:0]) OF
+    0 : dest[31:0] := src1[31:0]
+    1 : dest[31:0] := tsrc[31:0]
+    2 : dest[31:0] := QNaN(tsrc[31:0])
+    3 : dest[31:0] := QNAN_Indefinite
+    4 : dest[31:0] := -INF
+    5 : dest[31:0] := +INF
+    6 : dest[31:0] := tsrc.sign? -INF : +INF
+    7 : dest[31:0] := -0
+    8 : dest[31:0] := +0
+    9 : dest[31:0] := -1
+    10: dest[31:0] := +1
+    11: dest[31:0] := 1/2
+    12: dest[31:0] := 90.0
+    13: dest[31:0] := PI/2
+    14: dest[31:0] := MAX_FLOAT
+    15: dest[31:0] := -MAX_FLOAT
+    ESAC
+
+    CASE(tsrc[31:0]) OF
+    ZERO_VALUE_TOKEN:
+        IF (imm8[0]) #ZE; FI
+    ZERO_VALUE_TOKEN:
+        IF (imm8[1]) #IE; FI
+    ONE_VALUE_TOKEN:
+        IF (imm8[2]) #ZE; FI
+    ONE_VALUE_TOKEN:
+        IF (imm8[3]) #IE; FI
+    SNAN_TOKEN:
+        IF (imm8[4]) #IE; FI
+    NEG_INF_TOKEN:
+        IF (imm8[5]) #IE; FI
+    NEG_VALUE_TOKEN:
+        IF (imm8[6]) #IE; FI
+    POS_INF_TOKEN:
+        IF (imm8[7]) #IE; FI
+    ESAC
+    RETURN dest[31:0]
+}
+FOR j := 0 to 15
+    i := j*32
+    IF k[j]
+        dst[i+31:i] := FIXUPIMMPD(a[i+31:i], b[i+31:i], c[i+31:i], imm8[7:0])
+    ELSE
+        dst[i+31:i] := a[i+31:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+AVX512F
+immintrin.h
+Miscellaneous
-[previously hard-wrapped copy of the description and operation shown below]
-AVX512F
-immintrin.h
-Miscellaneous
+Fix up packed single-precision (32-bit) floating-point elements in "a" and "b" using packed 32-bit integers in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting.
+enum TOKEN_TYPE {
+    QNAN_TOKEN := 0, \
+    SNAN_TOKEN := 1, \
+    ZERO_VALUE_TOKEN := 2, \
+    ONE_VALUE_TOKEN := 3, \
+    NEG_INF_TOKEN := 4, \
+    POS_INF_TOKEN := 5, \
+    NEG_VALUE_TOKEN := 6, \
+    POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]) {
+    tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0]
+    CASE(tsrc[31:0]) OF
+    QNAN_TOKEN: j := 0
+    SNAN_TOKEN: j := 1
+    ZERO_VALUE_TOKEN: j := 2
+    ONE_VALUE_TOKEN: j := 3
+    NEG_INF_TOKEN: j := 4
+    POS_INF_TOKEN: j := 5
+    NEG_VALUE_TOKEN: j := 6
+    POS_VALUE_TOKEN: j := 7
+    ESAC
+
+    token_response[3:0] := src3[3+4*j:4*j]
+
+    CASE(token_response[3:0]) OF
+    0 : dest[31:0] := src1[31:0]
+    1 : dest[31:0] := tsrc[31:0]
+    2 : dest[31:0] := QNaN(tsrc[31:0])
+    3 : dest[31:0] := QNAN_Indefinite
+    4 : dest[31:0] := -INF
+    5 : dest[31:0] := +INF
+    6 : dest[31:0] := tsrc.sign? -INF : +INF
+    7 : dest[31:0] := -0
+    8 : dest[31:0] := +0
+    9 : dest[31:0] := -1
+    10: dest[31:0] := +1
+    11: dest[31:0] := 1/2
+    12: dest[31:0] := 90.0
+    13: dest[31:0] := PI/2
+    14: dest[31:0] := MAX_FLOAT
+    15: dest[31:0] := -MAX_FLOAT
+    ESAC
+
+    CASE(tsrc[31:0]) OF
+    ZERO_VALUE_TOKEN:
+        IF (imm8[0]) #ZE; FI
+    ZERO_VALUE_TOKEN:
+        IF (imm8[1]) #IE; FI
+    ONE_VALUE_TOKEN:
+        IF (imm8[2]) #ZE; FI
+    ONE_VALUE_TOKEN:
+        IF (imm8[3]) #IE; FI
+    SNAN_TOKEN:
+        IF (imm8[4]) #IE; FI
+    NEG_INF_TOKEN:
+        IF (imm8[5]) #IE; FI
+    NEG_VALUE_TOKEN:
+        IF (imm8[6]) #IE; FI
+    POS_INF_TOKEN:
+        IF (imm8[7]) #IE; FI
+    ESAC
+    RETURN dest[31:0]
+}
+FOR j := 0 to 15
+    i := j*32
+    IF k[j]
+        dst[i+31:i] := FIXUPIMMPD(a[i+31:i], b[i+31:i], c[i+31:i], imm8[7:0])
+    ELSE
+        dst[i+31:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+AVX512F
+immintrin.h
+Miscellaneous
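As a sketch of how the mask and maskz forms relate, assuming the hypothetical fixupimm_f32 model shown earlier, the only difference is the fallback value taken by lanes whose mask bit is clear:

    // Illustrative model of the 16-lane masked forms; `zero_masked_lanes`
    // selects maskz (zeromask) behavior instead of mask (writemask) behavior.
    fn fixupimm_lanes(
        a: &[f32; 16],
        b: &[f32; 16],
        c: &[u32; 16],
        k: u16,
        zero_masked_lanes: bool,
    ) -> [f32; 16] {
        let mut dst = [0.0f32; 16];
        for j in 0..16 {
            dst[j] = if (k >> j) & 1 == 1 {
                fixupimm_f32(a[j], b[j], c[j])
            } else if zero_masked_lanes {
                0.0 // maskz: lane is zeroed out
            } else {
                a[j] // mask: lane is copied from `a`
            };
        }
        dst
    }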
-[previously hard-wrapped copy of the description and operation shown below]
-AVX512F
-immintrin.h
-Miscellaneous
+Fix up packed single-precision (32-bit) floating-point elements in "a" and "b" using packed 32-bit integers in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting.
+[sae_note]
+enum TOKEN_TYPE {
+    QNAN_TOKEN := 0, \
+    SNAN_TOKEN := 1, \
+    ZERO_VALUE_TOKEN := 2, \
+    ONE_VALUE_TOKEN := 3, \
+    NEG_INF_TOKEN := 4, \
+    POS_INF_TOKEN := 5, \
+    NEG_VALUE_TOKEN := 6, \
+    POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]) {
+    tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0]
+    CASE(tsrc[31:0]) OF
+    QNAN_TOKEN: j := 0
+    SNAN_TOKEN: j := 1
+    ZERO_VALUE_TOKEN: j := 2
+    ONE_VALUE_TOKEN: j := 3
+    NEG_INF_TOKEN: j := 4
+    POS_INF_TOKEN: j := 5
+    NEG_VALUE_TOKEN: j := 6
+    POS_VALUE_TOKEN: j := 7
+    ESAC
+
+    token_response[3:0] := src3[3+4*j:4*j]
+
+    CASE(token_response[3:0]) OF
+    0 : dest[31:0] := src1[31:0]
+    1 : dest[31:0] := tsrc[31:0]
+    2 : dest[31:0] := QNaN(tsrc[31:0])
+    3 : dest[31:0] := QNAN_Indefinite
+    4 : dest[31:0] := -INF
+    5 : dest[31:0] := +INF
+    6 : dest[31:0] := tsrc.sign? -INF : +INF
+    7 : dest[31:0] := -0
+    8 : dest[31:0] := +0
+    9 : dest[31:0] := -1
+    10: dest[31:0] := +1
+    11: dest[31:0] := 1/2
+    12: dest[31:0] := 90.0
+    13: dest[31:0] := PI/2
+    14: dest[31:0] := MAX_FLOAT
+    15: dest[31:0] := -MAX_FLOAT
+    ESAC
+
+    CASE(tsrc[31:0]) OF
+    ZERO_VALUE_TOKEN:
+        IF (imm8[0]) #ZE; FI
+    ZERO_VALUE_TOKEN:
+        IF (imm8[1]) #IE; FI
+    ONE_VALUE_TOKEN:
+        IF (imm8[2]) #ZE; FI
+    ONE_VALUE_TOKEN:
+        IF (imm8[3]) #IE; FI
+    SNAN_TOKEN:
+        IF (imm8[4]) #IE; FI
+    NEG_INF_TOKEN:
+        IF (imm8[5]) #IE; FI
+    NEG_VALUE_TOKEN:
+        IF (imm8[6]) #IE; FI
+    POS_INF_TOKEN:
+        IF (imm8[7]) #IE; FI
+    ESAC
+    RETURN dest[31:0]
+}
+FOR j := 0 to 15
+    i := j*32
+    IF k[j]
+        dst[i+31:i] := FIXUPIMMPD(a[i+31:i], b[i+31:i], c[i+31:i], imm8[7:0])
+    ELSE
+        dst[i+31:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+AVX512F
+immintrin.h
+Miscellaneous
-[previously hard-wrapped copy of the description and operation shown below]
-AVX512F
-immintrin.h
-Miscellaneous
+Fix up the lower double-precision (64-bit) floating-point elements in "a" and "b" using the lower 64-bit integer in "c", store the result in the lower element of "dst", and copy the upper element from "b" to the upper element of "dst". "imm8" is used to set the required flags reporting.
+[sae_note]
+enum TOKEN_TYPE {
+    QNAN_TOKEN := 0, \
+    SNAN_TOKEN := 1, \
+    ZERO_VALUE_TOKEN := 2, \
+    ONE_VALUE_TOKEN := 3, \
+    NEG_INF_TOKEN := 4, \
+    POS_INF_TOKEN := 5, \
+    NEG_VALUE_TOKEN := 6, \
+    POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) {
+    tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0]
+    CASE(tsrc[63:0]) OF
+    QNAN_TOKEN: j := 0
+    SNAN_TOKEN: j := 1
+    ZERO_VALUE_TOKEN: j := 2
+    ONE_VALUE_TOKEN: j := 3
+    NEG_INF_TOKEN: j := 4
+    POS_INF_TOKEN: j := 5
+    NEG_VALUE_TOKEN: j := 6
+    POS_VALUE_TOKEN: j := 7
+    ESAC
+
+    token_response[3:0] := src3[3+4*j:4*j]
+
+    CASE(token_response[3:0]) OF
+    0 : dest[63:0] := src1[63:0]
+    1 : dest[63:0] := tsrc[63:0]
+    2 : dest[63:0] := QNaN(tsrc[63:0])
+    3 : dest[63:0] := QNAN_Indefinite
+    4 : dest[63:0] := -INF
+    5 : dest[63:0] := +INF
+    6 : dest[63:0] := tsrc.sign? -INF : +INF
+    7 : dest[63:0] := -0
+    8 : dest[63:0] := +0
+    9 : dest[63:0] := -1
+    10: dest[63:0] := +1
+    11: dest[63:0] := 1/2
+    12: dest[63:0] := 90.0
+    13: dest[63:0] := PI/2
+    14: dest[63:0] := MAX_FLOAT
+    15: dest[63:0] := -MAX_FLOAT
+    ESAC
+
+    CASE(tsrc[63:0]) OF
+    ZERO_VALUE_TOKEN:
+        IF (imm8[0]) #ZE; FI
+    ZERO_VALUE_TOKEN:
+        IF (imm8[1]) #IE; FI
+    ONE_VALUE_TOKEN:
+        IF (imm8[2]) #ZE; FI
+    ONE_VALUE_TOKEN:
+        IF (imm8[3]) #IE; FI
+    SNAN_TOKEN:
+        IF (imm8[4]) #IE; FI
+    NEG_INF_TOKEN:
+        IF (imm8[5]) #IE; FI
+    NEG_VALUE_TOKEN:
+        IF (imm8[6]) #IE; FI
+    POS_INF_TOKEN:
+        IF (imm8[7]) #IE; FI
+    ESAC
+    RETURN dest[63:0]
+}
+dst[63:0] := FIXUPIMMPD(a[63:0], b[63:0], c[63:0], imm8[7:0])
+dst[127:64] := b[127:64]
+dst[MAX:128] := 0
+AVX512F
+immintrin.h
+Miscellaneous
-[previously hard-wrapped copy of the description and operation shown below]
-AVX512F
-immintrin.h
-Miscellaneous
+Fix up the lower double-precision (64-bit) floating-point elements in "a" and "b" using the lower 64-bit integer in "c", store the result in the lower element of "dst", and copy the upper element from "b" to the upper element of "dst". "imm8" is used to set the required flags reporting.
+enum TOKEN_TYPE {
+    QNAN_TOKEN := 0, \
+    SNAN_TOKEN := 1, \
+    ZERO_VALUE_TOKEN := 2, \
+    ONE_VALUE_TOKEN := 3, \
+    NEG_INF_TOKEN := 4, \
+    POS_INF_TOKEN := 5, \
+    NEG_VALUE_TOKEN := 6, \
+    POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) {
+    tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0]
+    CASE(tsrc[63:0]) OF
+    QNAN_TOKEN: j := 0
+    SNAN_TOKEN: j := 1
+    ZERO_VALUE_TOKEN: j := 2
+    ONE_VALUE_TOKEN: j := 3
+    NEG_INF_TOKEN: j := 4
+    POS_INF_TOKEN: j := 5
+    NEG_VALUE_TOKEN: j := 6
+    POS_VALUE_TOKEN: j := 7
+    ESAC
+
+    token_response[3:0] := src3[3+4*j:4*j]
+
+    CASE(token_response[3:0]) OF
+    0 : dest[63:0] := src1[63:0]
+    1 : dest[63:0] := tsrc[63:0]
+    2 : dest[63:0] := QNaN(tsrc[63:0])
+    3 : dest[63:0] := QNAN_Indefinite
+    4 : dest[63:0] := -INF
+    5 : dest[63:0] := +INF
+    6 : dest[63:0] := tsrc.sign? -INF : +INF
+    7 : dest[63:0] := -0
+    8 : dest[63:0] := +0
+    9 : dest[63:0] := -1
+    10: dest[63:0] := +1
+    11: dest[63:0] := 1/2
+    12: dest[63:0] := 90.0
+    13: dest[63:0] := PI/2
+    14: dest[63:0] := MAX_FLOAT
+    15: dest[63:0] := -MAX_FLOAT
+    ESAC
+
+    CASE(tsrc[63:0]) OF
+    ZERO_VALUE_TOKEN:
+        IF (imm8[0]) #ZE; FI
+    ZERO_VALUE_TOKEN:
+        IF (imm8[1]) #IE; FI
+    ONE_VALUE_TOKEN:
+        IF (imm8[2]) #ZE; FI
+    ONE_VALUE_TOKEN:
+        IF (imm8[3]) #IE; FI
+    SNAN_TOKEN:
+        IF (imm8[4]) #IE; FI
+    NEG_INF_TOKEN:
+        IF (imm8[5]) #IE; FI
+    NEG_VALUE_TOKEN:
+        IF (imm8[6]) #IE; FI
+    POS_INF_TOKEN:
+        IF (imm8[7]) #IE; FI
+    ESAC
+    RETURN dest[63:0]
+}
+dst[63:0] := FIXUPIMMPD(a[63:0], b[63:0], c[63:0], imm8[7:0])
+dst[127:64] := b[127:64]
+dst[MAX:128] := 0
+AVX512F
+immintrin.h
+Miscellaneous
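A small sketch of the scalar "sd" shape, with a hypothetical fixup closure standing in for the 64-bit FIXUPIMMPD body above. Only bits 31:0 of "c" can ever be read, since the highest nibble index used is 3+4*7 = 31:

    // Illustrative model only; `fixup` is assumed to behave like an f64
    // analogue of the earlier fixupimm_f32 sketch.
    fn fixupimm_sd_model(
        a: [f64; 2],
        b: [f64; 2],
        c: [i64; 2],
        fixup: impl Fn(f64, f64, u32) -> f64,
    ) -> [f64; 2] {
        [
            fixup(a[0], b[0], c[0] as u32), // lower element is fixed up
            b[1],                           // upper element copied from `b`
        ]
    }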
-[previously hard-wrapped copy of the description and operation shown below]
-AVX512F
-immintrin.h
-Miscellaneous
+Fix up the lower double-precision (64-bit) floating-point elements in "a" and "b" using the lower 64-bit integer in "c", store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper element from "b" to the upper element of "dst". "imm8" is used to set the required flags reporting.
+[sae_note]
+enum TOKEN_TYPE {
+    QNAN_TOKEN := 0, \
+    SNAN_TOKEN := 1, \
+    ZERO_VALUE_TOKEN := 2, \
+    ONE_VALUE_TOKEN := 3, \
+    NEG_INF_TOKEN := 4, \
+    POS_INF_TOKEN := 5, \
+    NEG_VALUE_TOKEN := 6, \
+    POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) {
+    tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0]
+    CASE(tsrc[63:0]) OF
+    QNAN_TOKEN: j := 0
+    SNAN_TOKEN: j := 1
+    ZERO_VALUE_TOKEN: j := 2
+    ONE_VALUE_TOKEN: j := 3
+    NEG_INF_TOKEN: j := 4
+    POS_INF_TOKEN: j := 5
+    NEG_VALUE_TOKEN: j := 6
+    POS_VALUE_TOKEN: j := 7
+    ESAC
+
+    token_response[3:0] := src3[3+4*j:4*j]
+
+    CASE(token_response[3:0]) OF
+    0 : dest[63:0] := src1[63:0]
+    1 : dest[63:0] := tsrc[63:0]
+    2 : dest[63:0] := QNaN(tsrc[63:0])
+    3 : dest[63:0] := QNAN_Indefinite
+    4 : dest[63:0] := -INF
+    5 : dest[63:0] := +INF
+    6 : dest[63:0] := tsrc.sign? -INF : +INF
+    7 : dest[63:0] := -0
+    8 : dest[63:0] := +0
+    9 : dest[63:0] := -1
+    10: dest[63:0] := +1
+    11: dest[63:0] := 1/2
+    12: dest[63:0] := 90.0
+    13: dest[63:0] := PI/2
+    14: dest[63:0] := MAX_FLOAT
+    15: dest[63:0] := -MAX_FLOAT
+    ESAC
+
+    CASE(tsrc[63:0]) OF
+    ZERO_VALUE_TOKEN:
+        IF (imm8[0]) #ZE; FI
+    ZERO_VALUE_TOKEN:
+        IF (imm8[1]) #IE; FI
+    ONE_VALUE_TOKEN:
+        IF (imm8[2]) #ZE; FI
+    ONE_VALUE_TOKEN:
+        IF (imm8[3]) #IE; FI
+    SNAN_TOKEN:
+        IF (imm8[4]) #IE; FI
+    NEG_INF_TOKEN:
+        IF (imm8[5]) #IE; FI
+    NEG_VALUE_TOKEN:
+        IF (imm8[6]) #IE; FI
+    POS_INF_TOKEN:
+        IF (imm8[7]) #IE; FI
+    ESAC
+    RETURN dest[63:0]
+}
+IF k[0]
+    dst[63:0] := FIXUPIMMPD(a[63:0], b[63:0], c[63:0], imm8[7:0])
+ELSE
+    dst[63:0] := a[63:0]
+FI
+dst[127:64] := b[127:64]
+dst[MAX:128] := 0
+AVX512F
+immintrin.h
+Miscellaneous
-[previously hard-wrapped copy of the description and operation shown below]
-AVX512F
-immintrin.h
-Miscellaneous
+Fix up the lower double-precision (64-bit) floating-point elements in "a" and "b" using the lower 64-bit integer in "c", store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper element from "b" to the upper element of "dst". "imm8" is used to set the required flags reporting.
+enum TOKEN_TYPE {
+    QNAN_TOKEN := 0, \
+    SNAN_TOKEN := 1, \
+    ZERO_VALUE_TOKEN := 2, \
+    ONE_VALUE_TOKEN := 3, \
+    NEG_INF_TOKEN := 4, \
+    POS_INF_TOKEN := 5, \
+    NEG_VALUE_TOKEN := 6, \
+    POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) {
+    tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0]
+    CASE(tsrc[63:0]) OF
+    QNAN_TOKEN: j := 0
+    SNAN_TOKEN: j := 1
+    ZERO_VALUE_TOKEN: j := 2
+    ONE_VALUE_TOKEN: j := 3
+    NEG_INF_TOKEN: j := 4
+    POS_INF_TOKEN: j := 5
+    NEG_VALUE_TOKEN: j := 6
+    POS_VALUE_TOKEN: j := 7
+    ESAC
+
+    token_response[3:0] := src3[3+4*j:4*j]
+
+    CASE(token_response[3:0]) OF
+    0 : dest[63:0] := src1[63:0]
+    1 : dest[63:0] := tsrc[63:0]
+    2 : dest[63:0] := QNaN(tsrc[63:0])
+    3 : dest[63:0] := QNAN_Indefinite
+    4 : dest[63:0] := -INF
+    5 : dest[63:0] := +INF
+    6 : dest[63:0] := tsrc.sign? -INF : +INF
+    7 : dest[63:0] := -0
+    8 : dest[63:0] := +0
+    9 : dest[63:0] := -1
+    10: dest[63:0] := +1
+    11: dest[63:0] := 1/2
+    12: dest[63:0] := 90.0
+    13: dest[63:0] := PI/2
+    14: dest[63:0] := MAX_FLOAT
+    15: dest[63:0] := -MAX_FLOAT
+    ESAC
+
+    CASE(tsrc[63:0]) OF
+    ZERO_VALUE_TOKEN:
+        IF (imm8[0]) #ZE; FI
+    ZERO_VALUE_TOKEN:
+        IF (imm8[1]) #IE; FI
+    ONE_VALUE_TOKEN:
+        IF (imm8[2]) #ZE; FI
+    ONE_VALUE_TOKEN:
+        IF (imm8[3]) #IE; FI
+    SNAN_TOKEN:
+        IF (imm8[4]) #IE; FI
+    NEG_INF_TOKEN:
+        IF (imm8[5]) #IE; FI
+    NEG_VALUE_TOKEN:
+        IF (imm8[6]) #IE; FI
+    POS_INF_TOKEN:
+        IF (imm8[7]) #IE; FI
+    ESAC
+    RETURN dest[63:0]
+}
+IF k[0]
+    dst[63:0] := FIXUPIMMPD(a[63:0], b[63:0], c[63:0], imm8[7:0])
+ELSE
+    dst[63:0] := a[63:0]
+FI
+dst[127:64] := b[127:64]
+dst[MAX:128] := 0
+AVX512F
+immintrin.h
+Miscellaneous
-[previously hard-wrapped copy of the description and operation shown below]
-AVX512F
-immintrin.h
-Miscellaneous
+Fix up the lower double-precision (64-bit) floating-point elements in "a" and "b" using the lower 64-bit integer in "c", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "b" to the upper element of "dst". "imm8" is used to set the required flags reporting.
+[sae_note]
+enum TOKEN_TYPE {
+    QNAN_TOKEN := 0, \
+    SNAN_TOKEN := 1, \
+    ZERO_VALUE_TOKEN := 2, \
+    ONE_VALUE_TOKEN := 3, \
+    NEG_INF_TOKEN := 4, \
+    POS_INF_TOKEN := 5, \
+    NEG_VALUE_TOKEN := 6, \
+    POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) {
+    tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0]
+    CASE(tsrc[63:0]) OF
+    QNAN_TOKEN: j := 0
+    SNAN_TOKEN: j := 1
+    ZERO_VALUE_TOKEN: j := 2
+    ONE_VALUE_TOKEN: j := 3
+    NEG_INF_TOKEN: j := 4
+    POS_INF_TOKEN: j := 5
+    NEG_VALUE_TOKEN: j := 6
+    POS_VALUE_TOKEN: j := 7
+    ESAC
+
+    token_response[3:0] := src3[3+4*j:4*j]
+
+    CASE(token_response[3:0]) OF
+    0 : dest[63:0] := src1[63:0]
+    1 : dest[63:0] := tsrc[63:0]
+    2 : dest[63:0] := QNaN(tsrc[63:0])
+    3 : dest[63:0] := QNAN_Indefinite
+    4 : dest[63:0] := -INF
+    5 : dest[63:0] := +INF
+    6 : dest[63:0] := tsrc.sign? -INF : +INF
+    7 : dest[63:0] := -0
+    8 : dest[63:0] := +0
+    9 : dest[63:0] := -1
+    10: dest[63:0] := +1
+    11: dest[63:0] := 1/2
+    12: dest[63:0] := 90.0
+    13: dest[63:0] := PI/2
+    14: dest[63:0] := MAX_FLOAT
+    15: dest[63:0] := -MAX_FLOAT
+    ESAC
+
+    CASE(tsrc[63:0]) OF
+    ZERO_VALUE_TOKEN:
+        IF (imm8[0]) #ZE; FI
+    ZERO_VALUE_TOKEN:
+        IF (imm8[1]) #IE; FI
+    ONE_VALUE_TOKEN:
+        IF (imm8[2]) #ZE; FI
+    ONE_VALUE_TOKEN:
+        IF (imm8[3]) #IE; FI
+    SNAN_TOKEN:
+        IF (imm8[4]) #IE; FI
+    NEG_INF_TOKEN:
+        IF (imm8[5]) #IE; FI
+    NEG_VALUE_TOKEN:
+        IF (imm8[6]) #IE; FI
+    POS_INF_TOKEN:
+        IF (imm8[7]) #IE; FI
+    ESAC
+    RETURN dest[63:0]
+}
+IF k[0]
+    dst[63:0] := FIXUPIMMPD(a[63:0], b[63:0], c[63:0], imm8[7:0])
+ELSE
+    dst[63:0] := 0
+FI
+dst[127:64] := b[127:64]
+dst[MAX:128] := 0
+AVX512F
+immintrin.h
+Miscellaneous
-[previously hard-wrapped copy of the description and operation shown below]
-AVX512F
-immintrin.h
-Miscellaneous
+Fix up the lower double-precision (64-bit) floating-point elements in "a" and "b" using the lower 64-bit integer in "c", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "b" to the upper element of "dst". "imm8" is used to set the required flags reporting.
+enum TOKEN_TYPE {
+    QNAN_TOKEN := 0, \
+    SNAN_TOKEN := 1, \
+    ZERO_VALUE_TOKEN := 2, \
+    ONE_VALUE_TOKEN := 3, \
+    NEG_INF_TOKEN := 4, \
+    POS_INF_TOKEN := 5, \
+    NEG_VALUE_TOKEN := 6, \
+    POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) {
+    tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0]
+    CASE(tsrc[63:0]) OF
+    QNAN_TOKEN: j := 0
+    SNAN_TOKEN: j := 1
+    ZERO_VALUE_TOKEN: j := 2
+    ONE_VALUE_TOKEN: j := 3
+    NEG_INF_TOKEN: j := 4
+    POS_INF_TOKEN: j := 5
+    NEG_VALUE_TOKEN: j := 6
+    POS_VALUE_TOKEN: j := 7
+    ESAC
+
+    token_response[3:0] := src3[3+4*j:4*j]
+
+    CASE(token_response[3:0]) OF
+    0 : dest[63:0] := src1[63:0]
+    1 : dest[63:0] := tsrc[63:0]
+    2 : dest[63:0] := QNaN(tsrc[63:0])
+    3 : dest[63:0] := QNAN_Indefinite
+    4 : dest[63:0] := -INF
+    5 : dest[63:0] := +INF
+    6 : dest[63:0] := tsrc.sign? -INF : +INF
+    7 : dest[63:0] := -0
+    8 : dest[63:0] := +0
+    9 : dest[63:0] := -1
+    10: dest[63:0] := +1
+    11: dest[63:0] := 1/2
+    12: dest[63:0] := 90.0
+    13: dest[63:0] := PI/2
+    14: dest[63:0] := MAX_FLOAT
+    15: dest[63:0] := -MAX_FLOAT
+    ESAC
+
+    CASE(tsrc[63:0]) OF
+    ZERO_VALUE_TOKEN:
+        IF (imm8[0]) #ZE; FI
+    ZERO_VALUE_TOKEN:
+        IF (imm8[1]) #IE; FI
+    ONE_VALUE_TOKEN:
+        IF (imm8[2]) #ZE; FI
+    ONE_VALUE_TOKEN:
+        IF (imm8[3]) #IE; FI
+    SNAN_TOKEN:
+        IF (imm8[4]) #IE; FI
+    NEG_INF_TOKEN:
+        IF (imm8[5]) #IE; FI
+    NEG_VALUE_TOKEN:
+        IF (imm8[6]) #IE; FI
+    POS_INF_TOKEN:
+        IF (imm8[7]) #IE; FI
+    ESAC
+    RETURN dest[63:0]
+}
+IF k[0]
+    dst[63:0] := FIXUPIMMPD(a[63:0], b[63:0], c[63:0], imm8[7:0])
+ELSE
+    dst[63:0] := 0
+FI
+dst[127:64] := b[127:64]
+dst[MAX:128] := 0
+AVX512F
+immintrin.h
+Miscellaneous
-[previously hard-wrapped copy of the description and operation shown below]
-AVX512F
-immintrin.h
-Miscellaneous
+Fix up the lower single-precision (32-bit) floating-point elements in "a" and "b" using the lower 32-bit integer in "c", store the result in the lower element of "dst", and copy the upper 3 packed elements from "b" to the upper elements of "dst". "imm8" is used to set the required flags reporting.
+[sae_note]
+enum TOKEN_TYPE {
+    QNAN_TOKEN := 0, \
+    SNAN_TOKEN := 1, \
+    ZERO_VALUE_TOKEN := 2, \
+    ONE_VALUE_TOKEN := 3, \
+    NEG_INF_TOKEN := 4, \
+    POS_INF_TOKEN := 5, \
+    NEG_VALUE_TOKEN := 6, \
+    POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]) {
+    tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0]
+    CASE(tsrc[31:0]) OF
+    QNAN_TOKEN: j := 0
+    SNAN_TOKEN: j := 1
+    ZERO_VALUE_TOKEN: j := 2
+    ONE_VALUE_TOKEN: j := 3
+    NEG_INF_TOKEN: j := 4
+    POS_INF_TOKEN: j := 5
+    NEG_VALUE_TOKEN: j := 6
+    POS_VALUE_TOKEN: j := 7
+    ESAC
+
+    token_response[3:0] := src3[3+4*j:4*j]
+
+    CASE(token_response[3:0]) OF
+    0 : dest[31:0] := src1[31:0]
+    1 : dest[31:0] := tsrc[31:0]
+    2 : dest[31:0] := QNaN(tsrc[31:0])
+    3 : dest[31:0] := QNAN_Indefinite
+    4 : dest[31:0] := -INF
+    5 : dest[31:0] := +INF
+    6 : dest[31:0] := tsrc.sign? -INF : +INF
+    7 : dest[31:0] := -0
+    8 : dest[31:0] := +0
+    9 : dest[31:0] := -1
+    10: dest[31:0] := +1
+    11: dest[31:0] := 1/2
+    12: dest[31:0] := 90.0
+    13: dest[31:0] := PI/2
+    14: dest[31:0] := MAX_FLOAT
+    15: dest[31:0] := -MAX_FLOAT
+    ESAC
+
+    CASE(tsrc[31:0]) OF
+    ZERO_VALUE_TOKEN:
+        IF (imm8[0]) #ZE; FI
+    ZERO_VALUE_TOKEN:
+        IF (imm8[1]) #IE; FI
+    ONE_VALUE_TOKEN:
+        IF (imm8[2]) #ZE; FI
+    ONE_VALUE_TOKEN:
+        IF (imm8[3]) #IE; FI
+    SNAN_TOKEN:
+        IF (imm8[4]) #IE; FI
+    NEG_INF_TOKEN:
+        IF (imm8[5]) #IE; FI
+    NEG_VALUE_TOKEN:
+        IF (imm8[6]) #IE; FI
+    POS_INF_TOKEN:
+        IF (imm8[7]) #IE; FI
+    ESAC
+    RETURN dest[31:0]
+}
+dst[31:0] := FIXUPIMMPD(a[31:0], b[31:0], c[31:0], imm8[7:0])
+dst[127:32] := b[127:32]
+dst[MAX:128] := 0
+AVX512F
+immintrin.h
+Miscellaneous
-[previously hard-wrapped copy of the description and operation shown below]
-AVX512F
-immintrin.h
-Miscellaneous
+Fix up the lower single-precision (32-bit) floating-point elements in "a" and "b" using the lower 32-bit integer in "c", store the result in the lower element of "dst", and copy the upper 3 packed elements from "b" to the upper elements of "dst". "imm8" is used to set the required flags reporting.
+enum TOKEN_TYPE {
+    QNAN_TOKEN := 0, \
+    SNAN_TOKEN := 1, \
+    ZERO_VALUE_TOKEN := 2, \
+    ONE_VALUE_TOKEN := 3, \
+    NEG_INF_TOKEN := 4, \
+    POS_INF_TOKEN := 5, \
+    NEG_VALUE_TOKEN := 6, \
+    POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]) {
+    tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0]
+    CASE(tsrc[31:0]) OF
+    QNAN_TOKEN: j := 0
+    SNAN_TOKEN: j := 1
+    ZERO_VALUE_TOKEN: j := 2
+    ONE_VALUE_TOKEN: j := 3
+    NEG_INF_TOKEN: j := 4
+    POS_INF_TOKEN: j := 5
+    NEG_VALUE_TOKEN: j := 6
+    POS_VALUE_TOKEN: j := 7
+    ESAC
+
+    token_response[3:0] := src3[3+4*j:4*j]
+
+    CASE(token_response[3:0]) OF
+    0 : dest[31:0] := src1[31:0]
+    1 : dest[31:0] := tsrc[31:0]
+    2 : dest[31:0] := QNaN(tsrc[31:0])
+    3 : dest[31:0] := QNAN_Indefinite
+    4 : dest[31:0] := -INF
+    5 : dest[31:0] := +INF
+    6 : dest[31:0] := tsrc.sign? -INF : +INF
+    7 : dest[31:0] := -0
+    8 : dest[31:0] := +0
+    9 : dest[31:0] := -1
+    10: dest[31:0] := +1
+    11: dest[31:0] := 1/2
+    12: dest[31:0] := 90.0
+    13: dest[31:0] := PI/2
+    14: dest[31:0] := MAX_FLOAT
+    15: dest[31:0] := -MAX_FLOAT
+    ESAC
+
+    CASE(tsrc[31:0]) OF
+    ZERO_VALUE_TOKEN:
+        IF (imm8[0]) #ZE; FI
+    ZERO_VALUE_TOKEN:
+        IF (imm8[1]) #IE; FI
+    ONE_VALUE_TOKEN:
+        IF (imm8[2]) #ZE; FI
+    ONE_VALUE_TOKEN:
+        IF (imm8[3]) #IE; FI
+    SNAN_TOKEN:
+        IF (imm8[4]) #IE; FI
+    NEG_INF_TOKEN:
+        IF (imm8[5]) #IE; FI
+    NEG_VALUE_TOKEN:
+        IF (imm8[6]) #IE; FI
+    POS_INF_TOKEN:
+        IF (imm8[7]) #IE; FI
+    ESAC
+    RETURN dest[31:0]
+}
+dst[31:0] := FIXUPIMMPD(a[31:0], b[31:0], c[31:0], imm8[7:0])
+dst[127:32] := b[127:32]
+dst[MAX:128] := 0
+AVX512F
+immintrin.h
+Miscellaneous
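For usage, the practical question is how to build the 32-bit table passed in "c". A hedged example: this hypothetical helper fills all eight token nibbles, mapping QNaN/SNaN inputs to +0.0 (response 8) and passing every other input class through unchanged (response 1):

    // Illustrative helper only; token numbers follow the TOKEN_TYPE enum above.
    const QNAN_TOKEN: u32 = 0;
    const SNAN_TOKEN: u32 = 1;

    fn nan_to_zero_table() -> u32 {
        let mut table = 0u32;
        for token in 0..8u32 {
            // Response 8 = +0.0, response 1 = pass tsrc through unchanged.
            let response: u32 = if token == QNAN_TOKEN || token == SNAN_TOKEN { 8 } else { 1 };
            table |= response << (4 * token);
        }
        table
    }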
-[previously hard-wrapped copy of the description and operation shown below]
-AVX512F
-immintrin.h
-Miscellaneous
+Fix up the lower single-precision (32-bit) floating-point elements in "a" and "b" using the lower 32-bit integer in "c", store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper 3 packed elements from "b" to the upper elements of "dst". "imm8" is used to set the required flags reporting.
+[sae_note]
+enum TOKEN_TYPE {
+    QNAN_TOKEN := 0, \
+    SNAN_TOKEN := 1, \
+    ZERO_VALUE_TOKEN := 2, \
+    ONE_VALUE_TOKEN := 3, \
+    NEG_INF_TOKEN := 4, \
+    POS_INF_TOKEN := 5, \
+    NEG_VALUE_TOKEN := 6, \
+    POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]) {
+    tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0]
+    CASE(tsrc[31:0]) OF
+    QNAN_TOKEN: j := 0
+    SNAN_TOKEN: j := 1
+    ZERO_VALUE_TOKEN: j := 2
+    ONE_VALUE_TOKEN: j := 3
+    NEG_INF_TOKEN: j := 4
+    POS_INF_TOKEN: j := 5
+    NEG_VALUE_TOKEN: j := 6
+    POS_VALUE_TOKEN: j := 7
+    ESAC
+
+    token_response[3:0] := src3[3+4*j:4*j]
+
+    CASE(token_response[3:0]) OF
+    0 : dest[31:0] := src1[31:0]
+    1 : dest[31:0] := tsrc[31:0]
+    2 : dest[31:0] := QNaN(tsrc[31:0])
+    3 : dest[31:0] := QNAN_Indefinite
+    4 : dest[31:0] := -INF
+    5 : dest[31:0] := +INF
+    6 : dest[31:0] := tsrc.sign? -INF : +INF
+    7 : dest[31:0] := -0
+    8 : dest[31:0] := +0
+    9 : dest[31:0] := -1
+    10: dest[31:0] := +1
+    11: dest[31:0] := 1/2
+    12: dest[31:0] := 90.0
+    13: dest[31:0] := PI/2
+    14: dest[31:0] := MAX_FLOAT
+    15: dest[31:0] := -MAX_FLOAT
+    ESAC
+
+    CASE(tsrc[31:0]) OF
+    ZERO_VALUE_TOKEN:
+        IF (imm8[0]) #ZE; FI
+    ZERO_VALUE_TOKEN:
+        IF (imm8[1]) #IE; FI
+    ONE_VALUE_TOKEN:
+        IF (imm8[2]) #ZE; FI
+    ONE_VALUE_TOKEN:
+        IF (imm8[3]) #IE; FI
+    SNAN_TOKEN:
+        IF (imm8[4]) #IE; FI
+    NEG_INF_TOKEN:
+        IF (imm8[5]) #IE; FI
+    NEG_VALUE_TOKEN:
+        IF (imm8[6]) #IE; FI
+    POS_INF_TOKEN:
+        IF (imm8[7]) #IE; FI
+    ESAC
+    RETURN dest[31:0]
+}
+IF k[0]
+    dst[31:0] := FIXUPIMMPD(a[31:0], b[31:0], c[31:0], imm8[7:0])
+ELSE
+    dst[31:0] := a[31:0]
+FI
+dst[127:32] := b[127:32]
+dst[MAX:128] := 0
+AVX512F
+immintrin.h
+Miscellaneous
-[previously hard-wrapped copy of the description and operation shown below]
-AVX512F
-immintrin.h
-Miscellaneous
+Fix up the lower single-precision (32-bit) floating-point elements in "a" and "b" using the lower 32-bit integer in "c", store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper 3 packed elements from "b" to the upper elements of "dst". "imm8" is used to set the required flags reporting.
+enum TOKEN_TYPE {
+    QNAN_TOKEN := 0, \
+    SNAN_TOKEN := 1, \
+    ZERO_VALUE_TOKEN := 2, \
+    ONE_VALUE_TOKEN := 3, \
+    NEG_INF_TOKEN := 4, \
+    POS_INF_TOKEN := 5, \
+    NEG_VALUE_TOKEN := 6, \
+    POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]) {
+    tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0]
+    CASE(tsrc[31:0]) OF
+    QNAN_TOKEN: j := 0
+    SNAN_TOKEN: j := 1
+    ZERO_VALUE_TOKEN: j := 2
+    ONE_VALUE_TOKEN: j := 3
+    NEG_INF_TOKEN: j := 4
+    POS_INF_TOKEN: j := 5
+    NEG_VALUE_TOKEN: j := 6
+    POS_VALUE_TOKEN: j := 7
+    ESAC
+
+    token_response[3:0] := src3[3+4*j:4*j]
+
+    CASE(token_response[3:0]) OF
+    0 : dest[31:0] := src1[31:0]
+    1 : dest[31:0] := tsrc[31:0]
+    2 : dest[31:0] := QNaN(tsrc[31:0])
+    3 : dest[31:0] := QNAN_Indefinite
+    4 : dest[31:0] := -INF
+    5 : dest[31:0] := +INF
+    6 : dest[31:0] := tsrc.sign? -INF : +INF
+    7 : dest[31:0] := -0
+    8 : dest[31:0] := +0
+    9 : dest[31:0] := -1
+    10: dest[31:0] := +1
+    11: dest[31:0] := 1/2
+    12: dest[31:0] := 90.0
+    13: dest[31:0] := PI/2
+    14: dest[31:0] := MAX_FLOAT
+    15: dest[31:0] := -MAX_FLOAT
+    ESAC
+
+    CASE(tsrc[31:0]) OF
+    ZERO_VALUE_TOKEN:
+        IF (imm8[0]) #ZE; FI
+    ZERO_VALUE_TOKEN:
+        IF (imm8[1]) #IE; FI
+    ONE_VALUE_TOKEN:
+        IF (imm8[2]) #ZE; FI
+    ONE_VALUE_TOKEN:
+        IF (imm8[3]) #IE; FI
+    SNAN_TOKEN:
+        IF (imm8[4]) #IE; FI
+    NEG_INF_TOKEN:
+        IF (imm8[5]) #IE; FI
+    NEG_VALUE_TOKEN:
+        IF (imm8[6]) #IE; FI
+    POS_INF_TOKEN:
+        IF (imm8[7]) #IE; FI
+    ESAC
+    RETURN dest[31:0]
+}
+IF k[0]
+    dst[31:0] := FIXUPIMMPD(a[31:0], b[31:0], c[31:0], imm8[7:0])
+ELSE
+    dst[31:0] := a[31:0]
+FI
+dst[127:32] := b[127:32]
+dst[MAX:128] := 0
+AVX512F
+immintrin.h
+Miscellaneous
- - - - - - - - Fix up the lower single-precision (32-bit) floating-point elements in "a" and - "b" using the lower 32-bit integer in "c", store the result in the lower element of - "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and - copy the upper 3 packed elements from "b" to the upper elements of "dst". "imm8" is used - to set the required flags reporting. - [sae_note] - enum TOKEN_TYPE { - QNAN_TOKEN := 0, \ - SNAN_TOKEN := 1, \ - ZERO_VALUE_TOKEN := 2, \ - ONE_VALUE_TOKEN := 3, \ - NEG_INF_TOKEN := 4, \ - POS_INF_TOKEN := 5, \ - NEG_VALUE_TOKEN := 6, \ - POS_VALUE_TOKEN := 7 - } - DEFINE FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]) { - tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0] - CASE(tsrc[31:0]) OF - QNAN_TOKEN:j := 0 - SNAN_TOKEN:j := 1 - ZERO_VALUE_TOKEN: j := 2 - ONE_VALUE_TOKEN: j := 3 - NEG_INF_TOKEN: j := 4 - POS_INF_TOKEN: j := 5 - NEG_VALUE_TOKEN: j := 6 - POS_VALUE_TOKEN: j := 7 - ESAC - - token_response[3:0] := src3[3+4*j:4*j] - - CASE(token_response[3:0]) OF - 0 : dest[31:0] := src1[31:0] - 1 : dest[31:0] := tsrc[31:0] - 2 : dest[31:0] := QNaN(tsrc[31:0]) - 3 : dest[31:0] := QNAN_Indefinite - 4 : dest[31:0] := -INF - 5 : dest[31:0] := +INF - 6 : dest[31:0] := tsrc.sign? -INF : +INF - 7 : dest[31:0] := -0 - 8 : dest[31:0] := +0 - 9 : dest[31:0] := -1 - 10: dest[31:0] := +1 - 11: dest[31:0] := 1/2 - 12: dest[31:0] := 90.0 - 13: dest[31:0] := PI/2 - 14: dest[31:0] := MAX_FLOAT - 15: dest[31:0] := -MAX_FLOAT - ESAC - - CASE(tsrc[31:0]) OF - ZERO_VALUE_TOKEN: - IF (imm8[0]) #ZE; FI - ZERO_VALUE_TOKEN: - IF (imm8[1]) #IE; FI - ONE_VALUE_TOKEN: - IF (imm8[2]) #ZE; FI - ONE_VALUE_TOKEN: - IF (imm8[3]) #IE; FI - SNAN_TOKEN: - IF (imm8[4]) #IE; FI - NEG_INF_TOKEN: - IF (imm8[5]) #IE; FI - NEG_VALUE_TOKEN: - IF (imm8[6]) #IE; FI - POS_INF_TOKEN: - IF (imm8[7]) #IE; FI - ESAC - RETURN dest[31:0] - } - IF k[0] - dst[31:0] := FIXUPIMMPD(a[31:0], b[31:0], c[31:0], imm8[7:0]) - ELSE - dst[31:0] := 0 - FI - dst[127:32] := b[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + + + Fix up the lower single-precision (32-bit) floating-point elements in "a" and "b" using the lower 32-bit integer in "c", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "b" to the upper elements of "dst". "imm8" is used to set the required flags reporting. + [sae_note] + enum TOKEN_TYPE { + QNAN_TOKEN := 0, \ + SNAN_TOKEN := 1, \ + ZERO_VALUE_TOKEN := 2, \ + ONE_VALUE_TOKEN := 3, \ + NEG_INF_TOKEN := 4, \ + POS_INF_TOKEN := 5, \ + NEG_VALUE_TOKEN := 6, \ + POS_VALUE_TOKEN := 7 +} +DEFINE FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]) { + tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0] + CASE(tsrc[31:0]) OF + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) OF + 0 : dest[31:0] := src1[31:0] + 1 : dest[31:0] := tsrc[31:0] + 2 : dest[31:0] := QNaN(tsrc[31:0]) + 3 : dest[31:0] := QNAN_Indefinite + 4 : dest[31:0] := -INF + 5 : dest[31:0] := +INF + 6 : dest[31:0] := tsrc.sign? -INF : +INF + 7 : dest[31:0] := -0 + 8 : dest[31:0] := +0 + 9 : dest[31:0] := -1 + 10: dest[31:0] := +1 + 11: dest[31:0] := 1/2 + 12: dest[31:0] := 90.0 + 13: dest[31:0] := PI/2 + 14: dest[31:0] := MAX_FLOAT + 15: dest[31:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0]) OF + ZERO_VALUE_TOKEN: + IF (imm8[0]) #ZE; FI + ZERO_VALUE_TOKEN: + IF (imm8[1]) #IE; FI + ONE_VALUE_TOKEN: + IF (imm8[2]) #ZE; FI + ONE_VALUE_TOKEN: + IF (imm8[3]) #IE; FI + SNAN_TOKEN: + IF (imm8[4]) #IE; FI + NEG_INF_TOKEN: + IF (imm8[5]) #IE; FI + NEG_VALUE_TOKEN: + IF (imm8[6]) #IE; FI + POS_INF_TOKEN: + IF (imm8[7]) #IE; FI + ESAC + RETURN dest[31:0] +} +IF k[0] + dst[31:0] := FIXUPIMMPD(a[31:0], b[31:0], c[31:0], imm8[7:0]) +ELSE + dst[31:0] := 0 +FI +dst[127:32] := b[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - - Fix up the lower single-precision (32-bit) floating-point elements in "a" and - "b" using the lower 32-bit integer in "c", store the result in the lower element of - "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and - copy the upper 3 packed elements from "b" to the upper elements of "dst". "imm8" is used - to set the required flags reporting. - enum TOKEN_TYPE { - QNAN_TOKEN := 0, \ - SNAN_TOKEN := 1, \ - ZERO_VALUE_TOKEN := 2, \ - ONE_VALUE_TOKEN := 3, \ - NEG_INF_TOKEN := 4, \ - POS_INF_TOKEN := 5, \ - NEG_VALUE_TOKEN := 6, \ - POS_VALUE_TOKEN := 7 - } - DEFINE FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]) { - tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0] - CASE(tsrc[31:0]) OF - QNAN_TOKEN:j := 0 - SNAN_TOKEN:j := 1 - ZERO_VALUE_TOKEN: j := 2 - ONE_VALUE_TOKEN: j := 3 - NEG_INF_TOKEN: j := 4 - POS_INF_TOKEN: j := 5 - NEG_VALUE_TOKEN: j := 6 - POS_VALUE_TOKEN: j := 7 - ESAC - - token_response[3:0] := src3[3+4*j:4*j] - - CASE(token_response[3:0]) OF - 0 : dest[31:0] := src1[31:0] - 1 : dest[31:0] := tsrc[31:0] - 2 : dest[31:0] := QNaN(tsrc[31:0]) - 3 : dest[31:0] := QNAN_Indefinite - 4 : dest[31:0] := -INF - 5 : dest[31:0] := +INF - 6 : dest[31:0] := tsrc.sign? -INF : +INF - 7 : dest[31:0] := -0 - 8 : dest[31:0] := +0 - 9 : dest[31:0] := -1 - 10: dest[31:0] := +1 - 11: dest[31:0] := 1/2 - 12: dest[31:0] := 90.0 - 13: dest[31:0] := PI/2 - 14: dest[31:0] := MAX_FLOAT - 15: dest[31:0] := -MAX_FLOAT - ESAC - - CASE(tsrc[31:0]) OF - ZERO_VALUE_TOKEN: - IF (imm8[0]) #ZE; FI - ZERO_VALUE_TOKEN: - IF (imm8[1]) #IE; FI - ONE_VALUE_TOKEN: - IF (imm8[2]) #ZE; FI - ONE_VALUE_TOKEN: - IF (imm8[3]) #IE; FI - SNAN_TOKEN: - IF (imm8[4]) #IE; FI - NEG_INF_TOKEN: - IF (imm8[5]) #IE; FI - NEG_VALUE_TOKEN: - IF (imm8[6]) #IE; FI - POS_INF_TOKEN: - IF (imm8[7]) #IE; FI - ESAC - RETURN dest[31:0] - } - IF k[0] - dst[31:0] := FIXUPIMMPD(a[31:0], b[31:0], c[31:0], imm8[7:0]) - ELSE - dst[31:0] := 0 - FI - dst[127:32] := b[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + + Fix up the lower single-precision (32-bit) floating-point elements in "a" and "b" using the lower 32-bit integer in "c", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "b" to the upper elements of "dst". "imm8" is used to set the required flags reporting. + enum TOKEN_TYPE { + QNAN_TOKEN := 0, \ + SNAN_TOKEN := 1, \ + ZERO_VALUE_TOKEN := 2, \ + ONE_VALUE_TOKEN := 3, \ + NEG_INF_TOKEN := 4, \ + POS_INF_TOKEN := 5, \ + NEG_VALUE_TOKEN := 6, \ + POS_VALUE_TOKEN := 7 +} +DEFINE FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]) { + tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0] + CASE(tsrc[31:0]) OF + QNAN_TOKEN:j := 0 + SNAN_TOKEN:j := 1 + ZERO_VALUE_TOKEN: j := 2 + ONE_VALUE_TOKEN: j := 3 + NEG_INF_TOKEN: j := 4 + POS_INF_TOKEN: j := 5 + NEG_VALUE_TOKEN: j := 6 + POS_VALUE_TOKEN: j := 7 + ESAC + + token_response[3:0] := src3[3+4*j:4*j] + + CASE(token_response[3:0]) OF + 0 : dest[31:0] := src1[31:0] + 1 : dest[31:0] := tsrc[31:0] + 2 : dest[31:0] := QNaN(tsrc[31:0]) + 3 : dest[31:0] := QNAN_Indefinite + 4 : dest[31:0] := -INF + 5 : dest[31:0] := +INF + 6 : dest[31:0] := tsrc.sign? -INF : +INF + 7 : dest[31:0] := -0 + 8 : dest[31:0] := +0 + 9 : dest[31:0] := -1 + 10: dest[31:0] := +1 + 11: dest[31:0] := 1/2 + 12: dest[31:0] := 90.0 + 13: dest[31:0] := PI/2 + 14: dest[31:0] := MAX_FLOAT + 15: dest[31:0] := -MAX_FLOAT + ESAC + + CASE(tsrc[31:0]) OF + ZERO_VALUE_TOKEN: + IF (imm8[0]) #ZE; FI + ZERO_VALUE_TOKEN: + IF (imm8[1]) #IE; FI + ONE_VALUE_TOKEN: + IF (imm8[2]) #ZE; FI + ONE_VALUE_TOKEN: + IF (imm8[3]) #IE; FI + SNAN_TOKEN: + IF (imm8[4]) #IE; FI + NEG_INF_TOKEN: + IF (imm8[5]) #IE; FI + NEG_VALUE_TOKEN: + IF (imm8[6]) #IE; FI + POS_INF_TOKEN: + IF (imm8[7]) #IE; FI + ESAC + RETURN dest[31:0] +} +IF k[0] + dst[31:0] := FIXUPIMMPD(a[31:0], b[31:0], c[31:0], imm8[7:0]) +ELSE + dst[31:0] := 0 +FI +dst[127:32] := b[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
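The FIXUPIMM pseudocode above is a three-level table lookup: classify the source into one of eight tokens, use the token to pick a 4-bit response out of the 32-bit table in "c", then map that response onto a fixed list of constants. A scalar Rust model of that lookup (a sketch with illustrative names, not part of this patch; DAZ flushing and the imm8-controlled #ZE/#IE flag reporting are omitted, and QNaN payload propagation is simplified):

fn fixupimm_f32_model(src1: f32, src2: f32, src3: u32) -> f32 {
    // TOKEN_TYPE classification of tsrc (DAZ flushing omitted).
    let j = if src2.is_nan() {
        // the quiet bit (bit 22) distinguishes QNAN_TOKEN from SNAN_TOKEN
        if src2.to_bits() & 0x0040_0000 != 0 { 0 } else { 1 }
    } else if src2 == 0.0 {
        2 // ZERO_VALUE_TOKEN
    } else if src2 == 1.0 {
        3 // ONE_VALUE_TOKEN
    } else if src2 == f32::NEG_INFINITY {
        4 // NEG_INF_TOKEN
    } else if src2 == f32::INFINITY {
        5 // POS_INF_TOKEN
    } else if src2 < 0.0 {
        6 // NEG_VALUE_TOKEN
    } else {
        7 // POS_VALUE_TOKEN
    };
    // The token selects one 4-bit response from the 32-bit table in src3.
    match (src3 >> (4 * j)) & 0xF {
        0 => src1,
        1 => src2,         // tsrc
        2 | 3 => f32::NAN, // QNaN(tsrc) / QNAN_Indefinite, simplified here
        4 => f32::NEG_INFINITY,
        5 => f32::INFINITY,
        6 => if src2.is_sign_negative() { f32::NEG_INFINITY } else { f32::INFINITY },
        7 => -0.0,
        8 => 0.0,
        9 => -1.0,
        10 => 1.0,
        11 => 0.5,
        12 => 90.0,
        13 => std::f32::consts::FRAC_PI_2,
        14 => f32::MAX, // MAX_FLOAT
        _ => f32::MIN,  // 15: -MAX_FLOAT
    }
}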
- - - - Convert the exponent of each packed double-precision (64-bit) floating-point - element in "a" to a double-precision (64-bit) floating-point number representing the - integer exponent, and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). This intrinsic essentially calculates - "floor(log2(x))" for each element. - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := ConvertExpFP64(a[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element. + FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ConvertExpFP64(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
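ConvertExpFP64 above is floor(log2(|x|)), i.e. the unbiased exponent for normal inputs. A scalar Rust model plus the zeromask loop of this entry (a sketch only; the results for zero, infinity and NaN inputs follow the usual conventions and are an assumption here, not a statement of the exact hardware behavior):

fn get_exp_f64(x: f64) -> f64 {
    if x == 0.0 {
        f64::NEG_INFINITY // log2(0)
    } else if x.is_nan() {
        f64::NAN
    } else if x.is_infinite() {
        f64::INFINITY
    } else {
        x.abs().log2().floor() // the unbiased exponent for normal inputs
    }
}

// The zeromask loop of the entry above, over the eight f64 lanes:
fn maskz_getexp_pd_model(k: u8, a: [f64; 8]) -> [f64; 8] {
    let mut dst = [0.0f64; 8];
    for j in 0..8 {
        if (k >> j) & 1 == 1 {
            dst[j] = get_exp_f64(a[j]);
        }
    }
    dst
}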
- - - - - Convert the exponent of each packed double-precision (64-bit) floating-point - element in "a" to a double-precision (64-bit) floating-point number representing the - integer exponent, and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). This intrinsic essentially calculates - "floor(log2(x))" for each element. - [sae_note] - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := ConvertExpFP64(a[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element. + [sae_note] + FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ConvertExpFP64(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - Convert the exponent of each packed single-precision (32-bit) floating-point - element in "a" to a single-precision (32-bit) floating-point number representing the - integer exponent, and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). This intrinsic essentially calculates - "floor(log2(x))" for each element. - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := ConvertExpFP32(a[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element. + FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ConvertExpFP32(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - Convert the exponent of each packed single-precision (32-bit) floating-point - element in "a" to a single-precision (32-bit) floating-point number representing the - integer exponent, and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). This intrinsic essentially calculates - "floor(log2(x))" for each element. - [sae_note] - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := ConvertExpFP32(a[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element. + [sae_note] + FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ConvertExpFP32(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - Convert the exponent of the lower double-precision (64-bit) floating-point - element in "b" to a double-precision (64-bit) floating-point number representing the - integer exponent, store the result in the lower element of "dst", and copy the upper - element from "a" to the upper element of "dst". This intrinsic essentially calculates - "floor(log2(x))" for the lower element. - [sae_note] - dst[63:0] := ConvertExpFP64(b[63:0]) - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + Convert the exponent of the lower double-precision (64-bit) floating-point element in "b" to a double-precision (64-bit) floating-point number representing the integer exponent, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". This intrinsic essentially calculates "floor(log2(x))" for the lower element. + [sae_note] + dst[63:0] := ConvertExpFP64(b[63:0]) +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
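All of the scalar double-precision entries here share one shape: compute the low 64-bit lane, then copy the upper lane from "a" unchanged. Modeled on two-element arrays (a sketch, keeping only the normal-input path of ConvertExpFP64):

// dst[63:0] from "b", dst[127:64] from "a", as in the pseudocode above.
fn getexp_sd_model(a: [f64; 2], b: [f64; 2]) -> [f64; 2] {
    [b[0].abs().log2().floor(), a[1]]
}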
- - - - Convert the exponent of the lower double-precision (64-bit) floating-point - element in "b" to a double-precision (64-bit) floating-point number representing the - integer exponent, store the result in the lower element of "dst", and copy the upper - element from "a" to the upper element of "dst". This intrinsic essentially calculates - "floor(log2(x))" for the lower element. - dst[63:0] := ConvertExpFP64(b[63:0]) - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + Convert the exponent of the lower double-precision (64-bit) floating-point element in "b" to a double-precision (64-bit) floating-point number representing the integer exponent, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". This intrinsic essentially calculates "floor(log2(x))" for the lower element. + dst[63:0] := ConvertExpFP64(b[63:0]) +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - - Convert the exponent of the lower double-precision (64-bit) floating-point - element in "b" to a double-precision (64-bit) floating-point number representing the - integer exponent, store the result in the lower element of "dst" using writemask "k" - (the element is copied from "src" when mask bit 0 is not set), and copy the upper - element from "a" to the upper element of "dst". This intrinsic essentially calculates - "floor(log2(x))" for the lower element. - [sae_note] - IF k[0] - dst[63:0] := ConvertExpFP64(b[63:0]) - ELSE - dst[63:0] := src[63:0] - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + + Convert the exponent of the lower double-precision (64-bit) floating-point element in "b" to a double-precision (64-bit) floating-point number representing the integer exponent, store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". This intrinsic essentially calculates "floor(log2(x))" for the lower element. + [sae_note] + IF k[0] + dst[63:0] := ConvertExpFP64(b[63:0]) +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - Convert the exponent of the lower double-precision (64-bit) floating-point - element in "b" to a double-precision (64-bit) floating-point number representing the - integer exponent, store the result in the lower element of "dst" using writemask "k" - (the element is copied from "src" when mask bit 0 is not set), and copy the upper - element from "a" to the upper element of "dst". This intrinsic essentially calculates - "floor(log2(x))" for the lower element. - IF k[0] - dst[63:0] := ConvertExpFP64(b[63:0]) - ELSE - dst[63:0] := src[63:0] - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + Convert the exponent of the lower double-precision (64-bit) floating-point element in "b" to a double-precision (64-bit) floating-point number representing the integer exponent, store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". This intrinsic essentially calculates "floor(log2(x))" for the lower element. + IF k[0] + dst[63:0] := ConvertExpFP64(b[63:0]) +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
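The difference between the writemask and zeromask variants throughout these entries is confined to the ELSE arm of the pseudocode. For a single lane (illustrative helpers, not stdarch API):

// writemask "k": the lane falls back to "src" when mask bit 0 is clear.
fn mask_lane(k: u8, src: f64, computed: f64) -> f64 {
    if k & 1 != 0 { computed } else { src }
}

// zeromask "k": the lane is zeroed when mask bit 0 is clear.
fn maskz_lane(k: u8, computed: f64) -> f64 {
    if k & 1 != 0 { computed } else { 0.0 }
}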
- - - - - - Convert the exponent of the lower double-precision (64-bit) floating-point - element in "b" to a double-precision (64-bit) floating-point number representing the - integer exponent, store the result in the lower element of "dst" using zeromask "k" (the - element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" - to the upper element of "dst". This intrinsic essentially calculates "floor(log2(x))" - for the lower element. - [sae_note] - IF k[0] - dst[63:0] := ConvertExpFP64(b[63:0]) - ELSE - dst[63:0] := 0 - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + Convert the exponent of the lower double-precision (64-bit) floating-point element in "b" to a double-precision (64-bit) floating-point number representing the integer exponent, store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". This intrinsic essentially calculates "floor(log2(x))" for the lower element. + [sae_note] + IF k[0] + dst[63:0] := ConvertExpFP64(b[63:0]) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - Convert the exponent of the lower double-precision (64-bit) floating-point - element in "b" to a double-precision (64-bit) floating-point number representing the - integer exponent, store the result in the lower element of "dst" using zeromask "k" (the - element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" - to the upper element of "dst". This intrinsic essentially calculates "floor(log2(x))" - for the lower element. - IF k[0] - dst[63:0] := ConvertExpFP64(b[63:0]) - ELSE - dst[63:0] := 0 - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + Convert the exponent of the lower double-precision (64-bit) floating-point element in "b" to a double-precision (64-bit) floating-point number representing the integer exponent, store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". This intrinsic essentially calculates "floor(log2(x))" for the lower element. + IF k[0] + dst[63:0] := ConvertExpFP64(b[63:0]) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - Convert the exponent of the lower single-precision (32-bit) floating-point - element in "b" to a single-precision (32-bit) floating-point number representing the - integer exponent, store the result in the lower element of "dst", and copy the upper 3 - packed elements from "a" to the upper elements of "dst". This intrinsic essentially - calculates "floor(log2(x))" for the lower element. - [sae_note] - dst[31:0] := ConvertExpFP32(b[31:0]) - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + Convert the exponent of the lower single-precision (32-bit) floating-point element in "b" to a single-precision (32-bit) floating-point number representing the integer exponent, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". This intrinsic essentially calculates "floor(log2(x))" for the lower element. + [sae_note] + dst[31:0] := ConvertExpFP32(b[31:0]) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - Convert the exponent of the lower single-precision (32-bit) floating-point - element in "b" to a single-precision (32-bit) floating-point number representing the - integer exponent, store the result in the lower element of "dst", and copy the upper 3 - packed elements from "a" to the upper elements of "dst". This intrinsic essentially - calculates "floor(log2(x))" for the lower element. - dst[31:0] := ConvertExpFP32(b[31:0]) - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + Convert the exponent of the lower single-precision (32-bit) floating-point element in "b" to a single-precision (32-bit) floating-point number representing the integer exponent, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". This intrinsic essentially calculates "floor(log2(x))" for the lower element. + dst[31:0] := ConvertExpFP32(b[31:0]) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - - Convert the exponent of the lower single-precision (32-bit) floating-point - element in "b" to a single-precision (32-bit) floating-point number representing the - integer exponent, store the result in the lower element of "dst" using writemask "k" - (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 - packed elements from "a" to the upper elements of "dst". This intrinsic essentially - calculates "floor(log2(x))" for the lower element. - [sae_note] - IF k[0] - dst[31:0] := ConvertExpFP32(b[31:0]) - ELSE - dst[31:0] := src[31:0] - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + + Convert the exponent of the lower single-precision (32-bit) floating-point element in "b" to a single-precision (32-bit) floating-point number representing the integer exponent, store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". This intrinsic essentially calculates "floor(log2(x))" for the lower element. + [sae_note] + IF k[0] + dst[31:0] := ConvertExpFP32(b[31:0]) +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - Convert the exponent of the lower single-precision (32-bit) floating-point - element in "b" to a single-precision (32-bit) floating-point number representing the - integer exponent, store the result in the lower element of "dst" using writemask "k" - (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 - packed elements from "a" to the upper elements of "dst". This intrinsic essentially - calculates "floor(log2(x))" for the lower element. - IF k[0] - dst[31:0] := ConvertExpFP32(b[31:0]) - ELSE - dst[31:0] := src[31:0] - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + Convert the exponent of the lower single-precision (32-bit) floating-point element in "b" to a single-precision (32-bit) floating-point number representing the integer exponent, store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". This intrinsic essentially calculates "floor(log2(x))" for the lower element. + IF k[0] + dst[31:0] := ConvertExpFP32(b[31:0]) +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - Convert the exponent of the lower single-precision (32-bit) floating-point - element in "b" to a single-precision (32-bit) floating-point number representing the - integer exponent, store the result in the lower element of "dst" using zeromask "k" (the - element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements - from "a" to the upper elements of "dst". This intrinsic essentially calculates - "floor(log2(x))" for the lower element. - [sae_note] - IF k[0] - dst[31:0] := ConvertExpFP32(b[31:0]) - ELSE - dst[31:0] := 0 - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + Convert the exponent of the lower single-precision (32-bit) floating-point element in "b" to a single-precision (32-bit) floating-point number representing the integer exponent, store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". This intrinsic essentially calculates "floor(log2(x))" for the lower element. + [sae_note] + IF k[0] + dst[31:0] := ConvertExpFP32(b[31:0]) +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - Convert the exponent of the lower single-precision (32-bit) floating-point - element in "b" to a single-precision (32-bit) floating-point number representing the - integer exponent, store the result in the lower element of "dst" using zeromask "k" (the - element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements - from "a" to the upper elements of "dst". This intrinsic essentially calculates - "floor(log2(x))" for the lower element. - IF k[0] - dst[31:0] := ConvertExpFP32(b[31:0]) - ELSE - dst[31:0] := 0 - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + Convert the exponent of the lower single-precision (32-bit) floating-point element in "b" to a single-precision (32-bit) floating-point number representing the integer exponent, store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". This intrinsic essentially calculates "floor(log2(x))" for the lower element. + IF k[0] + dst[31:0] := ConvertExpFP32(b[31:0]) +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - Normalize the mantissas of packed double-precision (64-bit) floating-point - elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). This intrinsic essentially calculates - "±(2^k)*|x.significand|", where "k" depends on the interval range defined by - "interv" and the sign depends on "sc" and the source sign. - [getmant_note] - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := GetNormalizedMantissa(a[i+63:i], sc, interv) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + Normalize the mantissas of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note] + FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := GetNormalizedMantissa(a[i+63:i], sc, interv) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
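GetNormalizedMantissa rescales x to "±(2^k)*|x.significand|" by rewriting the exponent field. For the simplest configuration, the [1, 2) interval with the source sign preserved, that reduces to a pure bit operation on normal inputs (a sketch; the interv/sc encodings and the handling of zero, denormal, infinity and NaN inputs are deliberately left out):

fn get_mantissa_f64(x: f64) -> f64 {
    const EXP_MASK: u64 = 0x7FF0_0000_0000_0000;
    const ONE_EXP: u64 = 1023 << 52; // biased exponent of 1.0
    // Keep sign and mantissa, force the exponent to 0: ±1.significand * 2^0,
    // so |result| lies in [1, 2) for every normal input.
    f64::from_bits((x.to_bits() & !EXP_MASK) | ONE_EXP)
}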
- - - - - - - Normalize the mantissas of packed double-precision (64-bit) floating-point - elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). This intrinsic essentially calculates - "±(2^k)*|x.significand|", where "k" depends on the interval range defined by - "interv" and the sign depends on "sc" and the source sign. - [getmant_note][sae_note] - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := GetNormalizedMantissa(a[i+63:i], sc, interv) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + + Normalize the mantissas of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note][sae_note] + FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := GetNormalizedMantissa(a[i+63:i], sc, interv) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - Normalize the mantissas of packed single-precision (32-bit) floating-point - elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). This intrinsic essentially calculates - "±(2^k)*|x.significand|", where "k" depends on the interval range defined by - "interv" and the sign depends on "sc" and the source sign. - [getmant_note] - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := GetNormalizedMantissa(a[i+31:i], sc, interv) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + Normalize the mantissas of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note] + FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := GetNormalizedMantissa(a[i+31:i], sc, interv) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - - Normalize the mantissas of packed single-precision (32-bit) floating-point - elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). This intrinsic essentially calculates - "±(2^k)*|x.significand|", where "k" depends on the interval range defined by - "interv" and the sign depends on "sc" and the source sign. - [getmant_note][sae_note] - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := GetNormalizedMantissa(a[i+31:i], sc, interv) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + + Normalize the mantissas of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note][sae_note] + FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := GetNormalizedMantissa(a[i+31:i], sc, interv) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - - Normalize the mantissas of the lower double-precision (64-bit) floating-point - element in "b", store the result in the lower element of "dst", and copy the upper - element from "a" to the upper element of "dst". This intrinsic essentially calculates - "±(2^k)*|x.significand|", where "k" depends on the interval range defined by - "interv" and the sign depends on "sc" and the source sign. - [getmant_note][sae_note] - dst[63:0] := GetNormalizedMantissa(b[63:0], sc, interv) - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + + Normalize the mantissas of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note][sae_note] + dst[63:0] := GetNormalizedMantissa(b[63:0], sc, interv) +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - Normalize the mantissas of the lower double-precision (64-bit) floating-point - element in "b", store the result in the lower element of "dst", and copy the upper - element from "a" to the upper element of "dst". This intrinsic essentially calculates - "±(2^k)*|x.significand|", where "k" depends on the interval range defined by - "interv" and the sign depends on "sc" and the source sign. - [getmant_note] - dst[63:0] := GetNormalizedMantissa(b[63:0], sc, interv) - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + Normalize the mantissas of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note] + dst[63:0] := GetNormalizedMantissa(b[63:0], sc, interv) +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - - - - Normalize the mantissas of the lower double-precision (64-bit) floating-point - element in "b", store the result in the lower element of "dst" using writemask "k" (the - element is copied from "src" when mask bit 0 is not set), and copy the upper element - from "a" to the upper element of "dst". This intrinsic essentially calculates - "±(2^k)*|x.significand|", where "k" depends on the interval range defined by - "interv" and the sign depends on "sc" and the source sign. - [getmant_note][sae_note] - IF k[0] - dst[63:0] := GetNormalizedMantissa(b[63:0], sc, interv) - ELSE - dst[63:0] := src[63:0] - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + + + + Normalize the mantissas of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note][sae_note] + IF k[0] + dst[63:0] := GetNormalizedMantissa(b[63:0], sc, interv) +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - - - Normalize the mantissas of the lower double-precision (64-bit) floating-point - element in "b", store the result in the lower element of "dst" using writemask "k" (the - element is copied from "src" when mask bit 0 is not set), and copy the upper element - from "a" to the upper element of "dst". This intrinsic essentially calculates - "±(2^k)*|x.significand|", where "k" depends on the interval range defined by - "interv" and the sign depends on "sc" and the source sign. - [getmant_note] - IF k[0] - dst[63:0] := GetNormalizedMantissa(b[63:0], sc, interv) - ELSE - dst[63:0] := src[63:0] - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + + + Normalize the mantissas of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note] + IF k[0] + dst[63:0] := GetNormalizedMantissa(b[63:0], sc, interv) +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - - - Normalize the mantissas of the lower double-precision (64-bit) floating-point - element in "b", store the result in the lower element of "dst" using zeromask "k" (the - element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" - to the upper element of "dst". This intrinsic essentially calculates - "±(2^k)*|x.significand|", where "k" depends on the interval range defined by - "interv" and the sign depends on "sc" and the source sign. - [getmant_note][sae_note] - IF k[0] - dst[63:0] := GetNormalizedMantissa(b[63:0], sc, interv) - ELSE - dst[63:0] := 0 - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + + + Normalize the mantissas of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note][sae_note] + IF k[0] + dst[63:0] := GetNormalizedMantissa(b[63:0], sc, interv) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - - Normalize the mantissas of the lower double-precision (64-bit) floating-point - element in "b", store the result in the lower element of "dst" using zeromask "k" (the - element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" - to the upper element of "dst". This intrinsic essentially calculates - "±(2^k)*|x.significand|", where "k" depends on the interval range defined by - "interv" and the sign depends on "sc" and the source sign. - [getmant_note] - IF k[0] - dst[63:0] := GetNormalizedMantissa(b[63:0], sc, interv) - ELSE - dst[63:0] := 0 - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + + Normalize the mantissas of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note] + IF k[0] + dst[63:0] := GetNormalizedMantissa(b[63:0], sc, interv) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - - Normalize the mantissas of the lower single-precision (32-bit) floating-point - element in "b", store the result in the lower element of "dst", and copy the upper 3 - packed elements from "a" to the upper elements of "dst". This intrinsic essentially - calculates "±(2^k)*|x.significand|", where "k" depends on the interval range - defined by "interv" and the sign depends on "sc" and the source sign. - [getmant_note][sae_note] - dst[31:0] := GetNormalizedMantissa(b[31:0], sc, interv) - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + + Normalize the mantissas of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note][sae_note] + dst[31:0] := GetNormalizedMantissa(b[31:0], sc, interv) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - Normalize the mantissas of the lower single-precision (32-bit) floating-point - element in "b", store the result in the lower element of "dst", and copy the upper 3 - packed elements from "a" to the upper elements of "dst". This intrinsic essentially - calculates "±(2^k)*|x.significand|", where "k" depends on the interval range - defined by "interv" and the sign depends on "sc" and the source sign. - [getmant_note] - dst[31:0] := GetNormalizedMantissa(b[31:0], sc, interv) - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + Normalize the mantissas of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note] + dst[31:0] := GetNormalizedMantissa(b[31:0], sc, interv) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - - - - Normalize the mantissas of the lower single-precision (32-bit) floating-point - element in "b", store the result in the lower element of "dst" using writemask "k" (the - element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed - elements from "a" to the upper elements of "dst". This intrinsic essentially calculates - "±(2^k)*|x.significand|", where "k" depends on the interval range defined by - "interv" and the sign depends on "sc" and the source sign. - [getmant_note][sae_note] - IF k[0] - dst[31:0] := GetNormalizedMantissa(b[31:0], sc, interv) - ELSE - dst[31:0] := src[31:0] - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + + + + Normalize the mantissas of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note][sae_note] + IF k[0] + dst[31:0] := GetNormalizedMantissa(b[31:0], sc, interv) +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - - - Normalize the mantissas of the lower single-precision (32-bit) floating-point - element in "b", store the result in the lower element of "dst" using writemask "k" (the - element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed - elements from "a" to the upper elements of "dst". This intrinsic essentially calculates - "±(2^k)*|x.significand|", where "k" depends on the interval range defined by - "interv" and the sign depends on "sc" and the source sign. - [getmant_note] - IF k[0] - dst[31:0] := GetNormalizedMantissa(b[31:0], sc, interv) - ELSE - dst[31:0] := src[31:0] - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + + + Normalize the mantissas of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note] + IF k[0] + dst[31:0] := GetNormalizedMantissa(b[31:0], sc, interv) +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - - - Normalize the mantissas of the lower single-precision (32-bit) floating-point - element in "b", store the result in the lower element of "dst" using zeromask "k" (the - element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements - from "a" to the upper elements of "dst". This intrinsic essentially calculates - "±(2^k)*|x.significand|", where "k" depends on the interval range defined by - "interv" and the sign depends on "sc" and the source sign. - [getmant_note][sae_note] - IF k[0] - dst[31:0] := GetNormalizedMantissa(b[31:0], sc, interv) - ELSE - dst[31:0] := 0 - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + + + Normalize the mantissas of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note][sae_note] + IF k[0] + dst[31:0] := GetNormalizedMantissa(b[31:0], sc, interv) +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - - Normalize the mantissas of the lower single-precision (32-bit) floating-point - element in "b", store the result in the lower element of "dst" using zeromask "k" (the - element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements - from "a" to the upper elements of "dst". This intrinsic essentially calculates - "±(2^k)*|x.significand|", where "k" depends on the interval range defined by - "interv" and the sign depends on "sc" and the source sign. - [getmant_note] - IF k[0] - dst[31:0] := GetNormalizedMantissa(b[31:0], sc, interv) - ELSE - dst[31:0] := 0 - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + + Normalize the mantissas of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign. + [getmant_note] + IF k[0] + dst[31:0] := GetNormalizedMantissa(b[31:0], sc, interv) +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - Rotate the bits in each packed 32-bit integer in "a" to the right by the number - of bits specified in the corresponding element of "b", and store the results in "dst" - using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - DEFINE RIGHT_ROTATE_DWORDS(src, count_src) { - count := count_src % 32 - RETURN (src >>count) OR (src << (32 - count)) - } - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE RIGHT_ROTATE_DWORDS(src, count_src) { + count := count_src % 32 + RETURN (src >>count) OR (src << (32 - count)) +} +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
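RIGHT_ROTATE_DWORDS above is exactly a 32-bit rotate right with the count reduced modulo 32, which Rust exposes as u32::rotate_right. One zero-masked lane of the loop (a sketch):

fn maskz_rorv_lane(k_bit: bool, a: u32, b: u32) -> u32 {
    // (a >> (b % 32)) | (a << (32 - b % 32)), as in the pseudocode.
    if k_bit { a.rotate_right(b % 32) } else { 0 }
}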
- - - - - - Round packed double-precision (64-bit) floating-point elements in "a" to the - number of fraction bits specified by "imm8", and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). [round_imm_note] - - DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) { - m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) - IF IsInf(tmp[63:0]) - tmp[63:0] := src1[63:0] - FI - RETURN tmp[63:0] - } - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := RoundScaleFP64(a[i+63:i], imm8[7:0]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + Round packed double-precision (64-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note] + +DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) { + m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) + IF IsInf(tmp[63:0]) + tmp[63:0] := src1[63:0] + FI + RETURN tmp[63:0] +} +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := RoundScaleFP64(a[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
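RoundScaleFP64 rounds to imm8[7:4] fraction bits by scaling up by 2^m, rounding to an integer in the mode selected by the low bits of imm8, and scaling back down; if the scaling overflows, the source is returned unchanged. A scalar model (a sketch; only the four explicit rounding modes are handled, and the MXCSR-directed mode and precision-exception suppression encoded in the upper bits of imm8[3:0] are omitted):

fn round_scale_f64(x: f64, imm8: u8) -> f64 {
    let m = (imm8 >> 4) as i32; // fraction bits after the binary point to keep
    let scaled = x * 2f64.powi(m);
    let rounded = match imm8 & 0x3 {
        0 => scaled.round_ties_even(), // round to nearest, ties to even
        1 => scaled.floor(),           // round down
        2 => scaled.ceil(),            // round up
        _ => scaled.trunc(),           // round toward zero
    };
    let tmp = rounded * 2f64.powi(-m);
    // The IsInf check from the pseudocode: on overflow, return the source.
    if tmp.is_infinite() { x } else { tmp }
}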
- - - - - - - Round packed double-precision (64-bit) floating-point elements in "a" to the - number of fraction bits specified by "imm8", and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). [round_imm_note][sae_note] - - DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) { - m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) - IF IsInf(tmp[63:0]) - tmp[63:0] := src1[63:0] - FI - RETURN tmp[63:0] - } - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := RoundScaleFP64(a[i+63:i], imm8[7:0]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + + Round packed double-precision (64-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note][sae_note] + +DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) { + m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) + IF IsInf(tmp[63:0]) + tmp[63:0] := src1[63:0] + FI + RETURN tmp[63:0] +} +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := RoundScaleFP64(a[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - Round packed double-precision (64-bit) floating-point elements in "a" to the - number of fraction bits specified by "imm8", and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - [round_imm_note] - - DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) { - m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) - IF IsInf(tmp[63:0]) - tmp[63:0] := src1[63:0] - FI - RETURN tmp[63:0] - } - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := RoundScaleFP64(a[i+63:i], imm8[7:0]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + Round packed double-precision (64-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note] + +DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) { + m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) + IF IsInf(tmp[63:0]) + tmp[63:0] := src1[63:0] + FI + RETURN tmp[63:0] +} +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := RoundScaleFP64(a[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - Round packed double-precision (64-bit) floating-point elements in "a" to the - number of fraction bits specified by "imm8", and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - [round_imm_note][sae_note] - - DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) { - m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) - IF IsInf(tmp[63:0]) - tmp[63:0] := src1[63:0] - FI - RETURN tmp[63:0] - } - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := RoundScaleFP64(a[i+63:i], imm8[7:0]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + Round packed double-precision (64-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note][sae_note] + +DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) { + m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) + IF IsInf(tmp[63:0]) + tmp[63:0] := src1[63:0] + FI + RETURN tmp[63:0] +} +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := RoundScaleFP64(a[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - Round packed double-precision (64-bit) floating-point elements in "a" to the - number of fraction bits specified by "imm8", and store the results in "dst". - [round_imm_note] - - DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) { - m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) - IF IsInf(tmp[63:0]) - tmp[63:0] := src1[63:0] - FI - RETURN tmp[63:0] - } - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := RoundScaleFP64(a[i+63:i], imm8[7:0]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + Round packed double-precision (64-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst". [round_imm_note] + +DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) { + m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) + IF IsInf(tmp[63:0]) + tmp[63:0] := src1[63:0] + FI + RETURN tmp[63:0] +} +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := RoundScaleFP64(a[i+63:i], imm8[7:0]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - Round packed double-precision (64-bit) floating-point elements in "a" to the - number of fraction bits specified by "imm8", and store the results in "dst". - [round_imm_note][sae_note] - - DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) { - m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) - IF IsInf(tmp[63:0]) - tmp[63:0] := src1[63:0] - FI - RETURN tmp[63:0] - } - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := RoundScaleFP64(a[i+63:i], imm8[7:0]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + Round packed double-precision (64-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst". [round_imm_note][sae_note] + +DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) { + m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) + IF IsInf(tmp[63:0]) + tmp[63:0] := src1[63:0] + FI + RETURN tmp[63:0] +} +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := RoundScaleFP64(a[i+63:i], imm8[7:0]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - Round packed single-precision (32-bit) floating-point elements in "a" to the - number of fraction bits specified by "imm8", and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). [round_imm_note] - - DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) { - m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) - IF IsInf(tmp[31:0]) - tmp[31:0] := src1[31:0] - FI - RETURN tmp[31:0] - } - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := RoundScaleFP32(a[i+31:i], imm8[7:0]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + Round packed single-precision (32-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note] + +DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) { + m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) + IF IsInf(tmp[31:0]) + tmp[31:0] := src1[31:0] + FI + RETURN tmp[31:0] +} +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := RoundScaleFP32(a[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - - Round packed single-precision (32-bit) floating-point elements in "a" to the - number of fraction bits specified by "imm8", and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). [round_imm_note][sae_note] - - DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) { - m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) - IF IsInf(tmp[31:0]) - tmp[31:0] := src1[31:0] - FI - RETURN tmp[31:0] - } - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := RoundScaleFP32(a[i+31:i], imm8[7:0]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + + Round packed single-precision (32-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note][sae_note] + +DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) { + m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) + IF IsInf(tmp[31:0]) + tmp[31:0] := src1[31:0] + FI + RETURN tmp[31:0] +} +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := RoundScaleFP32(a[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - Round packed single-precision (32-bit) floating-point elements in "a" to the - number of fraction bits specified by "imm8", and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - [round_imm_note] - - DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) { - m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) - IF IsInf(tmp[31:0]) - tmp[31:0] := src1[31:0] - FI - RETURN tmp[31:0] - } - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := RoundScaleFP32(a[i+31:i], imm8[7:0]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + Round packed single-precision (32-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note] + +DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) { + m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) + IF IsInf(tmp[31:0]) + tmp[31:0] := src1[31:0] + FI + RETURN tmp[31:0] +} +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := RoundScaleFP32(a[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - Round packed single-precision (32-bit) floating-point elements in "a" to the - number of fraction bits specified by "imm8", and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - [round_imm_note][sae_note] - - DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) { - m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) - IF IsInf(tmp[31:0]) - tmp[31:0] := src1[31:0] - FI - RETURN tmp[31:0] - } - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := RoundScaleFP32(a[i+31:i], imm8[7:0]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + Round packed single-precision (32-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note][sae_note] + +DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) { + m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) + IF IsInf(tmp[31:0]) + tmp[31:0] := src1[31:0] + FI + RETURN tmp[31:0] +} +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := RoundScaleFP32(a[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - Round packed single-precision (32-bit) floating-point elements in "a" to the - number of fraction bits specified by "imm8", and store the results in "dst". - [round_imm_note] - - DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) { - m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) - IF IsInf(tmp[31:0]) - tmp[31:0] := src1[31:0] - FI - RETURN tmp[31:0] - } - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := RoundScaleFP32(a[i+31:i], imm8[7:0]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + Round packed single-precision (32-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst". [round_imm_note] + +DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) { + m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) + IF IsInf(tmp[31:0]) + tmp[31:0] := src1[31:0] + FI + RETURN tmp[31:0] +} +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := RoundScaleFP32(a[i+31:i], imm8[7:0]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - Round packed single-precision (32-bit) floating-point elements in "a" to the - number of fraction bits specified by "imm8", and store the results in "dst". - [round_imm_note][sae_note] - - DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) { - m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) - IF IsInf(tmp[31:0]) - tmp[31:0] := src1[31:0] - FI - RETURN tmp[31:0] - } - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := RoundScaleFP32(a[i+31:i], imm8[7:0]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + Round packed single-precision (32-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst". [round_imm_note][sae_note] + +DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) { + m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) + IF IsInf(tmp[31:0]) + tmp[31:0] := src1[31:0] + FI + RETURN tmp[31:0] +} +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := RoundScaleFP32(a[i+31:i], imm8[7:0]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - - - Round the lower double-precision (64-bit) floating-point element in "b" to the - number of fraction bits specified by "imm8", store the result in the lower element of - "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), - and copy the upper element from "a" to the upper element of "dst". - [round_imm_note][sae_note] - - DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) { - m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) - IF IsInf(tmp[63:0]) - tmp[63:0] := src1[63:0] - FI - RETURN tmp[63:0] - } - IF k[0] - dst[63:0] := RoundScaleFP64(b[63:0], imm8[7:0]) - ELSE - dst[63:0] := src[63:0] - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + + + Round the lower double-precision (64-bit) floating-point element in "b" to the number of fraction bits specified by "imm8", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". [round_imm_note][sae_note] + +DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) { + m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) + IF IsInf(tmp[63:0]) + tmp[63:0] := src1[63:0] + FI + RETURN tmp[63:0] +} +IF k[0] + dst[63:0] := RoundScaleFP64(b[63:0], imm8[7:0]) +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - - Round the lower double-precision (64-bit) floating-point element in "b" to the - number of fraction bits specified by "imm8", store the result in the lower element of - "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), - and copy the upper element from "a" to the upper element of "dst". [round_imm_note] - - DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) { - m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) - IF IsInf(tmp[63:0]) - tmp[63:0] := src1[63:0] - FI - RETURN tmp[63:0] - } - IF k[0] - dst[63:0] := RoundScaleFP64(b[63:0], imm8[7:0]) - ELSE - dst[63:0] := src[63:0] - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + + Round the lower double-precision (64-bit) floating-point element in "b" to the number of fraction bits specified by "imm8", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". [round_imm_note] + +DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) { + m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) + IF IsInf(tmp[63:0]) + tmp[63:0] := src1[63:0] + FI + RETURN tmp[63:0] +} +IF k[0] + dst[63:0] := RoundScaleFP64(b[63:0], imm8[7:0]) +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - - Round the lower double-precision (64-bit) floating-point element in "b" to the - number of fraction bits specified by "imm8", store the result in the lower element of - "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and - copy the upper element from "a" to the upper element of "dst". - [round_imm_note][sae_note] - - DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) { - m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) - IF IsInf(tmp[63:0]) - tmp[63:0] := src1[63:0] - FI - RETURN tmp[63:0] - } - IF k[0] - dst[63:0] := RoundScaleFP64(b[63:0], imm8[7:0]) - ELSE - dst[63:0] := 0 - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + + Round the lower double-precision (64-bit) floating-point element in "b" to the number of fraction bits specified by "imm8", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". [round_imm_note][sae_note] + +DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) { + m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) + IF IsInf(tmp[63:0]) + tmp[63:0] := src1[63:0] + FI + RETURN tmp[63:0] +} +IF k[0] + dst[63:0] := RoundScaleFP64(b[63:0], imm8[7:0]) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - Round the lower double-precision (64-bit) floating-point element in "b" to the - number of fraction bits specified by "imm8", store the result in the lower element of - "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and - copy the upper element from "a" to the upper element of "dst". [round_imm_note] - - DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) { - m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) - IF IsInf(tmp[63:0]) - tmp[63:0] := src1[63:0] - FI - RETURN tmp[63:0] - } - IF k[0] - dst[63:0] := RoundScaleFP64(b[63:0], imm8[7:0]) - ELSE - dst[63:0] := 0 - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + Round the lower double-precision (64-bit) floating-point element in "b" to the number of fraction bits specified by "imm8", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". [round_imm_note] + +DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) { + m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) + IF IsInf(tmp[63:0]) + tmp[63:0] := src1[63:0] + FI + RETURN tmp[63:0] +} +IF k[0] + dst[63:0] := RoundScaleFP64(b[63:0], imm8[7:0]) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - Round the lower double-precision (64-bit) floating-point element in "b" to the - number of fraction bits specified by "imm8", store the result in the lower element of - "dst", and copy the upper element from "a" to the upper element of "dst". - [round_imm_note][sae_note] - - DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) { - m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) - IF IsInf(tmp[63:0]) - tmp[63:0] := src1[63:0] - FI - RETURN tmp[63:0] - } - dst[63:0] := RoundScaleFP64(b[63:0], imm8[7:0]) - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + Round the lower double-precision (64-bit) floating-point element in "b" to the number of fraction bits specified by "imm8", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". [round_imm_note][sae_note] + +DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) { + m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) + IF IsInf(tmp[63:0]) + tmp[63:0] := src1[63:0] + FI + RETURN tmp[63:0] +} +dst[63:0] := RoundScaleFP64(b[63:0], imm8[7:0]) +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - Round the lower double-precision (64-bit) floating-point element in "b" to the - number of fraction bits specified by "imm8", store the result in the lower element of - "dst", and copy the upper element from "a" to the upper element of "dst". - [round_imm_note] - - DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) { - m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) - IF IsInf(tmp[63:0]) - tmp[63:0] := src1[63:0] - FI - RETURN tmp[63:0] - } - dst[63:0] := RoundScaleFP64(b[63:0], imm8[7:0]) - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + Round the lower double-precision (64-bit) floating-point element in "b" to the number of fraction bits specified by "imm8", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". [round_imm_note] + +DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) { + m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0]) + IF IsInf(tmp[63:0]) + tmp[63:0] := src1[63:0] + FI + RETURN tmp[63:0] +} +dst[63:0] := RoundScaleFP64(b[63:0], imm8[7:0]) +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - - - Round the lower single-precision (32-bit) floating-point element in "b" to the - number of fraction bits specified by "imm8", store the result in the lower element of - "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), - and copy the upper 3 packed elements from "a" to the upper elements of "dst". - [round_imm_note][sae_note] - - DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) { - m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) - IF IsInf(tmp[31:0]) - tmp[31:0] := src1[31:0] - FI - RETURN tmp[31:0] - } - IF k[0] - dst[31:0] := RoundScaleFP32(b[31:0], imm8[7:0]) - ELSE - dst[31:0] := src[31:0] - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + + + Round the lower single-precision (32-bit) floating-point element in "b" to the number of fraction bits specified by "imm8", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". [round_imm_note][sae_note] + +DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) { + m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) + IF IsInf(tmp[31:0]) + tmp[31:0] := src1[31:0] + FI + RETURN tmp[31:0] +} +IF k[0] + dst[31:0] := RoundScaleFP32(b[31:0], imm8[7:0]) +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - - Round the lower single-precision (32-bit) floating-point element in "b" to the - number of fraction bits specified by "imm8", store the result in the lower element of - "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), - and copy the upper 3 packed elements from "a" to the upper elements of "dst". - [round_imm_note] - - DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) { - m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) - IF IsInf(tmp[31:0]) - tmp[31:0] := src1[31:0] - FI - RETURN tmp[31:0] - } - IF k[0] - dst[31:0] := RoundScaleFP32(b[31:0], imm8[7:0]) - ELSE - dst[31:0] := src[31:0] - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + + Round the lower single-precision (32-bit) floating-point element in "b" to the number of fraction bits specified by "imm8", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". [round_imm_note] + +DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) { + m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) + IF IsInf(tmp[31:0]) + tmp[31:0] := src1[31:0] + FI + RETURN tmp[31:0] +} +IF k[0] + dst[31:0] := RoundScaleFP32(b[31:0], imm8[7:0]) +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - - Round the lower single-precision (32-bit) floating-point element in "b" to the - number of fraction bits specified by "imm8", store the result in the lower element of - "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and - copy the upper 3 packed elements from "a" to the upper elements of "dst". - [round_imm_note][sae_note] - - DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) { - m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) - IF IsInf(tmp[31:0]) - tmp[31:0] := src1[31:0] - FI - RETURN tmp[31:0] - } - IF k[0] - dst[31:0] := RoundScaleFP32(b[31:0], imm8[7:0]) - ELSE - dst[31:0] := 0 - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + + Round the lower single-precision (32-bit) floating-point element in "b" to the number of fraction bits specified by "imm8", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". [round_imm_note][sae_note] + +DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) { + m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) + IF IsInf(tmp[31:0]) + tmp[31:0] := src1[31:0] + FI + RETURN tmp[31:0] +} +IF k[0] + dst[31:0] := RoundScaleFP32(b[31:0], imm8[7:0]) +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - Round the lower single-precision (32-bit) floating-point element in "b" to the - number of fraction bits specified by "imm8", store the result in the lower element of - "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and - copy the upper 3 packed elements from "a" to the upper elements of "dst". - [round_imm_note] - - DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) { - m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) - IF IsInf(tmp[31:0]) - tmp[31:0] := src1[31:0] - FI - RETURN tmp[31:0] - } - IF k[0] - dst[31:0] := RoundScaleFP32(b[31:0], imm8[7:0]) - ELSE - dst[31:0] := 0 - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + Round the lower single-precision (32-bit) floating-point element in "b" to the number of fraction bits specified by "imm8", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". [round_imm_note] + +DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) { + m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) + IF IsInf(tmp[31:0]) + tmp[31:0] := src1[31:0] + FI + RETURN tmp[31:0] +} +IF k[0] + dst[31:0] := RoundScaleFP32(b[31:0], imm8[7:0]) +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - Round the lower single-precision (32-bit) floating-point element in "b" to the - number of fraction bits specified by "imm8", store the result in the lower element of - "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". - [round_imm_note][sae_note] - - DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) { - m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) - IF IsInf(tmp[31:0]) - tmp[31:0] := src1[31:0] - FI - RETURN tmp[31:0] - } - dst[31:0] := RoundScaleFP32(b[31:0], imm8[7:0]) - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + Round the lower single-precision (32-bit) floating-point element in "b" to the number of fraction bits specified by "imm8", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". [round_imm_note][sae_note] + +DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) { + m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) + IF IsInf(tmp[31:0]) + tmp[31:0] := src1[31:0] + FI + RETURN tmp[31:0] +} +dst[31:0] := RoundScaleFP32(b[31:0], imm8[7:0]) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - Round the lower single-precision (32-bit) floating-point element in "b" to the - number of fraction bits specified by "imm8", store the result in the lower element of - "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". - [round_imm_note] - - DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) { - m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) - IF IsInf(tmp[31:0]) - tmp[31:0] := src1[31:0] - FI - RETURN tmp[31:0] - } - dst[31:0] := RoundScaleFP32(b[31:0], imm8[7:0]) - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + Round the lower single-precision (32-bit) floating-point element in "b" to the number of fraction bits specified by "imm8", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". [round_imm_note] + +DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) { + m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0]) + IF IsInf(tmp[31:0]) + tmp[31:0] := src1[31:0] + FI + RETURN tmp[31:0] +} +dst[31:0] := RoundScaleFP32(b[31:0], imm8[7:0]) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
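The RoundScale blocks re-wrapped above all compute the same quantity: tmp := 2^-M * ROUND(2^M * src, imm8[3:0]), where M = imm8[7:4] is the number of fraction bits to preserve. A minimal Rust sketch of the FP64 case (illustrative only, not part of this patch; `round_scale_f64` is a made-up helper, and only the round-to-nearest-even control of imm8[3:0] is modeled):

```rust
// Hypothetical scalar model of the RoundScaleFP64 pseudocode above.
fn round_scale_f64(src: f64, imm8: u8) -> f64 {
    let m = (imm8 >> 4) as i32; // fraction bits after the binary point to keep
    let scale = 2.0f64.powi(m);
    // POW(2.0, -m) * ROUND(POW(2.0, m) * src, imm8[3:0])
    let tmp = (src * scale).round_ties_even() / scale;
    // An infinite intermediate means src was too large to be affected at
    // this precision, so the IsInf branch returns the source unchanged.
    if tmp.is_infinite() { src } else { tmp }
}

fn main() {
    // M = 1 keeps one fraction bit: 1.75 * 2 = 3.5 rounds to 4.0
    // (ties to even), and 4.0 / 2 = 2.0.
    assert_eq!(round_scale_f64(1.75, 0x10), 2.0);
    // M = 0 rounds to a whole number.
    assert_eq!(round_scale_f64(2.5, 0x00), 2.0);
}
```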
- - - - - - Scale the packed double-precision (64-bit) floating-point elements in "a" using - values from "b", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - DEFINE SCALE(src1, src2) { - IF (src2 == NaN) - IF (src2 == SNaN) + + + + + + Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + DEFINE SCALE(src1, src2) { + IF (src2 == NaN) + IF (src2 == SNaN) RETURN QNAN(src2) - FI - ELSE IF (src1 == NaN) - IF (src1 == SNaN) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) RETURN QNAN(src1) - FI - IF (src2 != INF) + FI + IF (src2 != INF) RETURN QNAN(src1) - FI - ELSE - tmp_src2 := src2 - tmp_src1 := src1 - IF (IS_DENORMAL(src2) AND MXCSR.DAZ) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (IS_DENORMAL(src2) AND MXCSR.DAZ) tmp_src2 := 0 - FI - IF (IS_DENORMAL(src1) AND MXCSR.DAZ) + FI + IF (IS_DENORMAL(src1) AND MXCSR.DAZ) tmp_src1 := 0 - FI - FI - dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0])) - RETURN dst[63:0] - } - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := SCALE(a[i+63:0], b[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + FI + FI + dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0])) + RETURN dst[63:0] +} +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := SCALE(a[i+63:0], b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - - Scale the packed double-precision (64-bit) floating-point elements in "a" using - values from "b", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - [round_note] - DEFINE SCALE(src1, src2) { - IF (src2 == NaN) - IF (src2 == SNaN) + + + + + + + Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + DEFINE SCALE(src1, src2) { + IF (src2 == NaN) + IF (src2 == SNaN) RETURN QNAN(src2) - FI - ELSE IF (src1 == NaN) - IF (src1 == SNaN) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) RETURN QNAN(src1) - FI - IF (src2 != INF) + FI + IF (src2 != INF) RETURN QNAN(src1) - FI - ELSE - tmp_src2 := src2 - tmp_src1 := src1 - IF (IS_DENORMAL(src2) AND MXCSR.DAZ) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (IS_DENORMAL(src2) AND MXCSR.DAZ) tmp_src2 := 0 - FI - IF (IS_DENORMAL(src1) AND MXCSR.DAZ) + FI + IF (IS_DENORMAL(src1) AND MXCSR.DAZ) tmp_src1 := 0 - FI - FI - dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0])) - RETURN dst[63:0] - } - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := SCALE(a[i+63:0], b[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + FI + FI + dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0])) + RETURN dst[63:0] +} +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := SCALE(a[i+63:0], b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - Scale the packed double-precision (64-bit) floating-point elements in "a" using - values from "b", and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - DEFINE SCALE(src1, src2) { - IF (src2 == NaN) - IF (src2 == SNaN) + + + + + Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + DEFINE SCALE(src1, src2) { + IF (src2 == NaN) + IF (src2 == SNaN) RETURN QNAN(src2) - FI - ELSE IF (src1 == NaN) - IF (src1 == SNaN) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) RETURN QNAN(src1) - FI - IF (src2 != INF) + FI + IF (src2 != INF) RETURN QNAN(src1) - FI - ELSE - tmp_src2 := src2 - tmp_src1 := src1 - IF (IS_DENORMAL(src2) AND MXCSR.DAZ) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (IS_DENORMAL(src2) AND MXCSR.DAZ) tmp_src2 := 0 - FI - IF (IS_DENORMAL(src1) AND MXCSR.DAZ) + FI + IF (IS_DENORMAL(src1) AND MXCSR.DAZ) tmp_src1 := 0 - FI - FI - dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0])) - RETURN dst[63:0] - } - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := SCALE(a[i+63:0], b[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + FI + FI + dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0])) + RETURN dst[63:0] +} +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := SCALE(a[i+63:0], b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - Scale the packed double-precision (64-bit) floating-point elements in "a" using - values from "b", and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - [round_note] - DEFINE SCALE(src1, src2) { - IF (src2 == NaN) - IF (src2 == SNaN) + + + + + + Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + DEFINE SCALE(src1, src2) { + IF (src2 == NaN) + IF (src2 == SNaN) RETURN QNAN(src2) - FI - ELSE IF (src1 == NaN) - IF (src1 == SNaN) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) RETURN QNAN(src1) - FI - IF (src2 != INF) + FI + IF (src2 != INF) RETURN QNAN(src1) - FI - ELSE - tmp_src2 := src2 - tmp_src1 := src1 - IF (IS_DENORMAL(src2) AND MXCSR.DAZ) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (IS_DENORMAL(src2) AND MXCSR.DAZ) tmp_src2 := 0 - FI - IF (IS_DENORMAL(src1) AND MXCSR.DAZ) + FI + IF (IS_DENORMAL(src1) AND MXCSR.DAZ) tmp_src1 := 0 - FI - FI - dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0])) - RETURN dst[63:0] - } - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := SCALE(a[i+63:0], b[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + FI + FI + dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0])) + RETURN dst[63:0] +} +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := SCALE(a[i+63:0], b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Miscellaneous
- - - - Scale the packed double-precision (64-bit) floating-point elements in "a" using - values from "b", and store the results in "dst". - DEFINE SCALE(src1, src2) { - IF (src2 == NaN) - IF (src2 == SNaN) + + + + Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", and store the results in "dst". + DEFINE SCALE(src1, src2) { + IF (src2 == NaN) + IF (src2 == SNaN) RETURN QNAN(src2) - FI - ELSE IF (src1 == NaN) - IF (src1 == SNaN) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) RETURN QNAN(src1) - FI - IF (src2 != INF) + FI + IF (src2 != INF) RETURN QNAN(src1) - FI - ELSE - tmp_src2 := src2 - tmp_src1 := src1 - IF (IS_DENORMAL(src2) AND MXCSR.DAZ) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (IS_DENORMAL(src2) AND MXCSR.DAZ) tmp_src2 := 0 - FI - IF (IS_DENORMAL(src1) AND MXCSR.DAZ) + FI + IF (IS_DENORMAL(src1) AND MXCSR.DAZ) tmp_src1 := 0 - FI - FI - dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0])) - RETURN dst[63:0] - } - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := SCALE(a[i+63:0], b[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + FI + FI + dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0])) + RETURN dst[63:0] +} +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := SCALE(a[i+63:0], b[i+63:i]) +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - Scale the packed double-precision (64-bit) floating-point elements in "a" using - values from "b", and store the results in "dst". - [round_note] - DEFINE SCALE(src1, src2) { - IF (src2 == NaN) - IF (src2 == SNaN) + + + + + Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", and store the results in "dst". + [round_note] + DEFINE SCALE(src1, src2) { + IF (src2 == NaN) + IF (src2 == SNaN) RETURN QNAN(src2) - FI - ELSE IF (src1 == NaN) - IF (src1 == SNaN) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) RETURN QNAN(src1) - FI - IF (src2 != INF) + FI + IF (src2 != INF) RETURN QNAN(src1) - FI - ELSE - tmp_src2 := src2 - tmp_src1 := src1 - IF (IS_DENORMAL(src2) AND MXCSR.DAZ) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (IS_DENORMAL(src2) AND MXCSR.DAZ) tmp_src2 := 0 - FI - IF (IS_DENORMAL(src1) AND MXCSR.DAZ) + FI + IF (IS_DENORMAL(src1) AND MXCSR.DAZ) tmp_src1 := 0 - FI - FI - dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0])) - RETURN dst[63:0] - } - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := SCALE(a[i+63:0], b[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + FI + FI + dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0])) + RETURN dst[63:0] +} +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := SCALE(a[i+63:0], b[i+63:i]) +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - Scale the packed single-precision (32-bit) floating-point elements in "a" using - values from "b", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - DEFINE SCALE(src1, src2) { - IF (src2 == NaN) - IF (src2 == SNaN) + + + + + + Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + DEFINE SCALE(src1, src2) { + IF (src2 == NaN) + IF (src2 == SNaN) RETURN QNAN(src2) - FI - ELSE IF (src1 == NaN) - IF (src1 == SNaN) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) RETURN QNAN(src1) - FI - IF (src2 != INF) + FI + IF (src2 != INF) RETURN QNAN(src1) - FI - ELSE - tmp_src2 := src2 - tmp_src1 := src1 - IF (IS_DENORMAL(src2) AND MXCSR.DAZ) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (IS_DENORMAL(src2) AND MXCSR.DAZ) tmp_src2 := 0 - FI - IF (IS_DENORMAL(src1) AND MXCSR.DAZ) + FI + IF (IS_DENORMAL(src1) AND MXCSR.DAZ) tmp_src1 := 0 - FI - FI - dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0])) - RETURN dst[31:0] - } - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := SCALE(a[i+31:0], b[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + FI + FI + dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0])) + RETURN dst[31:0] +} +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := SCALE(a[i+31:0], b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - - Scale the packed single-precision (32-bit) floating-point elements in "a" using - values from "b", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - [round_note] - DEFINE SCALE(src1, src2) { - IF (src2 == NaN) - IF (src2 == SNaN) + + + + + + + Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + DEFINE SCALE(src1, src2) { + IF (src2 == NaN) + IF (src2 == SNaN) RETURN QNAN(src2) - FI - ELSE IF (src1 == NaN) - IF (src1 == SNaN) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) RETURN QNAN(src1) - FI - IF (src2 != INF) + FI + IF (src2 != INF) RETURN QNAN(src1) - FI - ELSE - tmp_src2 := src2 - tmp_src1 := src1 - IF (IS_DENORMAL(src2) AND MXCSR.DAZ) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (IS_DENORMAL(src2) AND MXCSR.DAZ) tmp_src2 := 0 - FI - IF (IS_DENORMAL(src1) AND MXCSR.DAZ) + FI + IF (IS_DENORMAL(src1) AND MXCSR.DAZ) tmp_src1 := 0 - FI - FI - dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0])) - RETURN dst[31:0] - } - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := SCALE(a[i+31:0], b[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + FI + FI + dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0])) + RETURN dst[31:0] +} +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := SCALE(a[i+31:0], b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - Scale the packed single-precision (32-bit) floating-point elements in "a" using - values from "b", and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - DEFINE SCALE(src1, src2) { - IF (src2 == NaN) - IF (src2 == SNaN) + + + + + Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + DEFINE SCALE(src1, src2) { + IF (src2 == NaN) + IF (src2 == SNaN) RETURN QNAN(src2) - FI - ELSE IF (src1 == NaN) - IF (src1 == SNaN) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) RETURN QNAN(src1) - FI - IF (src2 != INF) + FI + IF (src2 != INF) RETURN QNAN(src1) - FI - ELSE - tmp_src2 := src2 - tmp_src1 := src1 - IF (IS_DENORMAL(src2) AND MXCSR.DAZ) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (IS_DENORMAL(src2) AND MXCSR.DAZ) tmp_src2 := 0 - FI - IF (IS_DENORMAL(src1) AND MXCSR.DAZ) + FI + IF (IS_DENORMAL(src1) AND MXCSR.DAZ) tmp_src1 := 0 - FI - FI - dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0])) - RETURN dst[31:0] - } - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := SCALE(a[i+31:0], b[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + FI + FI + dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0])) + RETURN dst[31:0] +} +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := SCALE(a[i+31:0], b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - Scale the packed single-precision (32-bit) floating-point elements in "a" using - values from "b", and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - [round_note] - DEFINE SCALE(src1, src2) { - IF (src2 == NaN) - IF (src2 == SNaN) + + + + + + Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + DEFINE SCALE(src1, src2) { + IF (src2 == NaN) + IF (src2 == SNaN) RETURN QNAN(src2) - FI - ELSE IF (src1 == NaN) - IF (src1 == SNaN) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) RETURN QNAN(src1) - FI - IF (src2 != INF) + FI + IF (src2 != INF) RETURN QNAN(src1) - FI - ELSE - tmp_src2 := src2 - tmp_src1 := src1 - IF (IS_DENORMAL(src2) AND MXCSR.DAZ) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (IS_DENORMAL(src2) AND MXCSR.DAZ) tmp_src2 := 0 - FI - IF (IS_DENORMAL(src1) AND MXCSR.DAZ) + FI + IF (IS_DENORMAL(src1) AND MXCSR.DAZ) tmp_src1 := 0 - FI - FI - dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0])) - RETURN dst[31:0] - } - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := SCALE(a[i+31:0], b[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + FI + FI + dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0])) + RETURN dst[31:0] +} +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := SCALE(a[i+31:0], b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Miscellaneous
- - - - Scale the packed single-precision (32-bit) floating-point elements in "a" using - values from "b", and store the results in "dst". - DEFINE SCALE(src1, src2) { - IF (src2 == NaN) - IF (src2 == SNaN) + + + + Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", and store the results in "dst". + DEFINE SCALE(src1, src2) { + IF (src2 == NaN) + IF (src2 == SNaN) RETURN QNAN(src2) - FI - ELSE IF (src1 == NaN) - IF (src1 == SNaN) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) RETURN QNAN(src1) - FI - IF (src2 != INF) + FI + IF (src2 != INF) RETURN QNAN(src1) - FI - ELSE - tmp_src2 := src2 - tmp_src1 := src1 - IF (IS_DENORMAL(src2) AND MXCSR.DAZ) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (IS_DENORMAL(src2) AND MXCSR.DAZ) tmp_src2 := 0 - FI - IF (IS_DENORMAL(src1) AND MXCSR.DAZ) + FI + IF (IS_DENORMAL(src1) AND MXCSR.DAZ) tmp_src1 := 0 - FI - FI - dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0])) - RETURN dst[31:0] - } - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := SCALE(a[i+31:0], b[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + FI + FI + dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0])) + RETURN dst[31:0] +} +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := SCALE(a[i+31:0], b[i+31:i]) +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - Scale the packed single-precision (32-bit) floating-point elements in "a" using - values from "b", and store the results in "dst". - [round_note] - DEFINE SCALE(src1, src2) { - IF (src2 == NaN) - IF (src2 == SNaN) + + + + + Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", and store the results in "dst". + [round_note] + DEFINE SCALE(src1, src2) { + IF (src2 == NaN) + IF (src2 == SNaN) RETURN QNAN(src2) - FI - ELSE IF (src1 == NaN) - IF (src1 == SNaN) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) RETURN QNAN(src1) - FI - IF (src2 != INF) + FI + IF (src2 != INF) RETURN QNAN(src1) - FI - ELSE - tmp_src2 := src2 - tmp_src1 := src1 - IF (IS_DENORMAL(src2) AND MXCSR.DAZ) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (IS_DENORMAL(src2) AND MXCSR.DAZ) tmp_src2 := 0 - FI - IF (IS_DENORMAL(src1) AND MXCSR.DAZ) + FI + IF (IS_DENORMAL(src1) AND MXCSR.DAZ) tmp_src1 := 0 - FI - FI - dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0])) - RETURN dst[31:0] - } - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := SCALE(a[i+31:0], b[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + FI + FI + dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0])) + RETURN dst[31:0] +} +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := SCALE(a[i+31:0], b[i+31:i]) +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Miscellaneous
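Away from the NaN and denormal special cases, every SCALE block above reduces to dst := src1 * 2^FLOOR(src2). A minimal Rust sketch for one f32 lane (illustrative only, not part of this patch; NaN propagation and MXCSR.DAZ handling from the pseudocode are deliberately omitted):

```rust
// Hypothetical scalar model of the SCALE pseudocode above:
// dst = src1 * 2^FLOOR(src2).
fn scale_f32(src1: f32, src2: f32) -> f32 {
    src1 * 2.0f32.powi(src2.floor() as i32)
}

fn main() {
    assert_eq!(scale_f32(3.0, 1.75), 6.0);  // 3.0 * 2^1
    assert_eq!(scale_f32(1.0, -2.0), 0.25); // 1.0 * 2^-2
}
```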
- - - - - - - Scale the packed double-precision (64-bit) floating-point elements in "a" using - values from "b", store the result in the lower element of "dst" using writemask "k" (the - element is copied from "src" when mask bit 0 is not set), and copy the upper element - from "a" to the upper element of "dst". - [round_note] - DEFINE SCALE(src1, src2) { - IF (src2 == NaN) - IF (src2 == SNaN) + + + + + + + Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + [round_note] + DEFINE SCALE(src1, src2) { + IF (src2 == NaN) + IF (src2 == SNaN) RETURN QNAN(src2) - FI - ELSE IF (src1 == NaN) - IF (src1 == SNaN) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) RETURN QNAN(src1) - FI - IF (src2 != INF) + FI + IF (src2 != INF) RETURN QNAN(src1) - FI - ELSE - tmp_src2 := src2 - tmp_src1 := src1 - IF (IS_DENORMAL(src2) AND MXCSR.DAZ) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (IS_DENORMAL(src2) AND MXCSR.DAZ) tmp_src2 := 0 - FI - IF (IS_DENORMAL(src1) AND MXCSR.DAZ) + FI + IF (IS_DENORMAL(src1) AND MXCSR.DAZ) tmp_src1 := 0 - FI - FI - dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0])) - RETURN dst[63:0] - } - IF k[0] - dst[63:0] := SCALE(a[63:0], b[63:0]) - ELSE - dst[63:0] := src[63:0] - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + FI + FI + dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0])) + RETURN dst[63:0] +} +IF k[0] + dst[63:0] := SCALE(a[63:0], b[63:0]) +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 +
+ + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - Scale the packed double-precision (64-bit) floating-point elements in "a" using - values from "b", store the result in the lower element of "dst" using writemask "k" (the - element is copied from "src" when mask bit 0 is not set), and copy the upper element - from "a" to the upper element of "dst". - DEFINE SCALE(src1, src2) { - IF (src2 == NaN) - IF (src2 == SNaN) + + + + + + Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + DEFINE SCALE(src1, src2) { + IF (src2 == NaN) + IF (src2 == SNaN) RETURN QNAN(src2) - FI - ELSE IF (src1 == NaN) - IF (src1 == SNaN) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) RETURN QNAN(src1) - FI - IF (src2 != INF) + FI + IF (src2 != INF) RETURN QNAN(src1) - FI - ELSE - tmp_src2 := src2 - tmp_src1 := src1 - IF (IS_DENORMAL(src2) AND MXCSR.DAZ) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (IS_DENORMAL(src2) AND MXCSR.DAZ) tmp_src2 := 0 - FI - IF (IS_DENORMAL(src1) AND MXCSR.DAZ) + FI + IF (IS_DENORMAL(src1) AND MXCSR.DAZ) tmp_src1 := 0 - FI - FI - dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0])) - RETURN dst[63:0] - } - IF k[0] - dst[63:0] := SCALE(a[63:0], b[63:0]) - ELSE - dst[63:0] := src[63:0] - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + FI + FI + dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0])) + RETURN dst[63:0] +} +IF k[0] + dst[63:0] := SCALE(a[63:0], b[63:0]) +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 +
+ + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - Scale the packed double-precision (64-bit) floating-point elements in "a" using - values from "b", store the result in the lower element of "dst" using zeromask "k" (the - element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" - to the upper element of "dst". - [round_note] - DEFINE SCALE(src1, src2) { - IF (src2 == NaN) - IF (src2 == SNaN) + + + + + + Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + [round_note] + DEFINE SCALE(src1, src2) { + IF (src2 == NaN) + IF (src2 == SNaN) RETURN QNAN(src2) - FI - ELSE IF (src1 == NaN) - IF (src1 == SNaN) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) RETURN QNAN(src1) - FI - IF (src2 != INF) + FI + IF (src2 != INF) RETURN QNAN(src1) - FI - ELSE - tmp_src2 := src2 - tmp_src1 := src1 - IF (IS_DENORMAL(src2) AND MXCSR.DAZ) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (IS_DENORMAL(src2) AND MXCSR.DAZ) tmp_src2 := 0 - FI - IF (IS_DENORMAL(src1) AND MXCSR.DAZ) + FI + IF (IS_DENORMAL(src1) AND MXCSR.DAZ) tmp_src1 := 0 - FI - FI - dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0])) - RETURN dst[63:0] - } - IF k[0] - dst[63:0] := SCALE(a[63:0], b[63:0]) - ELSE - dst[63:0] := 0 - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + FI + FI + dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0])) + RETURN dst[63:0] +} +IF k[0] + dst[63:0] := SCALE(a[63:0], b[63:0]) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 +
+ + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - Scale the packed double-precision (64-bit) floating-point elements in "a" using - values from "b", store the result in the lower element of "dst" using zeromask "k" (the - element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" - to the upper element of "dst". - DEFINE SCALE(src1, src2) { - IF (src2 == NaN) - IF (src2 == SNaN) + + + + + Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + DEFINE SCALE(src1, src2) { + IF (src2 == NaN) + IF (src2 == SNaN) RETURN QNAN(src2) - FI - ELSE IF (src1 == NaN) - IF (src1 == SNaN) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) RETURN QNAN(src1) - FI - IF (src2 != INF) + FI + IF (src2 != INF) RETURN QNAN(src1) - FI - ELSE - tmp_src2 := src2 - tmp_src1 := src1 - IF (IS_DENORMAL(src2) AND MXCSR.DAZ) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (IS_DENORMAL(src2) AND MXCSR.DAZ) tmp_src2 := 0 - FI - IF (IS_DENORMAL(src1) AND MXCSR.DAZ) + FI + IF (IS_DENORMAL(src1) AND MXCSR.DAZ) tmp_src1 := 0 - FI - FI - dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0])) - RETURN dst[63:0] - } - IF k[0] - dst[63:0] := SCALE(a[63:0], b[63:0]) - ELSE - dst[63:0] := 0 - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + FI + FI + dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0])) + RETURN dst[63:0] +} +IF k[0] + dst[63:0] := SCALE(a[63:0], b[63:0]) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 +
+ + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - Scale the packed double-precision (64-bit) floating-point elements in "a" using - values from "b", store the result in the lower element of "dst", and copy the upper - element from "a" to the upper element of "dst". - [round_note] - DEFINE SCALE(src1, src2) { - IF (src2 == NaN) - IF (src2 == SNaN) + + + + + Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + [round_note] + DEFINE SCALE(src1, src2) { + IF (src2 == NaN) + IF (src2 == SNaN) RETURN QNAN(src2) - FI - ELSE IF (src1 == NaN) - IF (src1 == SNaN) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) RETURN QNAN(src1) - FI - IF (src2 != INF) + FI + IF (src2 != INF) RETURN QNAN(src1) - FI - ELSE - tmp_src2 := src2 - tmp_src1 := src1 - IF (IS_DENORMAL(src2) AND MXCSR.DAZ) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (IS_DENORMAL(src2) AND MXCSR.DAZ) tmp_src2 := 0 - FI - IF (IS_DENORMAL(src1) AND MXCSR.DAZ) + FI + IF (IS_DENORMAL(src1) AND MXCSR.DAZ) tmp_src1 := 0 - FI - FI - dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0])) - RETURN dst[63:0] - } - dst[63:0] := SCALE(a[63:0], b[63:0]) - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + FI + FI + dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0])) + RETURN dst[63:0] +} +dst[63:0] := SCALE(a[63:0], b[63:0]) +dst[127:64] := a[127:64] +dst[MAX:128] := 0 +
+ + AVX512F +
immintrin.h
+ Miscellaneous
- - - - Scale the packed double-precision (64-bit) floating-point elements in "a" using - values from "b", store the result in the lower element of "dst", and copy the upper - element from "a" to the upper element of "dst". - DEFINE SCALE(src1, src2) { - IF (src2 == NaN) - IF (src2 == SNaN) + + + + Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + DEFINE SCALE(src1, src2) { + IF (src2 == NaN) + IF (src2 == SNaN) RETURN QNAN(src2) - FI - ELSE IF (src1 == NaN) - IF (src1 == SNaN) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) RETURN QNAN(src1) - FI - IF (src2 != INF) + FI + IF (src2 != INF) RETURN QNAN(src1) - FI - ELSE - tmp_src2 := src2 - tmp_src1 := src1 - IF (IS_DENORMAL(src2) AND MXCSR.DAZ) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (IS_DENORMAL(src2) AND MXCSR.DAZ) tmp_src2 := 0 - FI - IF (IS_DENORMAL(src1) AND MXCSR.DAZ) + FI + IF (IS_DENORMAL(src1) AND MXCSR.DAZ) tmp_src1 := 0 - FI - FI - dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0])) - RETURN dst[63:0] - } - dst[63:0] := SCALE(a[63:0], b[63:0]) - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + FI + FI + dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0])) + RETURN dst[63:0] +} +dst[63:0] := SCALE(a[63:0], b[63:0]) +dst[127:64] := a[127:64] +dst[MAX:128] := 0 +
+ + AVX512F +
immintrin.h
+ Miscellaneous
-
-
-
-
-
-
- Scale the packed single-precision (32-bit) floating-point elements in "a" using
- values from "b", store the result in the lower element of "dst" using writemask "k" (the
- element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed
- elements from "a" to the upper elements of "dst".
- [round_note]
- DEFINE SCALE(src1, src2) {
- IF (src2 == NaN)
- IF (src2 == SNaN)
+
+
+
+
+
+
+ Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ [round_note]
+ DEFINE SCALE(src1, src2) {
+ IF (src2 == NaN)
+ IF (src2 == SNaN)
 RETURN QNAN(src2)
- FI
- ELSE IF (src1 == NaN)
- IF (src1 == SNaN)
+ FI
+ ELSE IF (src1 == NaN)
+ IF (src1 == SNaN)
 RETURN QNAN(src1)
- FI
- IF (src2 != INF)
+ FI
+ IF (src2 != INF)
 RETURN QNAN(src1)
- FI
- ELSE
- tmp_src2 := src2
- tmp_src1 := src1
- IF (IS_DENORMAL(src2) AND MXCSR.DAZ)
+ FI
+ ELSE
+ tmp_src2 := src2
+ tmp_src1 := src1
+ IF (IS_DENORMAL(src2) AND MXCSR.DAZ)
 tmp_src2 := 0
- FI
- IF (IS_DENORMAL(src1) AND MXCSR.DAZ)
+ FI
+ IF (IS_DENORMAL(src1) AND MXCSR.DAZ)
 tmp_src1 := 0
- FI
- FI
- dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0]))
- RETURN dst[63:0]
- }
- IF k[0]
- dst[31:0] := SCALE(a[31:0], b[31:0])
- ELSE
- dst[31:0] := src[31:0]
- FI
- dst[127:32] := a[127:32]
- dst[MAX:128] := 0
-
-
- AVX512F
-
+ FI
+ FI
+ dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0]))
+ RETURN dst[31:0]
+}
+IF k[0]
+ dst[31:0] := SCALE(a[31:0], b[31:0])
+ELSE
+ dst[31:0] := src[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+
immintrin.h
- Miscellaneous + FI + FI + dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0])) + RETURN dst[63:0] +} +IF k[0] + dst[31:0] := SCALE(a[31:0], b[31:0]) +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 +
+ + AVX512F +
immintrin.h
+ Miscellaneous
-
-
-
-
-
- Scale the packed single-precision (32-bit) floating-point elements in "a" using
- values from "b", store the result in the lower element of "dst" using writemask "k" (the
- element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed
- elements from "a" to the upper elements of "dst".
- DEFINE SCALE(src1, src2) {
- IF (src2 == NaN)
- IF (src2 == SNaN)
+
+
+
+
+
+ Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ DEFINE SCALE(src1, src2) {
+ IF (src2 == NaN)
+ IF (src2 == SNaN)
 RETURN QNAN(src2)
- FI
- ELSE IF (src1 == NaN)
- IF (src1 == SNaN)
+ FI
+ ELSE IF (src1 == NaN)
+ IF (src1 == SNaN)
 RETURN QNAN(src1)
- FI
- IF (src2 != INF)
+ FI
+ IF (src2 != INF)
 RETURN QNAN(src1)
- FI
- ELSE
- tmp_src2 := src2
- tmp_src1 := src1
- IF (IS_DENORMAL(src2) AND MXCSR.DAZ)
+ FI
+ ELSE
+ tmp_src2 := src2
+ tmp_src1 := src1
+ IF (IS_DENORMAL(src2) AND MXCSR.DAZ)
 tmp_src2 := 0
- FI
- IF (IS_DENORMAL(src1) AND MXCSR.DAZ)
+ FI
+ IF (IS_DENORMAL(src1) AND MXCSR.DAZ)
 tmp_src1 := 0
- FI
- FI
- dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0]))
- RETURN dst[63:0]
- }
- IF k[0]
- dst[31:0] := SCALE(a[31:0], b[31:0])
- ELSE
- dst[31:0] := src[31:0]
- FI
- dst[127:32] := a[127:32]
- dst[MAX:128] := 0
-
-
- AVX512F
-
+ FI
+ FI
+ dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0]))
+ RETURN dst[31:0]
+}
+IF k[0]
+ dst[31:0] := SCALE(a[31:0], b[31:0])
+ELSE
+ dst[31:0] := src[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+
immintrin.h
- Miscellaneous
+ FI
+ FI
+ dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0]))
+ RETURN dst[31:0]
+}
+IF k[0]
+ dst[31:0] := SCALE(a[31:0], b[31:0])
+ELSE
+ dst[31:0] := src[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+
+ + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - Scale the packed single-precision (32-bit) floating-point elements in "a" using - values from "b", store the result in the lower element of "dst" using zeromask "k" (the - element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements - from "a" to the upper elements of "dst". - [round_note] - DEFINE SCALE(src1, src2) { - IF (src2 == NaN) - IF (src2 == SNaN) + + + + + + Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + DEFINE SCALE(src1, src2) { + IF (src2 == NaN) + IF (src2 == SNaN) RETURN QNAN(src2) - FI - ELSE IF (src1 == NaN) - IF (src1 == SNaN) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) RETURN QNAN(src1) - FI - IF (src2 != INF) + FI + IF (src2 != INF) RETURN QNAN(src1) - FI - ELSE - tmp_src2 := src2 - tmp_src1 := src1 - IF (IS_DENORMAL(src2) AND MXCSR.DAZ) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (IS_DENORMAL(src2) AND MXCSR.DAZ) tmp_src2 := 0 - FI - IF (IS_DENORMAL(src1) AND MXCSR.DAZ) + FI + IF (IS_DENORMAL(src1) AND MXCSR.DAZ) tmp_src1 := 0 - FI - FI - dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0])) - RETURN dst[63:0] - } - IF k[0] - dst[31:0] := SCALE(a[31:0], b[31:0]) - ELSE - dst[31:0] := 0 - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous
+ FI
+ FI
+ dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0]))
+ RETURN dst[31:0]
+}
+IF k[0]
+ dst[31:0] := SCALE(a[31:0], b[31:0])
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+
+ + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - Scale the packed single-precision (32-bit) floating-point elements in "a" using - values from "b", store the result in the lower element of "dst" using zeromask "k" (the - element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements - from "a" to the upper elements of "dst". - DEFINE SCALE(src1, src2) { - IF (src2 == NaN) - IF (src2 == SNaN) + + + + + Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + DEFINE SCALE(src1, src2) { + IF (src2 == NaN) + IF (src2 == SNaN) RETURN QNAN(src2) - FI - ELSE IF (src1 == NaN) - IF (src1 == SNaN) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) RETURN QNAN(src1) - FI - IF (src2 != INF) + FI + IF (src2 != INF) RETURN QNAN(src1) - FI - ELSE - tmp_src2 := src2 - tmp_src1 := src1 - IF (IS_DENORMAL(src2) AND MXCSR.DAZ) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (IS_DENORMAL(src2) AND MXCSR.DAZ) tmp_src2 := 0 - FI - IF (IS_DENORMAL(src1) AND MXCSR.DAZ) + FI + IF (IS_DENORMAL(src1) AND MXCSR.DAZ) tmp_src1 := 0 - FI - FI - dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0])) - RETURN dst[63:0] - } - IF k[0] - dst[31:0] := SCALE(a[31:0], b[31:0]) - ELSE - dst[31:0] := 0 - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous
+ FI
+ FI
+ dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0]))
+ RETURN dst[31:0]
+}
+IF k[0]
+ dst[31:0] := SCALE(a[31:0], b[31:0])
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+
+ + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - Scale the packed single-precision (32-bit) floating-point elements in "a" using - values from "b", store the result in the lower element of "dst", and copy the upper 3 - packed elements from "a" to the upper elements of "dst". - [round_note] - DEFINE SCALE(src1, src2) { - IF (src2 == NaN) - IF (src2 == SNaN) + + + + + Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + DEFINE SCALE(src1, src2) { + IF (src2 == NaN) + IF (src2 == SNaN) RETURN QNAN(src2) - FI - ELSE IF (src1 == NaN) - IF (src1 == SNaN) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) RETURN QNAN(src1) - FI - IF (src2 != INF) + FI + IF (src2 != INF) RETURN QNAN(src1) - FI - ELSE - tmp_src2 := src2 - tmp_src1 := src1 - IF (IS_DENORMAL(src2) AND MXCSR.DAZ) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (IS_DENORMAL(src2) AND MXCSR.DAZ) tmp_src2 := 0 - FI - IF (IS_DENORMAL(src1) AND MXCSR.DAZ) + FI + IF (IS_DENORMAL(src1) AND MXCSR.DAZ) tmp_src1 := 0 - FI - FI - dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0])) - RETURN dst[63:0] - } - dst[31:0] := SCALE(a[31:0], b[31:0]) - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous
+ FI
+ FI
+ dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0]))
+ RETURN dst[31:0]
+}
+dst[31:0] := SCALE(a[31:0], b[31:0])
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+
+ + AVX512F +
immintrin.h
+ Miscellaneous
- - - - Scale the packed single-precision (32-bit) floating-point elements in "a" using - values from "b", store the result in the lower element of "dst", and copy the upper 3 - packed elements from "a" to the upper elements of "dst". - DEFINE SCALE(src1, src2) { - IF (src2 == NaN) - IF (src2 == SNaN) + + + + Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + DEFINE SCALE(src1, src2) { + IF (src2 == NaN) + IF (src2 == SNaN) RETURN QNAN(src2) - FI - ELSE IF (src1 == NaN) - IF (src1 == SNaN) + FI + ELSE IF (src1 == NaN) + IF (src1 == SNaN) RETURN QNAN(src1) - FI - IF (src2 != INF) + FI + IF (src2 != INF) RETURN QNAN(src1) - FI - ELSE - tmp_src2 := src2 - tmp_src1 := src1 - IF (IS_DENORMAL(src2) AND MXCSR.DAZ) + FI + ELSE + tmp_src2 := src2 + tmp_src1 := src1 + IF (IS_DENORMAL(src2) AND MXCSR.DAZ) tmp_src2 := 0 - FI - IF (IS_DENORMAL(src1) AND MXCSR.DAZ) + FI + IF (IS_DENORMAL(src1) AND MXCSR.DAZ) tmp_src1 := 0 - FI - FI - dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0])) - RETURN dst[63:0] - } - dst[31:0] := SCALE(a[31:0], b[31:0]) - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous
+ FI
+ FI
+ dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0]))
+ RETURN dst[31:0]
+}
+dst[31:0] := SCALE(a[31:0], b[31:0])
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+
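As a sanity check on the SCALE() pseudocode above, a minimal plain-Rust model of its common path (the helper name is illustrative; the signaling-NaN quieting and MXCSR.DAZ branches are deliberately left to Rust's default float semantics rather than modeled bit-for-bit):

    // dst := src1 * 2^FLOOR(src2); NaN and denormal special cases omitted.
    fn scale_f32(src1: f32, src2: f32) -> f32 {
        src1 * src2.floor().exp2()
    }

    fn main() {
        assert_eq!(scale_f32(3.0, 2.7), 12.0); // 3 * 2^2
        assert_eq!(scale_f32(1.5, -1.0), 0.75); // 1.5 * 2^-1
    }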
+ + AVX512F +
immintrin.h
+ Miscellaneous
- - - Broadcast the 4 packed single-precision (32-bit) floating-point elements from - "a" to all elements of "dst". - - FOR j := 0 to 15 - i := j*32 - n := (j % 4)*32 - dst[i+31:i] := a[n+31:n] - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
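A plain-Rust sketch of the 128-bit-lane broadcast above (the function name is illustrative, and this is a model of the pseudocode, not the core::arch intrinsic itself):

    fn broadcast_f32x4(a: [f32; 4]) -> [f32; 16] {
        // Destination element j reads lane j % 4 of the 128-bit source.
        core::array::from_fn(|j| a[j % 4])
    }

    fn main() {
        let r = broadcast_f32x4([1.0, 2.0, 3.0, 4.0]);
        assert_eq!(r[0..4], r[4..8]);
        assert_eq!(r[13], 2.0); // 13 % 4 == 1
    }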
immintrin.h
- Swizzle + + + Broadcast the 4 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst". + +FOR j := 0 to 15 + i := j*32 + n := (j % 4)*32 + dst[i+31:i] := a[n+31:n] +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - - Broadcast the 4 packed single-precision (32-bit) floating-point elements from - "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - n := (j % 4)*32 - IF k[j] - dst[i+31:i] := a[n+31:n] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + + Broadcast the 4 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + n := (j % 4)*32 + IF k[j] + dst[i+31:i] := a[n+31:n] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - Broadcast the 4 packed single-precision (32-bit) floating-point elements from - "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - n := (j % 4)*32 - IF k[j] - dst[i+31:i] := a[n+31:n] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
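The writemask/zeromask plumbing shared by the masked forms above can be factored out; a sketch with illustrative helper names, where bit j of "k" gates destination element j:

    fn mask16(k: u16, new: [f32; 16], src: [f32; 16]) -> [f32; 16] {
        core::array::from_fn(|j| if k & (1u16 << j) != 0 { new[j] } else { src[j] })
    }

    fn maskz16(k: u16, new: [f32; 16]) -> [f32; 16] {
        // The zeromask form is the writemask form with an all-zero fallback.
        mask16(k, new, [0.0; 16])
    }

    fn main() {
        let v = [7.0f32; 16];
        assert_eq!(maskz16(0b10, v)[1], 7.0);
        assert_eq!(maskz16(0b10, v)[0], 0.0);
    }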
immintrin.h
- Swizzle + + + + Broadcast the 4 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + n := (j % 4)*32 + IF k[j] + dst[i+31:i] := a[n+31:n] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - Broadcast the 4 packed double-precision (64-bit) floating-point elements from - "a" to all elements of "dst". - - FOR j := 0 to 7 - i := j*64 - n := (j % 4)*64 - dst[i+63:i] := a[n+63:n] - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + Broadcast the 4 packed double-precision (64-bit) floating-point elements from "a" to all elements of "dst". + +FOR j := 0 to 7 + i := j*64 + n := (j % 4)*64 + dst[i+63:i] := a[n+63:n] +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - - Broadcast the 4 packed double-precision (64-bit) floating-point elements from - "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - n := (j % 4)*64 - IF k[j] - dst[i+63:i] := a[n+63:n] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + + Broadcast the 4 packed double-precision (64-bit) floating-point elements from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + n := (j % 4)*64 + IF k[j] + dst[i+63:i] := a[n+63:n] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - Broadcast the 4 packed double-precision (64-bit) floating-point elements from - "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - n := (j % 4)*64 - IF k[j] - dst[i+63:i] := a[n+63:n] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + Broadcast the 4 packed double-precision (64-bit) floating-point elements from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + n := (j % 4)*64 + IF k[j] + dst[i+63:i] := a[n+63:n] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - Broadcast the 4 packed 32-bit integers from "a" to all elements of "dst". - - FOR j := 0 to 15 - i := j*32 - n := (j % 4)*32 - dst[i+31:i] := a[n+31:n] - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + Broadcast the 4 packed 32-bit integers from "a" to all elements of "dst". + +FOR j := 0 to 15 + i := j*32 + n := (j % 4)*32 + dst[i+31:i] := a[n+31:n] +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - - Broadcast the 4 packed 32-bit integers from "a" to all elements of "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - FOR j := 0 to 15 - i := j*32 - n := (j % 4)*32 - IF k[j] - dst[i+31:i] := a[n+31:n] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + + Broadcast the 4 packed 32-bit integers from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + n := (j % 4)*32 + IF k[j] + dst[i+31:i] := a[n+31:n] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - Broadcast the 4 packed 32-bit integers from "a" to all elements of "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - n := (j % 4)*32 - IF k[j] - dst[i+31:i] := a[n+31:n] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + Broadcast the 4 packed 32-bit integers from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + n := (j % 4)*32 + IF k[j] + dst[i+31:i] := a[n+31:n] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - Broadcast the 4 packed 64-bit integers from "a" to all elements of "dst". - - FOR j := 0 to 7 - i := j*64 - n := (j % 4)*64 - dst[i+63:i] := a[n+63:n] - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + Broadcast the 4 packed 64-bit integers from "a" to all elements of "dst". + +FOR j := 0 to 7 + i := j*64 + n := (j % 4)*64 + dst[i+63:i] := a[n+63:n] +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - - Broadcast the 4 packed 64-bit integers from "a" to all elements of "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - FOR j := 0 to 7 - i := j*64 - n := (j % 4)*64 - IF k[j] - dst[i+63:i] := a[n+63:n] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + + Broadcast the 4 packed 64-bit integers from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + n := (j % 4)*64 + IF k[j] + dst[i+63:i] := a[n+63:n] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - Broadcast the 4 packed 64-bit integers from "a" to all elements of "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - n := (j % 4)*64 - IF k[j] - dst[i+63:i] := a[n+63:n] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + Broadcast the 4 packed 64-bit integers from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + n := (j % 4)*64 + IF k[j] + dst[i+63:i] := a[n+63:n] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - Broadcast the low double-precision (64-bit) floating-point element from "a" to - all elements of "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := a[63:0] - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
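The scalar-broadcast forms above reduce to replicating element 0 of the source; a one-line plain-Rust model (illustrative name):

    fn broadcastsd_pd(a: [f64; 2]) -> [f64; 8] {
        // Every destination lane reads a[63:0], i.e. element 0.
        [a[0]; 8]
    }

    fn main() {
        assert_eq!(broadcastsd_pd([2.5, 9.0]), [2.5; 8]);
    }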
immintrin.h
- Swizzle + + + Broadcast the low double-precision (64-bit) floating-point element from "a" to all elements of "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := a[63:0] +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - - Broadcast the low double-precision (64-bit) floating-point element from "a" to - all elements of "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := a[63:0] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + + Broadcast the low double-precision (64-bit) floating-point element from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[63:0] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - Broadcast the low double-precision (64-bit) floating-point element from "a" to - all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding - mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := a[63:0] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + Broadcast the low double-precision (64-bit) floating-point element from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[63:0] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - Broadcast the low single-precision (32-bit) floating-point element from "a" to - all elements of "dst". - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := a[31:0] - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + Broadcast the low single-precision (32-bit) floating-point element from "a" to all elements of "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := a[31:0] +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - - Broadcast the low single-precision (32-bit) floating-point element from "a" to - all elements of "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := a[31:0] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + + Broadcast the low single-precision (32-bit) floating-point element from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[31:0] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - Broadcast the low single-precision (32-bit) floating-point element from "a" to - all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding - mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := a[31:0] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + Broadcast the low single-precision (32-bit) floating-point element from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[31:0] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - - Contiguously store the active double-precision (64-bit) floating-point elements - in "a" (those with their respective bit set in writemask "k") to "dst", and pass through - the remaining elements from "src". - - size := 64 - m := 0 - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[m+size-1:m] := a[i+63:i] - m := m + size - FI - ENDFOR - dst[511:m] := src[511:m] - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + + Contiguously store the active double-precision (64-bit) floating-point elements in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src". + +size := 64 +m := 0 +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[m+size-1:m] := a[i+63:i] + m := m + size + FI +ENDFOR +dst[511:m] := src[511:m] +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - Contiguously store the active double-precision (64-bit) floating-point elements - in "a" (those with their respective bit set in zeromask "k") to "dst", and set the - remaining elements to zero. - - size := 64 - m := 0 - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[m+size-1:m] := a[i+63:i] - m := m + size - FI - ENDFOR - dst[511:m] := 0 - dst[MAX:512] := 0 - - - AVX512F -
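A plain-Rust model of the contiguous-store (compress) operation above, zeromask form; the same loop covers the single-precision and integer variants with the obvious type changes (name illustrative):

    fn compress_f64x8(k: u8, a: [f64; 8]) -> [f64; 8] {
        let mut dst = [0.0f64; 8];
        let mut m = 0;
        for j in 0..8 {
            if k & (1u8 << j) != 0 {
                // Active elements are packed towards element 0, in source order.
                dst[m] = a[j];
                m += 1;
            }
        }
        dst
    }

    fn main() {
        let a = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0];
        let r = compress_f64x8(0b1010_1010, a);
        assert_eq!(r, [1.0, 3.0, 5.0, 7.0, 0.0, 0.0, 0.0, 0.0]);
    }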
immintrin.h
- Swizzle + + + + Contiguously store the active double-precision (64-bit) floating-point elements in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero. + +size := 64 +m := 0 +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[m+size-1:m] := a[i+63:i] + m := m + size + FI +ENDFOR +dst[511:m] := 0 +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - - Contiguously store the active single-precision (32-bit) floating-point elements - in "a" (those with their respective bit set in writemask "k") to "dst", and pass through - the remaining elements from "src". - - size := 32 - m := 0 - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[m+size-1:m] := a[i+31:i] - m := m + size - FI - ENDFOR - dst[511:m] := src[511:m] - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + + Contiguously store the active single-precision (32-bit) floating-point elements in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src". + +size := 32 +m := 0 +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[m+size-1:m] := a[i+31:i] + m := m + size + FI +ENDFOR +dst[511:m] := src[511:m] +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - Contiguously store the active single-precision (32-bit) floating-point elements - in "a" (those with their respective bit set in zeromask "k") to "dst", and set the - remaining elements to zero. - - size := 32 - m := 0 - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[m+size-1:m] := a[i+31:i] - m := m + size - FI - ENDFOR - dst[511:m] := 0 - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + Contiguously store the active single-precision (32-bit) floating-point elements in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero. + +size := 32 +m := 0 +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[m+size-1:m] := a[i+31:i] + m := m + size + FI +ENDFOR +dst[511:m] := 0 +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - - Load contiguous active double-precision (64-bit) floating-point elements from - "a" (those with their respective bit set in mask "k"), and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - m := 0 - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := a[m+63:m] - m := m + 64 - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + + Load contiguous active double-precision (64-bit) floating-point elements from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[m+63:m] + m := m + 64 + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - Load contiguous active double-precision (64-bit) floating-point elements from - "a" (those with their respective bit set in mask "k"), and store the results in "dst" - using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - m := 0 - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := a[m+63:m] - m := m + 64 - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
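Expand is the inverse walk: consecutive source elements are scattered into the active destination lanes. A plain-Rust sketch of the zeromask form (name illustrative):

    fn expand_f64x8(k: u8, a: [f64; 8]) -> [f64; 8] {
        let mut dst = [0.0f64; 8];
        let mut m = 0;
        for j in 0..8 {
            if k & (1u8 << j) != 0 {
                dst[j] = a[m];
                m += 1;
            }
        }
        dst
    }

    fn main() {
        let a = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
        let r = expand_f64x8(0b0000_0101, a);
        assert_eq!(r, [1.0, 0.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0]);
    }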
immintrin.h
- Swizzle + + + + Load contiguous active double-precision (64-bit) floating-point elements from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[m+63:m] + m := m + 64 + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - - Load contiguous active single-precision (32-bit) floating-point elements from - "a" (those with their respective bit set in mask "k"), and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - m := 0 - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := a[m+31:m] - m := m + 32 - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + + Load contiguous active single-precision (32-bit) floating-point elements from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[m+31:m] + m := m + 32 + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - Load contiguous active single-precision (32-bit) floating-point elements from - "a" (those with their respective bit set in mask "k"), and store the results in "dst" - using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - m := 0 - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := a[m+31:m] - m := m + 32 - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + Load contiguous active single-precision (32-bit) floating-point elements from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[m+31:m] + m := m + 32 + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - Extract 128 bits (composed of 4 packed single-precision (32-bit) floating-point - elements) from "a", selected with "imm8", and store the result in "dst". - - CASE imm8[1:0] OF - 0: dst[127:0] := a[127:0] - 1: dst[127:0] := a[255:128] - 2: dst[127:0] := a[383:256] - 3: dst[127:0] := a[511:384] - ESAC - dst[MAX:128] := 0 - - - AVX512F -
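A plain-Rust sketch of the imm8 lane select above (illustrative name, not the intrinsic):

    fn extract_f32x4(a: [f32; 16], imm8: u8) -> [f32; 4] {
        // imm8[1:0] selects one of the four 128-bit lanes.
        let base = (imm8 & 0b11) as usize * 4;
        core::array::from_fn(|j| a[base + j])
    }

    fn main() {
        let a: [f32; 16] = core::array::from_fn(|j| j as f32);
        assert_eq!(extract_f32x4(a, 2), [8.0, 9.0, 10.0, 11.0]);
    }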
immintrin.h
- Swizzle + + + + Extract 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "a", selected with "imm8", and store the result in "dst". + +CASE imm8[1:0] OF +0: dst[127:0] := a[127:0] +1: dst[127:0] := a[255:128] +2: dst[127:0] := a[383:256] +3: dst[127:0] := a[511:384] +ESAC +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - - - Extract 128 bits (composed of 4 packed single-precision (32-bit) floating-point - elements) from "a", selected with "imm8", and store the results in "dst" using writemask - "k" (elements are copied from "src" when the corresponding mask bit is not set). - - CASE imm8[1:0] OF - 0: tmp[127:0] := a[127:0] - 1: tmp[127:0] := a[255:128] - 2: tmp[127:0] := a[383:256] - 3: tmp[127:0] := a[511:384] - ESAC - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := tmp[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + + + Extract 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "a", selected with "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +CASE imm8[1:0] OF +0: tmp[127:0] := a[127:0] +1: tmp[127:0] := a[255:128] +2: tmp[127:0] := a[383:256] +3: tmp[127:0] := a[511:384] +ESAC +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - - Extract 128 bits (composed of 4 packed single-precision (32-bit) floating-point - elements) from "a", selected with "imm8", and store the results in "dst" using zeromask - "k" (elements are zeroed out when the corresponding mask bit is not set). - - CASE imm8[1:0] OF - 0: tmp[127:0] := a[127:0] - 1: tmp[127:0] := a[255:128] - 2: tmp[127:0] := a[383:256] - 3: tmp[127:0] := a[511:384] - ESAC - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := tmp[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + + Extract 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "a", selected with "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +CASE imm8[1:0] OF +0: tmp[127:0] := a[127:0] +1: tmp[127:0] := a[255:128] +2: tmp[127:0] := a[383:256] +3: tmp[127:0] := a[511:384] +ESAC +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - Extract 256 bits (composed of 4 packed double-precision (64-bit) floating-point - elements) from "a", selected with "imm8", and store the result in "dst". - - CASE imm8[0] OF - 0: dst[255:0] := a[255:0] - 1: dst[255:0] := a[511:256] - ESAC - dst[MAX:256] := 0 - - - AVX512F -
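The 256-bit extract differs only in that a single selector bit participates; a matching sketch under the same assumptions:

    fn extract_f64x4(a: [f64; 8], imm8: u8) -> [f64; 4] {
        // Only imm8[0] is used: 0 selects the low 256 bits, 1 the high.
        let base = (imm8 & 1) as usize * 4;
        core::array::from_fn(|j| a[base + j])
    }

    fn main() {
        let a: [f64; 8] = core::array::from_fn(|j| j as f64);
        assert_eq!(extract_f64x4(a, 1), [4.0, 5.0, 6.0, 7.0]);
    }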
immintrin.h
- Swizzle + + + + Extract 256 bits (composed of 4 packed double-precision (64-bit) floating-point elements) from "a", selected with "imm8", and store the result in "dst". + +CASE imm8[0] OF +0: dst[255:0] := a[255:0] +1: dst[255:0] := a[511:256] +ESAC +dst[MAX:256] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - - - Extract 256 bits (composed of 4 packed double-precision (64-bit) floating-point - elements) from "a", selected with "imm8", and store the results in "dst" using writemask - "k" (elements are copied from "src" when the corresponding mask bit is not set). - - CASE imm8[0] OF - 0: tmp[255:0] := a[255:0] - 1: tmp[255:0] := a[511:256] - ESAC - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := tmp[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + + + Extract 256 bits (composed of 4 packed double-precision (64-bit) floating-point elements) from "a", selected with "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +CASE imm8[0] OF +0: tmp[255:0] := a[255:0] +1: tmp[255:0] := a[511:256] +ESAC +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - - Extract 256 bits (composed of 4 packed double-precision (64-bit) floating-point - elements) from "a", selected with "imm8", and store the results in "dst" using zeromask - "k" (elements are zeroed out when the corresponding mask bit is not set). - - CASE imm8[0] OF - 0: tmp[255:0] := a[255:0] - 1: tmp[255:0] := a[511:256] - ESAC - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := tmp[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + + Extract 256 bits (composed of 4 packed double-precision (64-bit) floating-point elements) from "a", selected with "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +CASE imm8[0] OF +0: tmp[255:0] := a[255:0] +1: tmp[255:0] := a[511:256] +ESAC +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - Extract 128 bits (composed of 4 packed 32-bit integers) from "a", selected with - "imm8", and store the result in "dst". - - CASE imm8[1:0] OF - 0: dst[127:0] := a[127:0] - 1: dst[127:0] := a[255:128] - 2: dst[127:0] := a[383:256] - 3: dst[127:0] := a[511:384] - ESAC - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + Extract 128 bits (composed of 4 packed 32-bit integers) from "a", selected with "imm8", and store the result in "dst". + +CASE imm8[1:0] OF +0: dst[127:0] := a[127:0] +1: dst[127:0] := a[255:128] +2: dst[127:0] := a[383:256] +3: dst[127:0] := a[511:384] +ESAC +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - - - Extract 128 bits (composed of 4 packed 32-bit integers) from "a", selected with - "imm8", and store the results in "dst" using writemask "k" (elements are copied from - "src" when the corresponding mask bit is not set). - - CASE imm8[1:0] OF - 0: tmp[127:0] := a[127:0] - 1: tmp[127:0] := a[255:128] - 2: tmp[127:0] := a[383:256] - 3: tmp[127:0] := a[511:384] - ESAC - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := tmp[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + + + Extract 128 bits (composed of 4 packed 32-bit integers) from "a", selected with "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +CASE imm8[1:0] OF +0: tmp[127:0] := a[127:0] +1: tmp[127:0] := a[255:128] +2: tmp[127:0] := a[383:256] +3: tmp[127:0] := a[511:384] +ESAC +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - - Extract 128 bits (composed of 4 packed 32-bit integers) from "a", selected with - "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when - the corresponding mask bit is not set). - - CASE imm8[1:0] OF - 0: tmp[127:0] := a[127:0] - 1: tmp[127:0] := a[255:128] - 2: tmp[127:0] := a[383:256] - 3: tmp[127:0] := a[511:384] - ESAC - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := tmp[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + + Extract 128 bits (composed of 4 packed 32-bit integers) from "a", selected with "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +CASE imm8[1:0] OF +0: tmp[127:0] := a[127:0] +1: tmp[127:0] := a[255:128] +2: tmp[127:0] := a[383:256] +3: tmp[127:0] := a[511:384] +ESAC +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - Extract 256 bits (composed of 4 packed 64-bit integers) from "a", selected with - "imm8", and store the result in "dst". - - CASE imm8[0] OF - 0: dst[255:0] := a[255:0] - 1: dst[255:0] := a[511:256] - ESAC - dst[MAX:256] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + Extract 256 bits (composed of 4 packed 64-bit integers) from "a", selected with "imm8", and store the result in "dst". + +CASE imm8[0] OF +0: dst[255:0] := a[255:0] +1: dst[255:0] := a[511:256] +ESAC +dst[MAX:256] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - - - Extract 256 bits (composed of 4 packed 64-bit integers) from "a", selected with - "imm8", and store the results in "dst" using writemask "k" (elements are copied from - "src" when the corresponding mask bit is not set). - - CASE imm8[0] OF - 0: tmp[255:0] := a[255:0] - 1: tmp[255:0] := a[511:256] - ESAC - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := tmp[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + + + Extract 256 bits (composed of 4 packed 64-bit integers) from "a", selected with "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +CASE imm8[0] OF +0: tmp[255:0] := a[255:0] +1: tmp[255:0] := a[511:256] +ESAC +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - - Extract 256 bits (composed of 4 packed 64-bit integers) from "a", selected with - "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when - the corresponding mask bit is not set). - - CASE imm8[0] OF - 0: tmp[255:0] := a[255:0] - 1: tmp[255:0] := a[511:256] - ESAC - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := tmp[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + + Extract 256 bits (composed of 4 packed 64-bit integers) from "a", selected with "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +CASE imm8[0] OF +0: tmp[255:0] := a[255:0] +1: tmp[255:0] := a[511:256] +ESAC +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - - Copy "a" to "dst", then insert 128 bits (composed of 4 packed single-precision - (32-bit) floating-point elements) from "b" into "dst" at the location specified by - "imm8". - - dst[511:0] := a[511:0] - CASE (imm8[1:0]) OF - 0: dst[127:0] := b[127:0] - 1: dst[255:128] := b[127:0] - 2: dst[383:256] := b[127:0] - 3: dst[511:384] := b[127:0] - ESAC - dst[MAX:512] := 0 - - - AVX512F -
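Insert is the write-side counterpart of extract: copy "a", then overwrite the selected 128-bit lane with "b". A plain-Rust sketch (illustrative name):

    fn insert_f32x4(a: [f32; 16], b: [f32; 4], imm8: u8) -> [f32; 16] {
        let mut dst = a;
        // imm8[1:0] picks which 128-bit lane of the copy of "a" is overwritten.
        let base = (imm8 & 0b11) as usize * 4;
        dst[base..base + 4].copy_from_slice(&b);
        dst
    }

    fn main() {
        let r = insert_f32x4([0.0; 16], [1.0, 2.0, 3.0, 4.0], 3);
        assert_eq!(&r[12..16], &[1.0, 2.0, 3.0, 4.0]);
    }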
immintrin.h
- Swizzle + + + + + Copy "a" to "dst", then insert 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "b" into "dst" at the location specified by "imm8". + +dst[511:0] := a[511:0] +CASE (imm8[1:0]) OF +0: dst[127:0] := b[127:0] +1: dst[255:128] := b[127:0] +2: dst[383:256] := b[127:0] +3: dst[511:384] := b[127:0] +ESAC +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - - - - Copy "a" to "tmp", then insert 128 bits (composed of 4 packed single-precision - (32-bit) floating-point elements) from "b" into "tmp" at the location specified by - "imm8". Store "tmp" to "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - tmp[511:0] := a[511:0] - CASE (imm8[1:0]) OF - 0: tmp[127:0] := b[127:0] - 1: tmp[255:128] := b[127:0] - 2: tmp[383:256] := b[127:0] - 3: tmp[511:384] := b[127:0] - ESAC - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := tmp[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + + + + Copy "a" to "tmp", then insert 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp[511:0] := a[511:0] +CASE (imm8[1:0]) OF +0: tmp[127:0] := b[127:0] +1: tmp[255:128] := b[127:0] +2: tmp[383:256] := b[127:0] +3: tmp[511:384] := b[127:0] +ESAC +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - - - Copy "a" to "tmp", then insert 128 bits (composed of 4 packed single-precision - (32-bit) floating-point elements) from "b" into "tmp" at the location specified by - "imm8". Store "tmp" to "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - tmp[511:0] := a[511:0] - CASE (imm8[1:0]) OF - 0: tmp[127:0] := b[127:0] - 1: tmp[255:128] := b[127:0] - 2: tmp[383:256] := b[127:0] - 3: tmp[511:384] := b[127:0] - ESAC - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := tmp[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + + + Copy "a" to "tmp", then insert 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp[511:0] := a[511:0] +CASE (imm8[1:0]) OF +0: tmp[127:0] := b[127:0] +1: tmp[255:128] := b[127:0] +2: tmp[383:256] := b[127:0] +3: tmp[511:384] := b[127:0] +ESAC +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - - Copy "a" to "dst", then insert 256 bits (composed of 4 packed double-precision - (64-bit) floating-point elements) from "b" into "dst" at the location specified by - "imm8". - - dst[511:0] := a[511:0] - CASE (imm8[0]) OF - 0: dst[255:0] := b[255:0] - 1: dst[511:256] := b[255:0] - ESAC - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + + Copy "a" to "dst", then insert 256 bits (composed of 4 packed double-precision (64-bit) floating-point elements) from "b" into "dst" at the location specified by "imm8". + +dst[511:0] := a[511:0] +CASE (imm8[0]) OF +0: dst[255:0] := b[255:0] +1: dst[511:256] := b[255:0] +ESAC +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - - - - Copy "a" to "tmp", then insert 256 bits (composed of 4 packed double-precision - (64-bit) floating-point elements) from "b" into "tmp" at the location specified by - "imm8". Store "tmp" to "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - tmp[511:0] := a[511:0] - CASE (imm8[0]) OF - 0: tmp[255:0] := b[255:0] - 1: tmp[511:256] := b[255:0] - ESAC - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := tmp[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + + + + Copy "a" to "tmp", then insert 256 bits (composed of 4 packed double-precision (64-bit) floating-point elements) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp[511:0] := a[511:0] +CASE (imm8[0]) OF +0: tmp[255:0] := b[255:0] +1: tmp[511:256] := b[255:0] +ESAC +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - - - Copy "a" to "tmp", then insert 256 bits (composed of 4 packed double-precision - (64-bit) floating-point elements) from "b" into "tmp" at the location specified by - "imm8". Store "tmp" to "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - tmp[511:0] := a[511:0] - CASE (imm8[0]) OF - 0: tmp[255:0] := b[255:0] - 1: tmp[511:256] := b[255:0] - ESAC - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := tmp[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + + + Copy "a" to "tmp", then insert 256 bits (composed of 4 packed double-precision (64-bit) floating-point elements) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp[511:0] := a[511:0] +CASE (imm8[0]) OF +0: tmp[255:0] := b[255:0] +1: tmp[511:256] := b[255:0] +ESAC +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - - Copy "a" to "dst", then insert 128 bits (composed of 4 packed 32-bit integers) - from "b" into "dst" at the location specified by "imm8". - - dst[511:0] := a[511:0] - CASE (imm8[1:0]) OF - 0: dst[127:0] := b[127:0] - 1: dst[255:128] := b[127:0] - 2: dst[383:256] := b[127:0] - 3: dst[511:384] := b[127:0] - ESAC - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + + Copy "a" to "dst", then insert 128 bits (composed of 4 packed 32-bit integers) from "b" into "dst" at the location specified by "imm8". + +dst[511:0] := a[511:0] +CASE (imm8[1:0]) OF +0: dst[127:0] := b[127:0] +1: dst[255:128] := b[127:0] +2: dst[383:256] := b[127:0] +3: dst[511:384] := b[127:0] +ESAC +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - - - - Copy "a" to "tmp", then insert 128 bits (composed of 4 packed 32-bit integers) - from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - tmp[511:0] := a[511:0] - CASE (imm8[1:0]) OF - 0: tmp[127:0] := b[127:0] - 1: tmp[255:128] := b[127:0] - 2: tmp[383:256] := b[127:0] - 3: tmp[511:384] := b[127:0] - ESAC - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := tmp[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + + + + Copy "a" to "tmp", then insert 128 bits (composed of 4 packed 32-bit integers) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp[511:0] := a[511:0] +CASE (imm8[1:0]) OF +0: tmp[127:0] := b[127:0] +1: tmp[255:128] := b[127:0] +2: tmp[383:256] := b[127:0] +3: tmp[511:384] := b[127:0] +ESAC +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - - - Copy "a" to "tmp", then insert 128 bits (composed of 4 packed 32-bit integers) - from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - tmp[511:0] := a[511:0] - CASE (imm8[1:0]) OF - 0: tmp[127:0] := b[127:0] - 1: tmp[255:128] := b[127:0] - 2: tmp[383:256] := b[127:0] - 3: tmp[511:384] := b[127:0] - ESAC - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := tmp[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + + + Copy "a" to "tmp", then insert 128 bits (composed of 4 packed 32-bit integers) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp[511:0] := a[511:0] +CASE (imm8[1:0]) OF +0: tmp[127:0] := b[127:0] +1: tmp[255:128] := b[127:0] +2: tmp[383:256] := b[127:0] +3: tmp[511:384] := b[127:0] +ESAC +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - - Copy "a" to "dst", then insert 256 bits (composed of 4 packed 64-bit integers) - from "b" into "dst" at the location specified by "imm8". - - dst[511:0] := a[511:0] - CASE (imm8[0]) OF - 0: dst[255:0] := b[255:0] - 1: dst[511:256] := b[255:0] - ESAC - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + + Copy "a" to "dst", then insert 256 bits (composed of 4 packed 64-bit integers) from "b" into "dst" at the location specified by "imm8". + +dst[511:0] := a[511:0] +CASE (imm8[0]) OF +0: dst[255:0] := b[255:0] +1: dst[511:256] := b[255:0] +ESAC +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - - - - Copy "a" to "tmp", then insert 256 bits (composed of 4 packed 64-bit integers) - from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - tmp[511:0] := a[511:0] - CASE (imm8[0]) OF - 0: tmp[255:0] := b[255:0] - 1: tmp[511:256] := b[255:0] - ESAC - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := tmp[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + + + + Copy "a" to "tmp", then insert 256 bits (composed of 4 packed 64-bit integers) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp[511:0] := a[511:0] +CASE (imm8[0]) OF +0: tmp[255:0] := b[255:0] +1: tmp[511:256] := b[255:0] +ESAC +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - - - Copy "a" to "tmp", then insert 256 bits (composed of 4 packed 64-bit integers) - from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - tmp[511:0] := a[511:0] - CASE (imm8[0]) OF - 0: tmp[255:0] := b[255:0] - 1: tmp[511:256] := b[255:0] - ESAC - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := tmp[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + + + Copy "a" to "tmp", then insert 256 bits (composed of 4 packed 64-bit integers) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp[511:0] := a[511:0] +CASE (imm8[0]) OF +0: tmp[255:0] := b[255:0] +1: tmp[511:256] := b[255:0] +ESAC +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - Broadcast the low packed 32-bit integer from "a" to all elements of "dst". - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := a[31:0] - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + Broadcast the low packed 32-bit integer from "a" to all elements of "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := a[31:0] +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - - Broadcast the low packed 32-bit integer from "a" to all elements of "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := a[31:0] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + + Broadcast the low packed 32-bit integer from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[31:0] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - Broadcast the low packed 32-bit integer from "a" to all elements of "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := a[31:0] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + Broadcast the low packed 32-bit integer from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[31:0] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - Broadcast the low packed 64-bit integer from "a" to all elements of "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := a[63:0] - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + Broadcast the low packed 64-bit integer from "a" to all elements of "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := a[63:0] +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - - Broadcast the low packed 64-bit integer from "a" to all elements of "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := a[63:0] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + + Broadcast the low packed 64-bit integer from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[63:0] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - Broadcast the low packed 64-bit integer from "a" to all elements of "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := a[63:0] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + Broadcast the low packed 64-bit integer from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[63:0] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - - Contiguously store the active 32-bit integers in "a" (those with their - respective bit set in writemask "k") to "dst", and pass through the remaining elements - from "src". - - size := 32 - m := 0 - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[m+size-1:m] := a[i+31:i] - m := m + size - FI - ENDFOR - dst[511:m] := src[511:m] - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + + Contiguously store the active 32-bit integers in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src". + +size := 32 +m := 0 +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[m+size-1:m] := a[i+31:i] + m := m + size + FI +ENDFOR +dst[511:m] := src[511:m] +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - Contiguously store the active 32-bit integers in "a" (those with their - respective bit set in zeromask "k") to "dst", and set the remaining elements to zero. - - size := 32 - m := 0 - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[m+size-1:m] := a[i+31:i] - m := m + size - FI - ENDFOR - dst[511:m] := 0 - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + Contiguously store the active 32-bit integers in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero. + +size := 32 +m := 0 +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[m+size-1:m] := a[i+31:i] + m := m + size + FI +ENDFOR +dst[511:m] := 0 +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - - Contiguously store the active 64-bit integers in "a" (those with their - respective bit set in writemask "k") to "dst", and pass through the remaining elements - from "src". - - size := 64 - m := 0 - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[m+size-1:m] := a[i+63:i] - m := m + size - FI - ENDFOR - dst[511:m] := src[511:m] - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + + Contiguously store the active 64-bit integers in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src". + +size := 64 +m := 0 +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[m+size-1:m] := a[i+63:i] + m := m + size + FI +ENDFOR +dst[511:m] := src[511:m] +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - Contiguously store the active 64-bit integers in "a" (those with their - respective bit set in zeromask "k") to "dst", and set the remaining elements to zero. - - size := 64 - m := 0 - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[m+size-1:m] := a[i+63:i] - m := m + size - FI - ENDFOR - dst[511:m] := 0 - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + Contiguously store the active 64-bit integers in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero. + +size := 64 +m := 0 +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[m+size-1:m] := a[i+63:i] + m := m + size + FI +ENDFOR +dst[511:m] := 0 +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
+ Swizzle
+ Shuffle 32-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+FOR j := 0 to 15
+    i := j*32
+    id := idx[i+3:i]*32
+    IF k[j]
+        dst[i+31:i] := a[id+31:id]
+    ELSE
+        dst[i+31:i] := src[i+31:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
 immintrin.h
+ Swizzle
+ Shuffle 32-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 to 15
+    i := j*32
+    id := idx[i+3:i]*32
+    IF k[j]
+        dst[i+31:i] := a[id+31:id]
+    ELSE
+        dst[i+31:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
 immintrin.h
+ Swizzle
+ Shuffle 32-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst".
+FOR j := 0 to 15
+    i := j*32
+    id := idx[i+3:i]*32
+    dst[i+31:i] := a[id+31:id]
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
 immintrin.h
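This permute is a full cross-lane gather: output lane j reads input lane idx[j] mod 16, so only the low four index bits participate. A scalar sketch, with an illustrative helper name:

    // Scalar model of the across-lane 32-bit permute: dst[j] = a[idx[j] & 15].
    fn permutexvar16(idx: [u32; 16], a: [i32; 16]) -> [i32; 16] {
        core::array::from_fn(|j| a[(idx[j] & 0xF) as usize])
    }

    fn main() {
        let a: [i32; 16] = core::array::from_fn(|i| 100 + i as i32);
        let reverse: [u32; 16] = core::array::from_fn(|i| 15 - i as u32);
        assert_eq!(permutexvar16(reverse, a)[0], 115); // lane 0 now holds old lane 15
    }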
+ Swizzle
+ Shuffle 32-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set).
+FOR j := 0 to 15
+    i := j*32
+    off := idx[i+3:i]*32
+    IF k[j]
+        dst[i+31:i] := idx[i+4] ? b[off+31:off] : a[off+31:off]
+    ELSE
+        dst[i+31:i] := idx[i+31:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
 immintrin.h
+ Swizzle
+ Shuffle 32-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+FOR j := 0 to 15
+    i := j*32
+    off := idx[i+3:i]*32
+    IF k[j]
+        dst[i+31:i] := idx[i+4] ? b[off+31:off] : a[off+31:off]
+    ELSE
+        dst[i+31:i] := a[i+31:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
 immintrin.h
+ Swizzle
+ Shuffle 32-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 to 15
+    i := j*32
+    off := idx[i+3:i]*32
+    IF k[j]
+        dst[i+31:i] := (idx[i+4]) ? b[off+31:off] : a[off+31:off]
+    ELSE
+        dst[i+31:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
 immintrin.h
+ Swizzle
+ Shuffle 32-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst".
+FOR j := 0 to 15
+    i := j*32
+    off := idx[i+3:i]*32
+    dst[i+31:i] := idx[i+4] ? b[off+31:off] : a[off+31:off]
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
 immintrin.h
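In the two-source variants, "idx" carries one extra bit per lane: the low bits give the element offset and the next bit picks between "a" and "b". Because the element count N is a power of two, bit N doubles as the source selector at every width, so one generic scalar sketch covers the epi32, epi64, ps and pd quartets (helper name illustrative, not a stdarch API):

    // Scalar model of permutex2var: the low bits of idx select the offset,
    // the next bit (== N for an N-element vector) selects a vs. b.
    fn permutex2var<T: Copy, const N: usize>(a: [T; N], idx: [usize; N], b: [T; N]) -> [T; N] {
        core::array::from_fn(|j| {
            let off = idx[j] % N;
            if idx[j] & N != 0 { b[off] } else { a[off] }
        })
    }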
+ Swizzle
+ Shuffle double-precision (64-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set).
+FOR j := 0 to 7
+    i := j*64
+    off := idx[i+2:i]*64
+    IF k[j]
+        dst[i+63:i] := idx[i+3] ? b[off+63:off] : a[off+63:off]
+    ELSE
+        dst[i+63:i] := idx[i+63:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
 immintrin.h
+ Swizzle
+ Shuffle double-precision (64-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+FOR j := 0 to 7
+    i := j*64
+    off := idx[i+2:i]*64
+    IF k[j]
+        dst[i+63:i] := idx[i+3] ? b[off+63:off] : a[off+63:off]
+    ELSE
+        dst[i+63:i] := a[i+63:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
 immintrin.h
+ Swizzle
+ Shuffle double-precision (64-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 to 7
+    i := j*64
+    off := idx[i+2:i]*64
+    IF k[j]
+        dst[i+63:i] := (idx[i+3]) ? b[off+63:off] : a[off+63:off]
+    ELSE
+        dst[i+63:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
 immintrin.h
+ Swizzle
+ Shuffle double-precision (64-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst".
+FOR j := 0 to 7
+    i := j*64
+    off := idx[i+2:i]*64
+    dst[i+63:i] := idx[i+3] ? b[off+63:off] : a[off+63:off]
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
 immintrin.h
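For the double-precision quartet N is 8, so idx bits 2:0 are the offset and bit 3 the source selector; the generic permutex2var sketch above applies unchanged. For instance, concatenating the even lanes of two vectors (values illustrative, reusing that sketch):

    fn main() {
        let a: [f64; 8] = core::array::from_fn(|i| i as f64);        // 0.0 .. 7.0
        let b: [f64; 8] = core::array::from_fn(|i| 10.0 + i as f64); // 10.0 .. 17.0
        // Even lanes of `a`, then even lanes of `b` (8 | off sets the source bit).
        let idx = [0, 2, 4, 6, 8 | 0, 8 | 2, 8 | 4, 8 | 6];
        let r = permutex2var(a, idx, b);
        assert_eq!(r, [0.0, 2.0, 4.0, 6.0, 10.0, 12.0, 14.0, 16.0]);
    }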
+ Swizzle
+ Shuffle single-precision (32-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set).
+FOR j := 0 to 15
+    i := j*32
+    off := idx[i+3:i]*32
+    IF k[j]
+        dst[i+31:i] := idx[i+4] ? b[off+31:off] : a[off+31:off]
+    ELSE
+        dst[i+31:i] := idx[i+31:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
 immintrin.h
+ Swizzle
+ Shuffle single-precision (32-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+FOR j := 0 to 15
+    i := j*32
+    off := idx[i+3:i]*32
+    IF k[j]
+        dst[i+31:i] := idx[i+4] ? b[off+31:off] : a[off+31:off]
+    ELSE
+        dst[i+31:i] := a[i+31:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
 immintrin.h
+ Swizzle
+ Shuffle single-precision (32-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 to 15
+    i := j*32
+    off := idx[i+3:i]*32
+    IF k[j]
+        dst[i+31:i] := (idx[i+4]) ? b[off+31:off] : a[off+31:off]
+    ELSE
+        dst[i+31:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
 immintrin.h
+ Swizzle
+ Shuffle single-precision (32-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst".
+FOR j := 0 to 15
+    i := j*32
+    off := idx[i+3:i]*32
+    dst[i+31:i] := idx[i+4] ? b[off+31:off] : a[off+31:off]
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
 immintrin.h
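Every masked variant above ends with the same per-lane epilogue: keep the computed lane where k[j] is set, otherwise fall back to "src" (mask), to "a" or "idx" (mask2), or to zero (maskz). That epilogue can be modelled once; a sketch with an illustrative name:

    // Scalar model of the writemask epilogue shared by the masked variants above.
    fn mask_blend<T: Copy, const N: usize>(k: u16, computed: [T; N], fallback: [T; N]) -> [T; N] {
        core::array::from_fn(|j| if (k >> j) & 1 == 1 { computed[j] } else { fallback[j] })
    }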
+ Swizzle
+ Shuffle 64-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set).
+FOR j := 0 to 7
+    i := j*64
+    off := idx[i+2:i]*64
+    IF k[j]
+        dst[i+63:i] := idx[i+3] ? b[off+63:off] : a[off+63:off]
+    ELSE
+        dst[i+63:i] := idx[i+63:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
 immintrin.h
+ Swizzle
+ Shuffle 64-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+FOR j := 0 to 7
+    i := j*64
+    off := idx[i+2:i]*64
+    IF k[j]
+        dst[i+63:i] := idx[i+3] ? b[off+63:off] : a[off+63:off]
+    ELSE
+        dst[i+63:i] := a[i+63:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
 immintrin.h
+ Swizzle
+ Shuffle 64-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 to 7
+    i := j*64
+    off := idx[i+2:i]*64
+    IF k[j]
+        dst[i+63:i] := (idx[i+3]) ? b[off+63:off] : a[off+63:off]
+    ELSE
+        dst[i+63:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
 immintrin.h
+ Swizzle
+ Shuffle 64-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst".
+FOR j := 0 to 7
+    i := j*64
+    off := idx[i+2:i]*64
+    dst[i+63:i] := idx[i+3] ? b[off+63:off] : a[off+63:off]
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
 immintrin.h
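Composing the two sketches above gives, for example, the mask2 64-bit form: the computed permute is blended against the idx vector itself, reinterpreted as data, wherever the mask bit is clear. A hypothetical composition, reusing permutex2var and mask_blend from earlier:

    fn mask2_permutex2var_epi64(a: [i64; 8], idx: [i64; 8], k: u8, b: [i64; 8]) -> [i64; 8] {
        let idx_u: [usize; 8] = idx.map(|v| v as usize & 0xF); // bits 3:0 are significant
        let computed = permutex2var(a, idx_u, b);
        mask_blend(k as u16, computed, idx) // lanes with k[j] == 0 keep idx[j]
    }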
+ Swizzle
+ Shuffle double-precision (64-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+IF (imm8[0] == 0) tmp_dst[63:0] := a[63:0]; FI
+IF (imm8[0] == 1) tmp_dst[63:0] := a[127:64]; FI
+IF (imm8[1] == 0) tmp_dst[127:64] := a[63:0]; FI
+IF (imm8[1] == 1) tmp_dst[127:64] := a[127:64]; FI
+IF (imm8[2] == 0) tmp_dst[191:128] := a[191:128]; FI
+IF (imm8[2] == 1) tmp_dst[191:128] := a[255:192]; FI
+IF (imm8[3] == 0) tmp_dst[255:192] := a[191:128]; FI
+IF (imm8[3] == 1) tmp_dst[255:192] := a[255:192]; FI
+IF (imm8[4] == 0) tmp_dst[319:256] := a[319:256]; FI
+IF (imm8[4] == 1) tmp_dst[319:256] := a[383:320]; FI
+IF (imm8[5] == 0) tmp_dst[383:320] := a[319:256]; FI
+IF (imm8[5] == 1) tmp_dst[383:320] := a[383:320]; FI
+IF (imm8[6] == 0) tmp_dst[447:384] := a[447:384]; FI
+IF (imm8[6] == 1) tmp_dst[447:384] := a[511:448]; FI
+IF (imm8[7] == 0) tmp_dst[511:448] := a[447:384]; FI
+IF (imm8[7] == 1) tmp_dst[511:448] := a[511:448]; FI
+FOR j := 0 to 7
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := tmp_dst[i+63:i]
+    ELSE
+        dst[i+63:i] := src[i+63:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
 immintrin.h
+ Swizzle
+ Shuffle double-precision (64-bit) floating-point elements in "a" within 128-bit lanes using the control in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+IF (b[1] == 0) tmp_dst[63:0] := a[63:0]; FI
+IF (b[1] == 1) tmp_dst[63:0] := a[127:64]; FI
+IF (b[65] == 0) tmp_dst[127:64] := a[63:0]; FI
+IF (b[65] == 1) tmp_dst[127:64] := a[127:64]; FI
+IF (b[129] == 0) tmp_dst[191:128] := a[191:128]; FI
+IF (b[129] == 1) tmp_dst[191:128] := a[255:192]; FI
+IF (b[193] == 0) tmp_dst[255:192] := a[191:128]; FI
+IF (b[193] == 1) tmp_dst[255:192] := a[255:192]; FI
+IF (b[257] == 0) tmp_dst[319:256] := a[319:256]; FI
+IF (b[257] == 1) tmp_dst[319:256] := a[383:320]; FI
+IF (b[321] == 0) tmp_dst[383:320] := a[319:256]; FI
+IF (b[321] == 1) tmp_dst[383:320] := a[383:320]; FI
+IF (b[385] == 0) tmp_dst[447:384] := a[447:384]; FI
+IF (b[385] == 1) tmp_dst[447:384] := a[511:448]; FI
+IF (b[449] == 0) tmp_dst[511:448] := a[447:384]; FI
+IF (b[449] == 1) tmp_dst[511:448] := a[511:448]; FI
+FOR j := 0 to 7
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := tmp_dst[i+63:i]
+    ELSE
+        dst[i+63:i] := src[i+63:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
 immintrin.h
+ Swizzle
+ Shuffle double-precision (64-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+IF (imm8[0] == 0) tmp_dst[63:0] := a[63:0]; FI
+IF (imm8[0] == 1) tmp_dst[63:0] := a[127:64]; FI
+IF (imm8[1] == 0) tmp_dst[127:64] := a[63:0]; FI
+IF (imm8[1] == 1) tmp_dst[127:64] := a[127:64]; FI
+IF (imm8[2] == 0) tmp_dst[191:128] := a[191:128]; FI
+IF (imm8[2] == 1) tmp_dst[191:128] := a[255:192]; FI
+IF (imm8[3] == 0) tmp_dst[255:192] := a[191:128]; FI
+IF (imm8[3] == 1) tmp_dst[255:192] := a[255:192]; FI
+IF (imm8[4] == 0) tmp_dst[319:256] := a[319:256]; FI
+IF (imm8[4] == 1) tmp_dst[319:256] := a[383:320]; FI
+IF (imm8[5] == 0) tmp_dst[383:320] := a[319:256]; FI
+IF (imm8[5] == 1) tmp_dst[383:320] := a[383:320]; FI
+IF (imm8[6] == 0) tmp_dst[447:384] := a[447:384]; FI
+IF (imm8[6] == 1) tmp_dst[447:384] := a[511:448]; FI
+IF (imm8[7] == 0) tmp_dst[511:448] := a[447:384]; FI
+IF (imm8[7] == 1) tmp_dst[511:448] := a[511:448]; FI
+FOR j := 0 to 7
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := tmp_dst[i+63:i]
+    ELSE
+        dst[i+63:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
 immintrin.h
+ Swizzle
+ Shuffle double-precision (64-bit) floating-point elements in "a" within 128-bit lanes using the control in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+IF (b[1] == 0) tmp_dst[63:0] := a[63:0]; FI
+IF (b[1] == 1) tmp_dst[63:0] := a[127:64]; FI
+IF (b[65] == 0) tmp_dst[127:64] := a[63:0]; FI
+IF (b[65] == 1) tmp_dst[127:64] := a[127:64]; FI
+IF (b[129] == 0) tmp_dst[191:128] := a[191:128]; FI
+IF (b[129] == 1) tmp_dst[191:128] := a[255:192]; FI
+IF (b[193] == 0) tmp_dst[255:192] := a[191:128]; FI
+IF (b[193] == 1) tmp_dst[255:192] := a[255:192]; FI
+IF (b[257] == 0) tmp_dst[319:256] := a[319:256]; FI
+IF (b[257] == 1) tmp_dst[319:256] := a[383:320]; FI
+IF (b[321] == 0) tmp_dst[383:320] := a[319:256]; FI
+IF (b[321] == 1) tmp_dst[383:320] := a[383:320]; FI
+IF (b[385] == 0) tmp_dst[447:384] := a[447:384]; FI
+IF (b[385] == 1) tmp_dst[447:384] := a[511:448]; FI
+IF (b[449] == 0) tmp_dst[511:448] := a[447:384]; FI
+IF (b[449] == 1) tmp_dst[511:448] := a[511:448]; FI
+FOR j := 0 to 7
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := tmp_dst[i+63:i]
+    ELSE
+        dst[i+63:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
 immintrin.h
+ Swizzle
+ Shuffle double-precision (64-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst".
+IF (imm8[0] == 0) dst[63:0] := a[63:0]; FI
+IF (imm8[0] == 1) dst[63:0] := a[127:64]; FI
+IF (imm8[1] == 0) dst[127:64] := a[63:0]; FI
+IF (imm8[1] == 1) dst[127:64] := a[127:64]; FI
+IF (imm8[2] == 0) dst[191:128] := a[191:128]; FI
+IF (imm8[2] == 1) dst[191:128] := a[255:192]; FI
+IF (imm8[3] == 0) dst[255:192] := a[191:128]; FI
+IF (imm8[3] == 1) dst[255:192] := a[255:192]; FI
+IF (imm8[4] == 0) dst[319:256] := a[319:256]; FI
+IF (imm8[4] == 1) dst[319:256] := a[383:320]; FI
+IF (imm8[5] == 0) dst[383:320] := a[319:256]; FI
+IF (imm8[5] == 1) dst[383:320] := a[383:320]; FI
+IF (imm8[6] == 0) dst[447:384] := a[447:384]; FI
+IF (imm8[6] == 1) dst[447:384] := a[511:448]; FI
+IF (imm8[7] == 0) dst[511:448] := a[447:384]; FI
+IF (imm8[7] == 1) dst[511:448] := a[511:448]; FI
+dst[MAX:512] := 0
+ AVX512F
 immintrin.h
+ Swizzle
+ Shuffle double-precision (64-bit) floating-point elements in "a" within 128-bit lanes using the control in "b", and store the results in "dst".
+IF (b[1] == 0) dst[63:0] := a[63:0]; FI
+IF (b[1] == 1) dst[63:0] := a[127:64]; FI
+IF (b[65] == 0) dst[127:64] := a[63:0]; FI
+IF (b[65] == 1) dst[127:64] := a[127:64]; FI
+IF (b[129] == 0) dst[191:128] := a[191:128]; FI
+IF (b[129] == 1) dst[191:128] := a[255:192]; FI
+IF (b[193] == 0) dst[255:192] := a[191:128]; FI
+IF (b[193] == 1) dst[255:192] := a[255:192]; FI
+IF (b[257] == 0) dst[319:256] := a[319:256]; FI
+IF (b[257] == 1) dst[319:256] := a[383:320]; FI
+IF (b[321] == 0) dst[383:320] := a[319:256]; FI
+IF (b[321] == 1) dst[383:320] := a[383:320]; FI
+IF (b[385] == 0) dst[447:384] := a[447:384]; FI
+IF (b[385] == 1) dst[447:384] := a[511:448]; FI
+IF (b[449] == 0) dst[511:448] := a[447:384]; FI
+IF (b[449] == 1) dst[511:448] := a[511:448]; FI
+dst[MAX:512] := 0
+ AVX512F
 immintrin.h
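The 128-bit-lane pd shuffles use one control bit per element: bit j picks the low or high half of element j's own 128-bit pair, and the "var" forms read that bit from bit 1 of the corresponding 64-bit element of "b" instead of imm8. A scalar sketch of the immediate form (name illustrative):

    // Scalar model of the in-lane f64 shuffle: element j comes from its own
    // 128-bit pair, picking the low or high half according to imm8 bit j.
    fn permute_pd512(a: [f64; 8], imm8: u8) -> [f64; 8] {
        core::array::from_fn(|j| {
            let pair = j & !1;                   // start of the 128-bit lane
            let hi = ((imm8 >> j) & 1) as usize; // 0 = low half, 1 = high half
            a[pair + hi]
        })
    }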
+ Swizzle
+ Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+DEFINE SELECT4(src, control) {
+    CASE(control[1:0]) OF
+    0: tmp[31:0] := src[31:0]
+    1: tmp[31:0] := src[63:32]
+    2: tmp[31:0] := src[95:64]
+    3: tmp[31:0] := src[127:96]
+    ESAC
+    RETURN tmp[31:0]
+}
+tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0])
+tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2])
+tmp_dst[95:64] := SELECT4(a[127:0], imm8[5:4])
+tmp_dst[127:96] := SELECT4(a[127:0], imm8[7:6])
+tmp_dst[159:128] := SELECT4(a[255:128], imm8[1:0])
+tmp_dst[191:160] := SELECT4(a[255:128], imm8[3:2])
+tmp_dst[223:192] := SELECT4(a[255:128], imm8[5:4])
+tmp_dst[255:224] := SELECT4(a[255:128], imm8[7:6])
+tmp_dst[287:256] := SELECT4(a[383:256], imm8[1:0])
+tmp_dst[319:288] := SELECT4(a[383:256], imm8[3:2])
+tmp_dst[351:320] := SELECT4(a[383:256], imm8[5:4])
+tmp_dst[383:352] := SELECT4(a[383:256], imm8[7:6])
+tmp_dst[415:384] := SELECT4(a[511:384], imm8[1:0])
+tmp_dst[447:416] := SELECT4(a[511:384], imm8[3:2])
+tmp_dst[479:448] := SELECT4(a[511:384], imm8[5:4])
+tmp_dst[511:480] := SELECT4(a[511:384], imm8[7:6])
+FOR j := 0 to 15
+    i := j*32
+    IF k[j]
+        dst[i+31:i] := tmp_dst[i+31:i]
+    ELSE
+        dst[i+31:i] := src[i+31:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
 immintrin.h
+ Swizzle
+ Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+DEFINE SELECT4(src, control) {
+    CASE(control[1:0]) OF
+    0: tmp[31:0] := src[31:0]
+    1: tmp[31:0] := src[63:32]
+    2: tmp[31:0] := src[95:64]
+    3: tmp[31:0] := src[127:96]
+    ESAC
+    RETURN tmp[31:0]
+}
+tmp_dst[31:0] := SELECT4(a[127:0], b[1:0])
+tmp_dst[63:32] := SELECT4(a[127:0], b[33:32])
+tmp_dst[95:64] := SELECT4(a[127:0], b[65:64])
+tmp_dst[127:96] := SELECT4(a[127:0], b[97:96])
+tmp_dst[159:128] := SELECT4(a[255:128], b[129:128])
+tmp_dst[191:160] := SELECT4(a[255:128], b[161:160])
+tmp_dst[223:192] := SELECT4(a[255:128], b[193:192])
+tmp_dst[255:224] := SELECT4(a[255:128], b[225:224])
+tmp_dst[287:256] := SELECT4(a[383:256], b[257:256])
+tmp_dst[319:288] := SELECT4(a[383:256], b[289:288])
+tmp_dst[351:320] := SELECT4(a[383:256], b[321:320])
+tmp_dst[383:352] := SELECT4(a[383:256], b[353:352])
+tmp_dst[415:384] := SELECT4(a[511:384], b[385:384])
+tmp_dst[447:416] := SELECT4(a[511:384], b[417:416])
+tmp_dst[479:448] := SELECT4(a[511:384], b[449:448])
+tmp_dst[511:480] := SELECT4(a[511:384], b[481:480])
+FOR j := 0 to 15
+    i := j*32
+    IF k[j]
+        dst[i+31:i] := tmp_dst[i+31:i]
+    ELSE
+        dst[i+31:i] := src[i+31:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
 immintrin.h
+ Swizzle
+ Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+DEFINE SELECT4(src, control) {
+    CASE(control[1:0]) OF
+    0: tmp[31:0] := src[31:0]
+    1: tmp[31:0] := src[63:32]
+    2: tmp[31:0] := src[95:64]
+    3: tmp[31:0] := src[127:96]
+    ESAC
+    RETURN tmp[31:0]
+}
+tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0])
+tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2])
+tmp_dst[95:64] := SELECT4(a[127:0], imm8[5:4])
+tmp_dst[127:96] := SELECT4(a[127:0], imm8[7:6])
+tmp_dst[159:128] := SELECT4(a[255:128], imm8[1:0])
+tmp_dst[191:160] := SELECT4(a[255:128], imm8[3:2])
+tmp_dst[223:192] := SELECT4(a[255:128], imm8[5:4])
+tmp_dst[255:224] := SELECT4(a[255:128], imm8[7:6])
+tmp_dst[287:256] := SELECT4(a[383:256], imm8[1:0])
+tmp_dst[319:288] := SELECT4(a[383:256], imm8[3:2])
+tmp_dst[351:320] := SELECT4(a[383:256], imm8[5:4])
+tmp_dst[383:352] := SELECT4(a[383:256], imm8[7:6])
+tmp_dst[415:384] := SELECT4(a[511:384], imm8[1:0])
+tmp_dst[447:416] := SELECT4(a[511:384], imm8[3:2])
+tmp_dst[479:448] := SELECT4(a[511:384], imm8[5:4])
+tmp_dst[511:480] := SELECT4(a[511:384], imm8[7:6])
+FOR j := 0 to 15
+    i := j*32
+    IF k[j]
+        dst[i+31:i] := tmp_dst[i+31:i]
+    ELSE
+        dst[i+31:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
 immintrin.h
+ Swizzle
+ Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+DEFINE SELECT4(src, control) {
+    CASE(control[1:0]) OF
+    0: tmp[31:0] := src[31:0]
+    1: tmp[31:0] := src[63:32]
+    2: tmp[31:0] := src[95:64]
+    3: tmp[31:0] := src[127:96]
+    ESAC
+    RETURN tmp[31:0]
+}
+tmp_dst[31:0] := SELECT4(a[127:0], b[1:0])
+tmp_dst[63:32] := SELECT4(a[127:0], b[33:32])
+tmp_dst[95:64] := SELECT4(a[127:0], b[65:64])
+tmp_dst[127:96] := SELECT4(a[127:0], b[97:96])
+tmp_dst[159:128] := SELECT4(a[255:128], b[129:128])
+tmp_dst[191:160] := SELECT4(a[255:128], b[161:160])
+tmp_dst[223:192] := SELECT4(a[255:128], b[193:192])
+tmp_dst[255:224] := SELECT4(a[255:128], b[225:224])
+tmp_dst[287:256] := SELECT4(a[383:256], b[257:256])
+tmp_dst[319:288] := SELECT4(a[383:256], b[289:288])
+tmp_dst[351:320] := SELECT4(a[383:256], b[321:320])
+tmp_dst[383:352] := SELECT4(a[383:256], b[353:352])
+tmp_dst[415:384] := SELECT4(a[511:384], b[385:384])
+tmp_dst[447:416] := SELECT4(a[511:384], b[417:416])
+tmp_dst[479:448] := SELECT4(a[511:384], b[449:448])
+tmp_dst[511:480] := SELECT4(a[511:384], b[481:480])
+FOR j := 0 to 15
+    i := j*32
+    IF k[j]
+        dst[i+31:i] := tmp_dst[i+31:i]
+    ELSE
+        dst[i+31:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
 immintrin.h
+ Swizzle
+ Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst".
+DEFINE SELECT4(src, control) {
+    CASE(control[1:0]) OF
+    0: tmp[31:0] := src[31:0]
+    1: tmp[31:0] := src[63:32]
+    2: tmp[31:0] := src[95:64]
+    3: tmp[31:0] := src[127:96]
+    ESAC
+    RETURN tmp[31:0]
+}
+dst[31:0] := SELECT4(a[127:0], imm8[1:0])
+dst[63:32] := SELECT4(a[127:0], imm8[3:2])
+dst[95:64] := SELECT4(a[127:0], imm8[5:4])
+dst[127:96] := SELECT4(a[127:0], imm8[7:6])
+dst[159:128] := SELECT4(a[255:128], imm8[1:0])
+dst[191:160] := SELECT4(a[255:128], imm8[3:2])
+dst[223:192] := SELECT4(a[255:128], imm8[5:4])
+dst[255:224] := SELECT4(a[255:128], imm8[7:6])
+dst[287:256] := SELECT4(a[383:256], imm8[1:0])
+dst[319:288] := SELECT4(a[383:256], imm8[3:2])
+dst[351:320] := SELECT4(a[383:256], imm8[5:4])
+dst[383:352] := SELECT4(a[383:256], imm8[7:6])
+dst[415:384] := SELECT4(a[511:384], imm8[1:0])
+dst[447:416] := SELECT4(a[511:384], imm8[3:2])
+dst[479:448] := SELECT4(a[511:384], imm8[5:4])
+dst[511:480] := SELECT4(a[511:384], imm8[7:6])
+dst[MAX:512] := 0
+ AVX512F
 immintrin.h
+ Swizzle
+ Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "b", and store the results in "dst".
+DEFINE SELECT4(src, control) {
+    CASE(control[1:0]) OF
+    0: tmp[31:0] := src[31:0]
+    1: tmp[31:0] := src[63:32]
+    2: tmp[31:0] := src[95:64]
+    3: tmp[31:0] := src[127:96]
+    ESAC
+    RETURN tmp[31:0]
+}
+dst[31:0] := SELECT4(a[127:0], b[1:0])
+dst[63:32] := SELECT4(a[127:0], b[33:32])
+dst[95:64] := SELECT4(a[127:0], b[65:64])
+dst[127:96] := SELECT4(a[127:0], b[97:96])
+dst[159:128] := SELECT4(a[255:128], b[129:128])
+dst[191:160] := SELECT4(a[255:128], b[161:160])
+dst[223:192] := SELECT4(a[255:128], b[193:192])
+dst[255:224] := SELECT4(a[255:128], b[225:224])
+dst[287:256] := SELECT4(a[383:256], b[257:256])
+dst[319:288] := SELECT4(a[383:256], b[289:288])
+dst[351:320] := SELECT4(a[383:256], b[321:320])
+dst[383:352] := SELECT4(a[383:256], b[353:352])
+dst[415:384] := SELECT4(a[511:384], b[385:384])
+dst[447:416] := SELECT4(a[511:384], b[417:416])
+dst[479:448] := SELECT4(a[511:384], b[449:448])
+dst[511:480] := SELECT4(a[511:384], b[481:480])
+dst[MAX:512] := 0
+ AVX512F
 immintrin.h
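The ps forms are the classic SELECT4 pattern: two control bits per element choose one of the four f32 slots inside that element's own 128-bit lane, and the same eight imm8 bits are reused in every lane. A scalar sketch (name illustrative):

    // Scalar model of SELECT4 over 128-bit lanes: element j takes slot
    // imm8[2*(j%4)+1 : 2*(j%4)] of its own group of four.
    fn permute_ps512(a: [f32; 16], imm8: u8) -> [f32; 16] {
        core::array::from_fn(|j| {
            let lane = j & !3;                                // group of four f32
            let sel = ((imm8 >> ((j & 3) * 2)) & 3) as usize; // 2 bits per position
            a[lane + sel]
        })
    }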
+ Swizzle
+ Shuffle double-precision (64-bit) floating-point elements in "a" within 256-bit lanes using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+DEFINE SELECT4(src, control) {
+    CASE(control[1:0]) OF
+    0: tmp[63:0] := src[63:0]
+    1: tmp[63:0] := src[127:64]
+    2: tmp[63:0] := src[191:128]
+    3: tmp[63:0] := src[255:192]
+    ESAC
+    RETURN tmp[63:0]
+}
+tmp_dst[63:0] := SELECT4(a[255:0], imm8[1:0])
+tmp_dst[127:64] := SELECT4(a[255:0], imm8[3:2])
+tmp_dst[191:128] := SELECT4(a[255:0], imm8[5:4])
+tmp_dst[255:192] := SELECT4(a[255:0], imm8[7:6])
+tmp_dst[319:256] := SELECT4(a[511:256], imm8[1:0])
+tmp_dst[383:320] := SELECT4(a[511:256], imm8[3:2])
+tmp_dst[447:384] := SELECT4(a[511:256], imm8[5:4])
+tmp_dst[511:448] := SELECT4(a[511:256], imm8[7:6])
+FOR j := 0 to 7
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := tmp_dst[i+63:i]
+    ELSE
+        dst[i+63:i] := src[i+63:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
 immintrin.h
+ Swizzle
+ Shuffle double-precision (64-bit) floating-point elements in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+FOR j := 0 to 7
+    i := j*64
+    id := idx[i+2:i]*64
+    IF k[j]
+        dst[i+63:i] := a[id+63:id]
+    ELSE
+        dst[i+63:i] := src[i+63:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
 immintrin.h
+ Swizzle
+ Shuffle double-precision (64-bit) floating-point elements in "a" within 256-bit lanes using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+DEFINE SELECT4(src, control) {
+    CASE(control[1:0]) OF
+    0: tmp[63:0] := src[63:0]
+    1: tmp[63:0] := src[127:64]
+    2: tmp[63:0] := src[191:128]
+    3: tmp[63:0] := src[255:192]
+    ESAC
+    RETURN tmp[63:0]
+}
+tmp_dst[63:0] := SELECT4(a[255:0], imm8[1:0])
+tmp_dst[127:64] := SELECT4(a[255:0], imm8[3:2])
+tmp_dst[191:128] := SELECT4(a[255:0], imm8[5:4])
+tmp_dst[255:192] := SELECT4(a[255:0], imm8[7:6])
+tmp_dst[319:256] := SELECT4(a[511:256], imm8[1:0])
+tmp_dst[383:320] := SELECT4(a[511:256], imm8[3:2])
+tmp_dst[447:384] := SELECT4(a[511:256], imm8[5:4])
+tmp_dst[511:448] := SELECT4(a[511:256], imm8[7:6])
+FOR j := 0 to 7
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := tmp_dst[i+63:i]
+    ELSE
+        dst[i+63:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
 immintrin.h
+ Swizzle
+ Shuffle double-precision (64-bit) floating-point elements in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 to 7
+    i := j*64
+    id := idx[i+2:i]*64
+    IF k[j]
+        dst[i+63:i] := a[id+63:id]
+    ELSE
+        dst[i+63:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
 immintrin.h
+ Swizzle
+ Shuffle double-precision (64-bit) floating-point elements in "a" within 256-bit lanes using the control in "imm8", and store the results in "dst".
+DEFINE SELECT4(src, control) {
+    CASE(control[1:0]) OF
+    0: tmp[63:0] := src[63:0]
+    1: tmp[63:0] := src[127:64]
+    2: tmp[63:0] := src[191:128]
+    3: tmp[63:0] := src[255:192]
+    ESAC
+    RETURN tmp[63:0]
+}
+dst[63:0] := SELECT4(a[255:0], imm8[1:0])
+dst[127:64] := SELECT4(a[255:0], imm8[3:2])
+dst[191:128] := SELECT4(a[255:0], imm8[5:4])
+dst[255:192] := SELECT4(a[255:0], imm8[7:6])
+dst[319:256] := SELECT4(a[511:256], imm8[1:0])
+dst[383:320] := SELECT4(a[511:256], imm8[3:2])
+dst[447:384] := SELECT4(a[511:256], imm8[5:4])
+dst[511:448] := SELECT4(a[511:256], imm8[7:6])
+dst[MAX:512] := 0
+ AVX512F
 immintrin.h
+ Swizzle
+ Shuffle double-precision (64-bit) floating-point elements in "a" across lanes using the corresponding index in "idx", and store the results in "dst".
+FOR j := 0 to 7
+    i := j*64
+    id := idx[i+2:i]*64
+    dst[i+63:i] := a[id+63:id]
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
 immintrin.h
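This pd group mixes two shapes: SELECT4 within 256-bit lanes driven by imm8 (the same two control bits per slot, reused in both halves), and a full across-lane gather driven by a 3-bit index per element, the f64 analogue of the 32-bit gather sketched earlier. A scalar sketch of the in-lane form (name illustrative):

    // Scalar model of SELECT4 over 256-bit lanes: element j takes one of the
    // four f64 slots of its own group, per two imm8 bits.
    fn permutex_pd512(a: [f64; 8], imm8: u8) -> [f64; 8] {
        core::array::from_fn(|j| {
            let lane = j & !3;                                // group of four f64
            let sel = ((imm8 >> ((j & 3) * 2)) & 3) as usize;
            a[lane + sel]
        })
    }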
+ Swizzle
+ Shuffle single-precision (32-bit) floating-point elements in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+FOR j := 0 to 15
+    i := j*32
+    id := idx[i+3:i]*32
+    IF k[j]
+        dst[i+31:i] := a[id+31:id]
+    ELSE
+        dst[i+31:i] := src[i+31:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
 immintrin.h
+ Swizzle
+ Shuffle single-precision (32-bit) floating-point elements in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 to 15
+    i := j*32
+    id := idx[i+3:i]*32
+    IF k[j]
+        dst[i+31:i] := a[id+31:id]
+    ELSE
+        dst[i+31:i] := 0
+    FI
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
 immintrin.h
+ Swizzle
+ Shuffle single-precision (32-bit) floating-point elements in "a" across lanes using the corresponding index in "idx", and store the results in "dst".
+FOR j := 0 to 15
+    i := j*32
+    id := idx[i+3:i]*32
+    dst[i+31:i] := a[id+31:id]
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
 immintrin.h
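The ps gather is lane-for-lane the same as the 32-bit integer gather sketched earlier, only over f32 bit patterns. Reusing that model with the types swapped (illustrative):

    fn permutexvar_ps512(idx: [u32; 16], a: [f32; 16]) -> [f32; 16] {
        core::array::from_fn(|j| a[(idx[j] & 0xF) as usize]) // dst[j] = a[idx[j] & 15]
    }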
Shuffle 64-bit integers in "a" within 256-bit lanes using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

DEFINE SELECT4(src, control) {
    CASE(control[1:0]) OF
    0: tmp[63:0] := src[63:0]
    1: tmp[63:0] := src[127:64]
    2: tmp[63:0] := src[191:128]
    3: tmp[63:0] := src[255:192]
    ESAC
    RETURN tmp[63:0]
}
tmp_dst[63:0] := SELECT4(a[255:0], imm8[1:0])
tmp_dst[127:64] := SELECT4(a[255:0], imm8[3:2])
tmp_dst[191:128] := SELECT4(a[255:0], imm8[5:4])
tmp_dst[255:192] := SELECT4(a[255:0], imm8[7:6])
tmp_dst[319:256] := SELECT4(a[511:256], imm8[1:0])
tmp_dst[383:320] := SELECT4(a[511:256], imm8[3:2])
tmp_dst[447:384] := SELECT4(a[511:256], imm8[5:4])
tmp_dst[511:448] := SELECT4(a[511:256], imm8[7:6])
FOR j := 0 to 7
    i := j*64
    IF k[j]
        dst[i+63:i] := tmp_dst[i+63:i]
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
+ Swizzle
Shuffle 64-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := j*64
    id := idx[i+2:i]*64
    IF k[j]
        dst[i+63:i] := a[id+63:id]
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
+ Swizzle
Shuffle 64-bit integers in "a" within 256-bit lanes using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

DEFINE SELECT4(src, control) {
    CASE(control[1:0]) OF
    0: tmp[63:0] := src[63:0]
    1: tmp[63:0] := src[127:64]
    2: tmp[63:0] := src[191:128]
    3: tmp[63:0] := src[255:192]
    ESAC
    RETURN tmp[63:0]
}
tmp_dst[63:0] := SELECT4(a[255:0], imm8[1:0])
tmp_dst[127:64] := SELECT4(a[255:0], imm8[3:2])
tmp_dst[191:128] := SELECT4(a[255:0], imm8[5:4])
tmp_dst[255:192] := SELECT4(a[255:0], imm8[7:6])
tmp_dst[319:256] := SELECT4(a[511:256], imm8[1:0])
tmp_dst[383:320] := SELECT4(a[511:256], imm8[3:2])
tmp_dst[447:384] := SELECT4(a[511:256], imm8[5:4])
tmp_dst[511:448] := SELECT4(a[511:256], imm8[7:6])
FOR j := 0 to 7
    i := j*64
    IF k[j]
        dst[i+63:i] := tmp_dst[i+63:i]
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
+ Swizzle
Shuffle 64-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := j*64
    id := idx[i+2:i]*64
    IF k[j]
        dst[i+63:i] := a[id+63:id]
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
+ Swizzle
Shuffle 64-bit integers in "a" within 256-bit lanes using the control in "imm8", and store the results in "dst".

DEFINE SELECT4(src, control) {
    CASE(control[1:0]) OF
    0: tmp[63:0] := src[63:0]
    1: tmp[63:0] := src[127:64]
    2: tmp[63:0] := src[191:128]
    3: tmp[63:0] := src[255:192]
    ESAC
    RETURN tmp[63:0]
}
dst[63:0] := SELECT4(a[255:0], imm8[1:0])
dst[127:64] := SELECT4(a[255:0], imm8[3:2])
dst[191:128] := SELECT4(a[255:0], imm8[5:4])
dst[255:192] := SELECT4(a[255:0], imm8[7:6])
dst[319:256] := SELECT4(a[511:256], imm8[1:0])
dst[383:320] := SELECT4(a[511:256], imm8[3:2])
dst[447:384] := SELECT4(a[511:256], imm8[5:4])
dst[511:448] := SELECT4(a[511:256], imm8[7:6])
dst[MAX:512] := 0

AVX512F
immintrin.h
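A scalar Rust sketch of the SELECT4 pattern above (illustrative names, not a stdarch API): the same four 2-bit selectors of imm8 are applied to each 256-bit half independently.

// Scalar model of the imm8-controlled 64-bit permute within 256-bit halves.
fn permutex_i64x8(a: [i64; 8], imm8: u8) -> [i64; 8] {
    let mut dst = [0i64; 8];
    for half in 0..2 {
        for j in 0..4 {
            let sel = ((imm8 >> (2 * j)) & 0b11) as usize; // SELECT4 control
            dst[half * 4 + j] = a[half * 4 + sel];
        }
    }
    dst
}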
+ Swizzle
Shuffle 64-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst".

FOR j := 0 to 7
    i := j*64
    id := idx[i+2:i]*64
    dst[i+63:i] := a[id+63:id]
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
+ Swizzle
Load contiguous active 32-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

m := 0
FOR j := 0 to 15
    i := j*32
    IF k[j]
        dst[i+31:i] := a[m+31:m]
        m := m + 32
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
+ Swizzle
Load contiguous active 32-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

m := 0
FOR j := 0 to 15
    i := j*32
    IF k[j]
        dst[i+31:i] := a[m+31:m]
        m := m + 32
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
+ Swizzle
Load contiguous active 64-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

m := 0
FOR j := 0 to 7
    i := j*64
    IF k[j]
        dst[i+63:i] := a[m+63:m]
        m := m + 64
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
+ Swizzle
Load contiguous active 64-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

m := 0
FOR j := 0 to 7
    i := j*64
    IF k[j]
        dst[i+63:i] := a[m+63:m]
        m := m + 64
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
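A scalar Rust sketch of the expand operation above in its zeromask form (illustrative names; the writemask form would copy src[j] instead of zero):

// Active destination slots consume `a` contiguously; inactive slots are zeroed.
fn expand_zero_i64x8(k: u8, a: [i64; 8]) -> [i64; 8] {
    let mut dst = [0i64; 8];
    let mut m = 0usize; // index of the next contiguous source element
    for j in 0..8 {
        if (k >> j) & 1 == 1 {
            dst[j] = a[m];
            m += 1;
        }
    }
    dst
}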
+ Swizzle
Shuffle 32-bit integers in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

DEFINE SELECT4(src, control) {
    CASE(control[1:0]) OF
    0: tmp[31:0] := src[31:0]
    1: tmp[31:0] := src[63:32]
    2: tmp[31:0] := src[95:64]
    3: tmp[31:0] := src[127:96]
    ESAC
    RETURN tmp[31:0]
}
tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0])
tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2])
tmp_dst[95:64] := SELECT4(a[127:0], imm8[5:4])
tmp_dst[127:96] := SELECT4(a[127:0], imm8[7:6])
tmp_dst[159:128] := SELECT4(a[255:128], imm8[1:0])
tmp_dst[191:160] := SELECT4(a[255:128], imm8[3:2])
tmp_dst[223:192] := SELECT4(a[255:128], imm8[5:4])
tmp_dst[255:224] := SELECT4(a[255:128], imm8[7:6])
tmp_dst[287:256] := SELECT4(a[383:256], imm8[1:0])
tmp_dst[319:288] := SELECT4(a[383:256], imm8[3:2])
tmp_dst[351:320] := SELECT4(a[383:256], imm8[5:4])
tmp_dst[383:352] := SELECT4(a[383:256], imm8[7:6])
tmp_dst[415:384] := SELECT4(a[511:384], imm8[1:0])
tmp_dst[447:416] := SELECT4(a[511:384], imm8[3:2])
tmp_dst[479:448] := SELECT4(a[511:384], imm8[5:4])
tmp_dst[511:480] := SELECT4(a[511:384], imm8[7:6])
FOR j := 0 to 15
    i := j*32
    IF k[j]
        dst[i+31:i] := tmp_dst[i+31:i]
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
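A scalar Rust sketch of the per-128-bit-lane dword shuffle above, with the masking step left out since it composes with the merge rule sketched earlier (illustrative, not a stdarch API):

// The same four 2-bit selectors are applied inside each 128-bit lane.
fn shuffle_i32_lanes(a: [i32; 16], imm8: u8) -> [i32; 16] {
    let mut dst = [0i32; 16];
    for lane in 0..4 {
        for j in 0..4 {
            let sel = ((imm8 >> (2 * j)) & 0b11) as usize; // SELECT4 control
            dst[lane * 4 + j] = a[lane * 4 + sel];
        }
    }
    dst
}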
+ Swizzle
Unpack and interleave 32-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

DEFINE INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]) {
    dst[31:0] := src1[95:64]
    dst[63:32] := src2[95:64]
    dst[95:64] := src1[127:96]
    dst[127:96] := src2[127:96]
    RETURN dst[127:0]
}
tmp_dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0])
tmp_dst[255:128] := INTERLEAVE_HIGH_DWORDS(a[255:128], b[255:128])
tmp_dst[383:256] := INTERLEAVE_HIGH_DWORDS(a[383:256], b[383:256])
tmp_dst[511:384] := INTERLEAVE_HIGH_DWORDS(a[511:384], b[511:384])
FOR j := 0 to 15
    i := j*32
    IF k[j]
        dst[i+31:i] := tmp_dst[i+31:i]
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
+ Swizzle
Unpack and interleave 32-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

DEFINE INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]) {
    dst[31:0] := src1[95:64]
    dst[63:32] := src2[95:64]
    dst[95:64] := src1[127:96]
    dst[127:96] := src2[127:96]
    RETURN dst[127:0]
}
tmp_dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0])
tmp_dst[255:128] := INTERLEAVE_HIGH_DWORDS(a[255:128], b[255:128])
tmp_dst[383:256] := INTERLEAVE_HIGH_DWORDS(a[383:256], b[383:256])
tmp_dst[511:384] := INTERLEAVE_HIGH_DWORDS(a[511:384], b[511:384])
FOR j := 0 to 15
    i := j*32
    IF k[j]
        dst[i+31:i] := tmp_dst[i+31:i]
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
+ Swizzle
Unpack and interleave 32-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst".

DEFINE INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]) {
    dst[31:0] := src1[95:64]
    dst[63:32] := src2[95:64]
    dst[95:64] := src1[127:96]
    dst[127:96] := src2[127:96]
    RETURN dst[127:0]
}
dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0])
dst[255:128] := INTERLEAVE_HIGH_DWORDS(a[255:128], b[255:128])
dst[383:256] := INTERLEAVE_HIGH_DWORDS(a[383:256], b[383:256])
dst[511:384] := INTERLEAVE_HIGH_DWORDS(a[511:384], b[511:384])
dst[MAX:512] := 0

AVX512F
immintrin.h
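A scalar Rust sketch of INTERLEAVE_HIGH_DWORDS applied per 128-bit lane (illustrative, not a stdarch API): the two high dwords of "a" and "b" in each lane are interleaved a, b, a, b.

fn unpackhi_i32x16(a: [i32; 16], b: [i32; 16]) -> [i32; 16] {
    let mut dst = [0i32; 16];
    for lane in 0..4 {
        let base = lane * 4;
        dst[base] = a[base + 2];     // src1[95:64]
        dst[base + 1] = b[base + 2]; // src2[95:64]
        dst[base + 2] = a[base + 3]; // src1[127:96]
        dst[base + 3] = b[base + 3]; // src2[127:96]
    }
    dst
}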
+ Swizzle
Unpack and interleave 64-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) {
    dst[63:0] := src1[127:64]
    dst[127:64] := src2[127:64]
    RETURN dst[127:0]
}
tmp_dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0])
tmp_dst[255:128] := INTERLEAVE_HIGH_QWORDS(a[255:128], b[255:128])
tmp_dst[383:256] := INTERLEAVE_HIGH_QWORDS(a[383:256], b[383:256])
tmp_dst[511:384] := INTERLEAVE_HIGH_QWORDS(a[511:384], b[511:384])
FOR j := 0 to 7
    i := j*64
    IF k[j]
        dst[i+63:i] := tmp_dst[i+63:i]
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
+ Swizzle
Unpack and interleave 64-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) {
    dst[63:0] := src1[127:64]
    dst[127:64] := src2[127:64]
    RETURN dst[127:0]
}
tmp_dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0])
tmp_dst[255:128] := INTERLEAVE_HIGH_QWORDS(a[255:128], b[255:128])
tmp_dst[383:256] := INTERLEAVE_HIGH_QWORDS(a[383:256], b[383:256])
tmp_dst[511:384] := INTERLEAVE_HIGH_QWORDS(a[511:384], b[511:384])
FOR j := 0 to 7
    i := j*64
    IF k[j]
        dst[i+63:i] := tmp_dst[i+63:i]
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
+ Swizzle
Unpack and interleave 64-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst".

DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) {
    dst[63:0] := src1[127:64]
    dst[127:64] := src2[127:64]
    RETURN dst[127:0]
}
dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0])
dst[255:128] := INTERLEAVE_HIGH_QWORDS(a[255:128], b[255:128])
dst[383:256] := INTERLEAVE_HIGH_QWORDS(a[383:256], b[383:256])
dst[511:384] := INTERLEAVE_HIGH_QWORDS(a[511:384], b[511:384])
dst[MAX:512] := 0

AVX512F
immintrin.h
+ Swizzle
Unpack and interleave 32-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) {
    dst[31:0] := src1[31:0]
    dst[63:32] := src2[31:0]
    dst[95:64] := src1[63:32]
    dst[127:96] := src2[63:32]
    RETURN dst[127:0]
}
tmp_dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0])
tmp_dst[255:128] := INTERLEAVE_DWORDS(a[255:128], b[255:128])
tmp_dst[383:256] := INTERLEAVE_DWORDS(a[383:256], b[383:256])
tmp_dst[511:384] := INTERLEAVE_DWORDS(a[511:384], b[511:384])
FOR j := 0 to 15
    i := j*32
    IF k[j]
        dst[i+31:i] := tmp_dst[i+31:i]
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
+ Swizzle
Unpack and interleave 32-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) {
    dst[31:0] := src1[31:0]
    dst[63:32] := src2[31:0]
    dst[95:64] := src1[63:32]
    dst[127:96] := src2[63:32]
    RETURN dst[127:0]
}
tmp_dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0])
tmp_dst[255:128] := INTERLEAVE_DWORDS(a[255:128], b[255:128])
tmp_dst[383:256] := INTERLEAVE_DWORDS(a[383:256], b[383:256])
tmp_dst[511:384] := INTERLEAVE_DWORDS(a[511:384], b[511:384])
FOR j := 0 to 15
    i := j*32
    IF k[j]
        dst[i+31:i] := tmp_dst[i+31:i]
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
+ Swizzle
Unpack and interleave 32-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst".

DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) {
    dst[31:0] := src1[31:0]
    dst[63:32] := src2[31:0]
    dst[95:64] := src1[63:32]
    dst[127:96] := src2[63:32]
    RETURN dst[127:0]
}
dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0])
dst[255:128] := INTERLEAVE_DWORDS(a[255:128], b[255:128])
dst[383:256] := INTERLEAVE_DWORDS(a[383:256], b[383:256])
dst[511:384] := INTERLEAVE_DWORDS(a[511:384], b[511:384])
dst[MAX:512] := 0

AVX512F
immintrin.h
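The low-half counterpart mirrors the high-half sketch earlier; a scalar Rust model of INTERLEAVE_DWORDS per 128-bit lane (illustrative):

fn unpacklo_i32x16(a: [i32; 16], b: [i32; 16]) -> [i32; 16] {
    let mut dst = [0i32; 16];
    for lane in 0..4 {
        let base = lane * 4;
        dst[base] = a[base];         // src1[31:0]
        dst[base + 1] = b[base];     // src2[31:0]
        dst[base + 2] = a[base + 1]; // src1[63:32]
        dst[base + 3] = b[base + 1]; // src2[63:32]
    }
    dst
}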
+ Swizzle
Unpack and interleave 64-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) {
    dst[63:0] := src1[63:0]
    dst[127:64] := src2[63:0]
    RETURN dst[127:0]
}
tmp_dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0])
tmp_dst[255:128] := INTERLEAVE_QWORDS(a[255:128], b[255:128])
tmp_dst[383:256] := INTERLEAVE_QWORDS(a[383:256], b[383:256])
tmp_dst[511:384] := INTERLEAVE_QWORDS(a[511:384], b[511:384])
FOR j := 0 to 7
    i := j*64
    IF k[j]
        dst[i+63:i] := tmp_dst[i+63:i]
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
+ Swizzle
Unpack and interleave 64-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) {
    dst[63:0] := src1[63:0]
    dst[127:64] := src2[63:0]
    RETURN dst[127:0]
}
tmp_dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0])
tmp_dst[255:128] := INTERLEAVE_QWORDS(a[255:128], b[255:128])
tmp_dst[383:256] := INTERLEAVE_QWORDS(a[383:256], b[383:256])
tmp_dst[511:384] := INTERLEAVE_QWORDS(a[511:384], b[511:384])
FOR j := 0 to 7
    i := j*64
    IF k[j]
        dst[i+63:i] := tmp_dst[i+63:i]
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
+ Swizzle
Unpack and interleave 64-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst".

DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) {
    dst[63:0] := src1[63:0]
    dst[127:64] := src2[63:0]
    RETURN dst[127:0]
}
dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0])
dst[255:128] := INTERLEAVE_QWORDS(a[255:128], b[255:128])
dst[383:256] := INTERLEAVE_QWORDS(a[383:256], b[383:256])
dst[511:384] := INTERLEAVE_QWORDS(a[511:384], b[511:384])
dst[MAX:512] := 0

AVX512F
immintrin.h
+ Swizzle
Shuffle 128-bits (composed of 4 single-precision (32-bit) floating-point elements) selected by "imm8" from "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

DEFINE SELECT4(src, control) {
    CASE(control[1:0]) OF
    0: tmp[127:0] := src[127:0]
    1: tmp[127:0] := src[255:128]
    2: tmp[127:0] := src[383:256]
    3: tmp[127:0] := src[511:384]
    ESAC
    RETURN tmp[127:0]
}
tmp_dst[127:0] := SELECT4(a[511:0], imm8[1:0])
tmp_dst[255:128] := SELECT4(a[511:0], imm8[3:2])
tmp_dst[383:256] := SELECT4(b[511:0], imm8[5:4])
tmp_dst[511:384] := SELECT4(b[511:0], imm8[7:6])
FOR j := 0 to 15
    i := j*32
    IF k[j]
        dst[i+31:i] := tmp_dst[i+31:i]
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
+ Swizzle
Shuffle 128-bits (composed of 4 single-precision (32-bit) floating-point elements) selected by "imm8" from "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

DEFINE SELECT4(src, control) {
    CASE(control[1:0]) OF
    0: tmp[127:0] := src[127:0]
    1: tmp[127:0] := src[255:128]
    2: tmp[127:0] := src[383:256]
    3: tmp[127:0] := src[511:384]
    ESAC
    RETURN tmp[127:0]
}
tmp_dst[127:0] := SELECT4(a[511:0], imm8[1:0])
tmp_dst[255:128] := SELECT4(a[511:0], imm8[3:2])
tmp_dst[383:256] := SELECT4(b[511:0], imm8[5:4])
tmp_dst[511:384] := SELECT4(b[511:0], imm8[7:6])
FOR j := 0 to 15
    i := j*32
    IF k[j]
        dst[i+31:i] := tmp_dst[i+31:i]
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
+ Swizzle
Shuffle 128-bits (composed of 4 single-precision (32-bit) floating-point elements) selected by "imm8" from "a" and "b", and store the results in "dst".

DEFINE SELECT4(src, control) {
    CASE(control[1:0]) OF
    0: tmp[127:0] := src[127:0]
    1: tmp[127:0] := src[255:128]
    2: tmp[127:0] := src[383:256]
    3: tmp[127:0] := src[511:384]
    ESAC
    RETURN tmp[127:0]
}
dst[127:0] := SELECT4(a[511:0], imm8[1:0])
dst[255:128] := SELECT4(a[511:0], imm8[3:2])
dst[383:256] := SELECT4(b[511:0], imm8[5:4])
dst[511:384] := SELECT4(b[511:0], imm8[7:6])
dst[MAX:512] := 0

AVX512F
immintrin.h
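A scalar Rust sketch of the 128-bit-granular shuffle above (illustrative, not a stdarch API): the two low 2-bit selectors pick 128-bit chunks of "a", the two high selectors pick chunks of "b".

fn shuffle_f32x4(a: [f32; 16], b: [f32; 16], imm8: u8) -> [f32; 16] {
    let mut dst = [0.0f32; 16];
    for c in 0..4 {
        let sel = ((imm8 >> (2 * c)) & 0b11) as usize; // 128-bit chunk index
        let src = if c < 2 { &a } else { &b };
        dst[c * 4..c * 4 + 4].copy_from_slice(&src[sel * 4..sel * 4 + 4]);
    }
    dst
}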
+ Swizzle
Shuffle 128-bits (composed of 2 double-precision (64-bit) floating-point elements) selected by "imm8" from "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

DEFINE SELECT4(src, control) {
    CASE(control[1:0]) OF
    0: tmp[127:0] := src[127:0]
    1: tmp[127:0] := src[255:128]
    2: tmp[127:0] := src[383:256]
    3: tmp[127:0] := src[511:384]
    ESAC
    RETURN tmp[127:0]
}
tmp_dst[127:0] := SELECT4(a[511:0], imm8[1:0])
tmp_dst[255:128] := SELECT4(a[511:0], imm8[3:2])
tmp_dst[383:256] := SELECT4(b[511:0], imm8[5:4])
tmp_dst[511:384] := SELECT4(b[511:0], imm8[7:6])
FOR j := 0 to 7
    i := j*64
    IF k[j]
        dst[i+63:i] := tmp_dst[i+63:i]
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
+ Swizzle
Shuffle 128-bits (composed of 2 double-precision (64-bit) floating-point elements) selected by "imm8" from "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

DEFINE SELECT4(src, control) {
    CASE(control[1:0]) OF
    0: tmp[127:0] := src[127:0]
    1: tmp[127:0] := src[255:128]
    2: tmp[127:0] := src[383:256]
    3: tmp[127:0] := src[511:384]
    ESAC
    RETURN tmp[127:0]
}
tmp_dst[127:0] := SELECT4(a[511:0], imm8[1:0])
tmp_dst[255:128] := SELECT4(a[511:0], imm8[3:2])
tmp_dst[383:256] := SELECT4(b[511:0], imm8[5:4])
tmp_dst[511:384] := SELECT4(b[511:0], imm8[7:6])
FOR j := 0 to 7
    i := j*64
    IF k[j]
        dst[i+63:i] := tmp_dst[i+63:i]
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
+ Swizzle
Shuffle 128-bits (composed of 2 double-precision (64-bit) floating-point elements) selected by "imm8" from "a" and "b", and store the results in "dst".

DEFINE SELECT4(src, control) {
    CASE(control[1:0]) OF
    0: tmp[127:0] := src[127:0]
    1: tmp[127:0] := src[255:128]
    2: tmp[127:0] := src[383:256]
    3: tmp[127:0] := src[511:384]
    ESAC
    RETURN tmp[127:0]
}
dst[127:0] := SELECT4(a[511:0], imm8[1:0])
dst[255:128] := SELECT4(a[511:0], imm8[3:2])
dst[383:256] := SELECT4(b[511:0], imm8[5:4])
dst[511:384] := SELECT4(b[511:0], imm8[7:6])
dst[MAX:512] := 0

AVX512F
immintrin.h
+ Swizzle
Shuffle 128-bits (composed of 4 32-bit integers) selected by "imm8" from "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

DEFINE SELECT4(src, control) {
    CASE(control[1:0]) OF
    0: tmp[127:0] := src[127:0]
    1: tmp[127:0] := src[255:128]
    2: tmp[127:0] := src[383:256]
    3: tmp[127:0] := src[511:384]
    ESAC
    RETURN tmp[127:0]
}
tmp_dst[127:0] := SELECT4(a[511:0], imm8[1:0])
tmp_dst[255:128] := SELECT4(a[511:0], imm8[3:2])
tmp_dst[383:256] := SELECT4(b[511:0], imm8[5:4])
tmp_dst[511:384] := SELECT4(b[511:0], imm8[7:6])
FOR j := 0 to 15
    i := j*32
    IF k[j]
        dst[i+31:i] := tmp_dst[i+31:i]
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
+ Swizzle
Shuffle 128-bits (composed of 4 32-bit integers) selected by "imm8" from "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

DEFINE SELECT4(src, control) {
    CASE(control[1:0]) OF
    0: tmp[127:0] := src[127:0]
    1: tmp[127:0] := src[255:128]
    2: tmp[127:0] := src[383:256]
    3: tmp[127:0] := src[511:384]
    ESAC
    RETURN tmp[127:0]
}
tmp_dst[127:0] := SELECT4(a[511:0], imm8[1:0])
tmp_dst[255:128] := SELECT4(a[511:0], imm8[3:2])
tmp_dst[383:256] := SELECT4(b[511:0], imm8[5:4])
tmp_dst[511:384] := SELECT4(b[511:0], imm8[7:6])
FOR j := 0 to 15
    i := j*32
    IF k[j]
        dst[i+31:i] := tmp_dst[i+31:i]
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
+ Swizzle
Shuffle 128-bits (composed of 4 32-bit integers) selected by "imm8" from "a" and "b", and store the results in "dst".

DEFINE SELECT4(src, control) {
    CASE(control[1:0]) OF
    0: tmp[127:0] := src[127:0]
    1: tmp[127:0] := src[255:128]
    2: tmp[127:0] := src[383:256]
    3: tmp[127:0] := src[511:384]
    ESAC
    RETURN tmp[127:0]
}
dst[127:0] := SELECT4(a[511:0], imm8[1:0])
dst[255:128] := SELECT4(a[511:0], imm8[3:2])
dst[383:256] := SELECT4(b[511:0], imm8[5:4])
dst[511:384] := SELECT4(b[511:0], imm8[7:6])
dst[MAX:512] := 0

AVX512F
immintrin.h
+ Swizzle
Shuffle 128-bits (composed of 2 64-bit integers) selected by "imm8" from "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

DEFINE SELECT4(src, control) {
    CASE(control[1:0]) OF
    0: tmp[127:0] := src[127:0]
    1: tmp[127:0] := src[255:128]
    2: tmp[127:0] := src[383:256]
    3: tmp[127:0] := src[511:384]
    ESAC
    RETURN tmp[127:0]
}
tmp_dst[127:0] := SELECT4(a[511:0], imm8[1:0])
tmp_dst[255:128] := SELECT4(a[511:0], imm8[3:2])
tmp_dst[383:256] := SELECT4(b[511:0], imm8[5:4])
tmp_dst[511:384] := SELECT4(b[511:0], imm8[7:6])
FOR j := 0 to 7
    i := j*64
    IF k[j]
        dst[i+63:i] := tmp_dst[i+63:i]
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
+ Swizzle
Shuffle 128-bits (composed of 2 64-bit integers) selected by "imm8" from "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

DEFINE SELECT4(src, control) {
    CASE(control[1:0]) OF
    0: tmp[127:0] := src[127:0]
    1: tmp[127:0] := src[255:128]
    2: tmp[127:0] := src[383:256]
    3: tmp[127:0] := src[511:384]
    ESAC
    RETURN tmp[127:0]
}
tmp_dst[127:0] := SELECT4(a[511:0], imm8[1:0])
tmp_dst[255:128] := SELECT4(a[511:0], imm8[3:2])
tmp_dst[383:256] := SELECT4(b[511:0], imm8[5:4])
tmp_dst[511:384] := SELECT4(b[511:0], imm8[7:6])
FOR j := 0 to 7
    i := j*64
    IF k[j]
        dst[i+63:i] := tmp_dst[i+63:i]
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
+ Swizzle
Shuffle 128-bits (composed of 2 64-bit integers) selected by "imm8" from "a" and "b", and store the results in "dst".

DEFINE SELECT4(src, control) {
    CASE(control[1:0]) OF
    0: tmp[127:0] := src[127:0]
    1: tmp[127:0] := src[255:128]
    2: tmp[127:0] := src[383:256]
    3: tmp[127:0] := src[511:384]
    ESAC
    RETURN tmp[127:0]
}
dst[127:0] := SELECT4(a[511:0], imm8[1:0])
dst[255:128] := SELECT4(a[511:0], imm8[3:2])
dst[383:256] := SELECT4(b[511:0], imm8[5:4])
dst[511:384] := SELECT4(b[511:0], imm8[7:6])
dst[MAX:512] := 0

AVX512F
immintrin.h
+ Swizzle
Shuffle double-precision (64-bit) floating-point elements within 128-bit lanes using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

tmp_dst[63:0] := (imm8[0] == 0) ? a[63:0] : a[127:64]
tmp_dst[127:64] := (imm8[1] == 0) ? b[63:0] : b[127:64]
tmp_dst[191:128] := (imm8[2] == 0) ? a[191:128] : a[255:192]
tmp_dst[255:192] := (imm8[3] == 0) ? b[191:128] : b[255:192]
tmp_dst[319:256] := (imm8[4] == 0) ? a[319:256] : a[383:320]
tmp_dst[383:320] := (imm8[5] == 0) ? b[319:256] : b[383:320]
tmp_dst[447:384] := (imm8[6] == 0) ? a[447:384] : a[511:448]
tmp_dst[511:448] := (imm8[7] == 0) ? b[447:384] : b[511:448]
FOR j := 0 to 7
    i := j*64
    IF k[j]
        dst[i+63:i] := tmp_dst[i+63:i]
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
+ Swizzle
Shuffle double-precision (64-bit) floating-point elements within 128-bit lanes using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

tmp_dst[63:0] := (imm8[0] == 0) ? a[63:0] : a[127:64]
tmp_dst[127:64] := (imm8[1] == 0) ? b[63:0] : b[127:64]
tmp_dst[191:128] := (imm8[2] == 0) ? a[191:128] : a[255:192]
tmp_dst[255:192] := (imm8[3] == 0) ? b[191:128] : b[255:192]
tmp_dst[319:256] := (imm8[4] == 0) ? a[319:256] : a[383:320]
tmp_dst[383:320] := (imm8[5] == 0) ? b[319:256] : b[383:320]
tmp_dst[447:384] := (imm8[6] == 0) ? a[447:384] : a[511:448]
tmp_dst[511:448] := (imm8[7] == 0) ? b[447:384] : b[511:448]
FOR j := 0 to 7
    i := j*64
    IF k[j]
        dst[i+63:i] := tmp_dst[i+63:i]
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
+ Swizzle
Shuffle double-precision (64-bit) floating-point elements within 128-bit lanes using the control in "imm8", and store the results in "dst".

dst[63:0] := (imm8[0] == 0) ? a[63:0] : a[127:64]
dst[127:64] := (imm8[1] == 0) ? b[63:0] : b[127:64]
dst[191:128] := (imm8[2] == 0) ? a[191:128] : a[255:192]
dst[255:192] := (imm8[3] == 0) ? b[191:128] : b[255:192]
dst[319:256] := (imm8[4] == 0) ? a[319:256] : a[383:320]
dst[383:320] := (imm8[5] == 0) ? b[319:256] : b[383:320]
dst[447:384] := (imm8[6] == 0) ? a[447:384] : a[511:448]
dst[511:448] := (imm8[7] == 0) ? b[447:384] : b[511:448]
dst[MAX:512] := 0

AVX512F
immintrin.h
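A scalar Rust sketch of the in-lane f64 shuffle above (illustrative, not a stdarch API): within each 128-bit lane, one imm8 bit picks the low or high element of "a" and the next bit does the same for "b".

fn shuffle_f64_lanes(a: [f64; 8], b: [f64; 8], imm8: u8) -> [f64; 8] {
    let mut dst = [0.0f64; 8];
    for lane in 0..4 {
        let lo = lane * 2;
        dst[lo] = a[lo + ((imm8 >> (2 * lane)) & 1) as usize];
        dst[lo + 1] = b[lo + ((imm8 >> (2 * lane + 1)) & 1) as usize];
    }
    dst
}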
+ Swizzle
Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

DEFINE SELECT4(src, control) {
    CASE(control[1:0]) OF
    0: tmp[31:0] := src[31:0]
    1: tmp[31:0] := src[63:32]
    2: tmp[31:0] := src[95:64]
    3: tmp[31:0] := src[127:96]
    ESAC
    RETURN tmp[31:0]
}
tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0])
tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2])
tmp_dst[95:64] := SELECT4(b[127:0], imm8[5:4])
tmp_dst[127:96] := SELECT4(b[127:0], imm8[7:6])
tmp_dst[159:128] := SELECT4(a[255:128], imm8[1:0])
tmp_dst[191:160] := SELECT4(a[255:128], imm8[3:2])
tmp_dst[223:192] := SELECT4(b[255:128], imm8[5:4])
tmp_dst[255:224] := SELECT4(b[255:128], imm8[7:6])
tmp_dst[287:256] := SELECT4(a[383:256], imm8[1:0])
tmp_dst[319:288] := SELECT4(a[383:256], imm8[3:2])
tmp_dst[351:320] := SELECT4(b[383:256], imm8[5:4])
tmp_dst[383:352] := SELECT4(b[383:256], imm8[7:6])
tmp_dst[415:384] := SELECT4(a[511:384], imm8[1:0])
tmp_dst[447:416] := SELECT4(a[511:384], imm8[3:2])
tmp_dst[479:448] := SELECT4(b[511:384], imm8[5:4])
tmp_dst[511:480] := SELECT4(b[511:384], imm8[7:6])
FOR j := 0 to 15
    i := j*32
    IF k[j]
        dst[i+31:i] := tmp_dst[i+31:i]
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
+ Swizzle
- - - - - - Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit - lanes using the control in "imm8", and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - DEFINE SELECT4(src, control) { - CASE(control[1:0]) OF - 0: tmp[31:0] := src[31:0] - 1: tmp[31:0] := src[63:32] - 2: tmp[31:0] := src[95:64] - 3: tmp[31:0] := src[127:96] - ESAC - RETURN tmp[31:0] - } - tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0]) - tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2]) - tmp_dst[95:64] := SELECT4(b[127:0], imm8[5:4]) - tmp_dst[127:96] := SELECT4(b[127:0], imm8[7:6]) - tmp_dst[159:128] := SELECT4(a[255:128], imm8[1:0]) - tmp_dst[191:160] := SELECT4(a[255:128], imm8[3:2]) - tmp_dst[223:192] := SELECT4(b[255:128], imm8[5:4]) - tmp_dst[255:224] := SELECT4(b[255:128], imm8[7:6]) - tmp_dst[287:256] := SELECT4(a[383:256], imm8[1:0]) - tmp_dst[319:288] := SELECT4(a[383:256], imm8[3:2]) - tmp_dst[351:320] := SELECT4(b[383:256], imm8[5:4]) - tmp_dst[383:352] := SELECT4(b[383:256], imm8[7:6]) - tmp_dst[415:384] := SELECT4(a[511:384], imm8[1:0]) - tmp_dst[447:416] := SELECT4(a[511:384], imm8[3:2]) - tmp_dst[479:448] := SELECT4(b[511:384], imm8[5:4]) - tmp_dst[511:480] := SELECT4(b[511:384], imm8[7:6]) - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := tmp_dst[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + + + Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE SELECT4(src, control) { + CASE(control[1:0]) OF + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} +tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0]) +tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2]) +tmp_dst[95:64] := SELECT4(b[127:0], imm8[5:4]) +tmp_dst[127:96] := SELECT4(b[127:0], imm8[7:6]) +tmp_dst[159:128] := SELECT4(a[255:128], imm8[1:0]) +tmp_dst[191:160] := SELECT4(a[255:128], imm8[3:2]) +tmp_dst[223:192] := SELECT4(b[255:128], imm8[5:4]) +tmp_dst[255:224] := SELECT4(b[255:128], imm8[7:6]) +tmp_dst[287:256] := SELECT4(a[383:256], imm8[1:0]) +tmp_dst[319:288] := SELECT4(a[383:256], imm8[3:2]) +tmp_dst[351:320] := SELECT4(b[383:256], imm8[5:4]) +tmp_dst[383:352] := SELECT4(b[383:256], imm8[7:6]) +tmp_dst[415:384] := SELECT4(a[511:384], imm8[1:0]) +tmp_dst[447:416] := SELECT4(a[511:384], imm8[3:2]) +tmp_dst[479:448] := SELECT4(b[511:384], imm8[5:4]) +tmp_dst[511:480] := SELECT4(b[511:384], imm8[7:6]) +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - - Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit - lanes using the control in "imm8", and store the results in "dst". - - DEFINE SELECT4(src, control) { - CASE(control[1:0]) OF - 0: tmp[31:0] := src[31:0] - 1: tmp[31:0] := src[63:32] - 2: tmp[31:0] := src[95:64] - 3: tmp[31:0] := src[127:96] - ESAC - RETURN tmp[31:0] - } - dst[31:0] := SELECT4(a[127:0], imm8[1:0]) - dst[63:32] := SELECT4(a[127:0], imm8[3:2]) - dst[95:64] := SELECT4(b[127:0], imm8[5:4]) - dst[127:96] := SELECT4(b[127:0], imm8[7:6]) - dst[159:128] := SELECT4(a[255:128], imm8[1:0]) - dst[191:160] := SELECT4(a[255:128], imm8[3:2]) - dst[223:192] := SELECT4(b[255:128], imm8[5:4]) - dst[255:224] := SELECT4(b[255:128], imm8[7:6]) - dst[287:256] := SELECT4(a[383:256], imm8[1:0]) - dst[319:288] := SELECT4(a[383:256], imm8[3:2]) - dst[351:320] := SELECT4(b[383:256], imm8[5:4]) - dst[383:352] := SELECT4(b[383:256], imm8[7:6]) - dst[415:384] := SELECT4(a[511:384], imm8[1:0]) - dst[447:416] := SELECT4(a[511:384], imm8[3:2]) - dst[479:448] := SELECT4(b[511:384], imm8[5:4]) - dst[511:480] := SELECT4(b[511:384], imm8[7:6]) - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + + Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst". + +DEFINE SELECT4(src, control) { + CASE(control[1:0]) OF + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} +dst[31:0] := SELECT4(a[127:0], imm8[1:0]) +dst[63:32] := SELECT4(a[127:0], imm8[3:2]) +dst[95:64] := SELECT4(b[127:0], imm8[5:4]) +dst[127:96] := SELECT4(b[127:0], imm8[7:6]) +dst[159:128] := SELECT4(a[255:128], imm8[1:0]) +dst[191:160] := SELECT4(a[255:128], imm8[3:2]) +dst[223:192] := SELECT4(b[255:128], imm8[5:4]) +dst[255:224] := SELECT4(b[255:128], imm8[7:6]) +dst[287:256] := SELECT4(a[383:256], imm8[1:0]) +dst[319:288] := SELECT4(a[383:256], imm8[3:2]) +dst[351:320] := SELECT4(b[383:256], imm8[5:4]) +dst[383:352] := SELECT4(b[383:256], imm8[7:6]) +dst[415:384] := SELECT4(a[511:384], imm8[1:0]) +dst[447:416] := SELECT4(a[511:384], imm8[3:2]) +dst[479:448] := SELECT4(b[511:384], imm8[5:4]) +dst[511:480] := SELECT4(b[511:384], imm8[7:6]) +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
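The three shuffle_ps entries above share SELECT4 and differ only in their epilogue; a scratch model of both pieces in plain Rust (illustrative names, operating on [f32; 16]):

```rust
// SELECT4 picks one of the four f32s in a 128-bit lane by a 2-bit control.
fn select4(lane: &[f32], control: u8) -> f32 {
    lane[(control & 0b11) as usize]
}

fn shuffle_ps_model(a: [f32; 16], b: [f32; 16], imm8: u8) -> [f32; 16] {
    let mut tmp = [0.0f32; 16];
    for l in 0..4 {
        let base = 4 * l; // each 128-bit lane holds four f32s
        tmp[base] = select4(&a[base..base + 4], imm8);
        tmp[base + 1] = select4(&a[base..base + 4], imm8 >> 2);
        tmp[base + 2] = select4(&b[base..base + 4], imm8 >> 4);
        tmp[base + 3] = select4(&b[base..base + 4], imm8 >> 6);
    }
    tmp
}

// Writemask keeps `src` where a mask bit is clear; zeromask writes 0.0.
fn apply_mask(src: [f32; 16], tmp: [f32; 16], k: u16, zeroing: bool) -> [f32; 16] {
    core::array::from_fn(|j| {
        if k & (1 << j) != 0 {
            tmp[j]
        } else if zeroing {
            0.0
        } else {
            src[j]
        }
    })
}

fn main() {
    let a: [f32; 16] = core::array::from_fn(|i| i as f32);
    let b: [f32; 16] = core::array::from_fn(|i| 100.0 + i as f32);
    // imm8[1:0]=2 and [3:2]=3 pick from a; [5:4]=0 and [7:6]=1 pick from b.
    let tmp = shuffle_ps_model(a, b, 0b01_00_11_10);
    assert_eq!(&tmp[..4], &[2.0, 3.0, 100.0, 101.0]);
    let z = apply_mask(a, tmp, 0b0101_0101_0101_0101, true);
    assert_eq!(&z[..4], &[2.0, 0.0, 100.0, 0.0]);
}
```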
- - - - - - Unpack and interleave double-precision (64-bit) floating-point elements from - the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) { - dst[63:0] := src1[127:64] - dst[127:64] := src2[127:64] - RETURN dst[127:0] - } - tmp_dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0]) - tmp_dst[255:128] := INTERLEAVE_HIGH_QWORDS(a[255:128], b[255:128]) - tmp_dst[383:256] := INTERLEAVE_HIGH_QWORDS(a[383:256], b[383:256]) - tmp_dst[511:384] := INTERLEAVE_HIGH_QWORDS(a[511:384], b[511:384]) - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := tmp_dst[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + + + Unpack and interleave double-precision (64-bit) floating-point elements from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) { + dst[63:0] := src1[127:64] + dst[127:64] := src2[127:64] + RETURN dst[127:0] +} +tmp_dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_HIGH_QWORDS(a[255:128], b[255:128]) +tmp_dst[383:256] := INTERLEAVE_HIGH_QWORDS(a[383:256], b[383:256]) +tmp_dst[511:384] := INTERLEAVE_HIGH_QWORDS(a[511:384], b[511:384]) +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - - Unpack and interleave double-precision (64-bit) floating-point elements from - the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) { - dst[63:0] := src1[127:64] - dst[127:64] := src2[127:64] - RETURN dst[127:0] - } - tmp_dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0]) - tmp_dst[255:128] := INTERLEAVE_HIGH_QWORDS(a[255:128], b[255:128]) - tmp_dst[383:256] := INTERLEAVE_HIGH_QWORDS(a[383:256], b[383:256]) - tmp_dst[511:384] := INTERLEAVE_HIGH_QWORDS(a[511:384], b[511:384]) - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := tmp_dst[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + + Unpack and interleave double-precision (64-bit) floating-point elements from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) { + dst[63:0] := src1[127:64] + dst[127:64] := src2[127:64] + RETURN dst[127:0] +} +tmp_dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_HIGH_QWORDS(a[255:128], b[255:128]) +tmp_dst[383:256] := INTERLEAVE_HIGH_QWORDS(a[383:256], b[383:256]) +tmp_dst[511:384] := INTERLEAVE_HIGH_QWORDS(a[511:384], b[511:384]) +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - Unpack and interleave double-precision (64-bit) floating-point elements from - the high half of each 128-bit lane in "a" and "b", and store the results in "dst". - - DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) { - dst[63:0] := src1[127:64] - dst[127:64] := src2[127:64] - RETURN dst[127:0] - } - dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0]) - dst[255:128] := INTERLEAVE_HIGH_QWORDS(a[255:128], b[255:128]) - dst[383:256] := INTERLEAVE_HIGH_QWORDS(a[383:256], b[383:256]) - dst[511:384] := INTERLEAVE_HIGH_QWORDS(a[511:384], b[511:384]) - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + Unpack and interleave double-precision (64-bit) floating-point elements from the high half of each 128-bit lane in "a" and "b", and store the results in "dst". + +DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) { + dst[63:0] := src1[127:64] + dst[127:64] := src2[127:64] + RETURN dst[127:0] +} +dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0]) +dst[255:128] := INTERLEAVE_HIGH_QWORDS(a[255:128], b[255:128]) +dst[383:256] := INTERLEAVE_HIGH_QWORDS(a[383:256], b[383:256]) +dst[511:384] := INTERLEAVE_HIGH_QWORDS(a[511:384], b[511:384]) +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
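The unpackhi_pd family above applies INTERLEAVE_HIGH_QWORDS per 128-bit lane, with the same writemask/zeromask epilogues as before. A plain-Rust sketch of the unmasked core (illustrative name); the unpackhi_ps, unpacklo_pd, and unpacklo_ps entries below change only the element width and which half of each lane is read:

```rust
// Plain-Rust sketch of INTERLEAVE_HIGH_QWORDS applied per 128-bit lane
// (illustrative name; not stdarch's implementation).
fn unpackhi_pd_model(a: [f64; 8], b: [f64; 8]) -> [f64; 8] {
    let mut dst = [0.0; 8];
    for lane in 0..4 {
        dst[2 * lane] = a[2 * lane + 1];     // high qword of a's lane
        dst[2 * lane + 1] = b[2 * lane + 1]; // high qword of b's lane
    }
    dst
}

fn main() {
    let a = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0];
    let b = [10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0];
    // Lane 0 of the result interleaves the high qwords: [a[1], b[1]].
    assert_eq!(unpackhi_pd_model(a, b)[..2], [1.0, 11.0]);
}
```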
- - - - - - Unpack and interleave single-precision (32-bit) floating-point elements from - the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - DEFINE INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]) { - dst[31:0] := src1[95:64] - dst[63:32] := src2[95:64] - dst[95:64] := src1[127:96] - dst[127:96] := src2[127:96] - RETURN dst[127:0] - } - tmp_dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0]) - tmp_dst[255:128] := INTERLEAVE_HIGH_DWORDS(a[255:128], b[255:128]) - tmp_dst[383:256] := INTERLEAVE_HIGH_DWORDS(a[383:256], b[383:256]) - tmp_dst[511:384] := INTERLEAVE_HIGH_DWORDS(a[511:384], b[511:384]) - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := tmp_dst[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + + + Unpack and interleave single-precision (32-bit) floating-point elements from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]) { + dst[31:0] := src1[95:64] + dst[63:32] := src2[95:64] + dst[95:64] := src1[127:96] + dst[127:96] := src2[127:96] + RETURN dst[127:0] +} +tmp_dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_HIGH_DWORDS(a[255:128], b[255:128]) +tmp_dst[383:256] := INTERLEAVE_HIGH_DWORDS(a[383:256], b[383:256]) +tmp_dst[511:384] := INTERLEAVE_HIGH_DWORDS(a[511:384], b[511:384]) +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - - Unpack and interleave single-precision (32-bit) floating-point elements from - the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - DEFINE INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]) { - dst[31:0] := src1[95:64] - dst[63:32] := src2[95:64] - dst[95:64] := src1[127:96] - dst[127:96] := src2[127:96] - RETURN dst[127:0] - } - tmp_dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0]) - tmp_dst[255:128] := INTERLEAVE_HIGH_DWORDS(a[255:128], b[255:128]) - tmp_dst[383:256] := INTERLEAVE_HIGH_DWORDS(a[383:256], b[383:256]) - tmp_dst[511:384] := INTERLEAVE_HIGH_DWORDS(a[511:384], b[511:384]) - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := tmp_dst[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + + Unpack and interleave single-precision (32-bit) floating-point elements from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]) { + dst[31:0] := src1[95:64] + dst[63:32] := src2[95:64] + dst[95:64] := src1[127:96] + dst[127:96] := src2[127:96] + RETURN dst[127:0] +} +tmp_dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_HIGH_DWORDS(a[255:128], b[255:128]) +tmp_dst[383:256] := INTERLEAVE_HIGH_DWORDS(a[383:256], b[383:256]) +tmp_dst[511:384] := INTERLEAVE_HIGH_DWORDS(a[511:384], b[511:384]) +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - Unpack and interleave single-precision (32-bit) floating-point elements from - the high half of each 128-bit lane in "a" and "b", and store the results in "dst". - - DEFINE INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]) { - dst[31:0] := src1[95:64] - dst[63:32] := src2[95:64] - dst[95:64] := src1[127:96] - dst[127:96] := src2[127:96] - RETURN dst[127:0] - } - dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0]) - dst[255:128] := INTERLEAVE_HIGH_DWORDS(a[255:128], b[255:128]) - dst[383:256] := INTERLEAVE_HIGH_DWORDS(a[383:256], b[383:256]) - dst[511:384] := INTERLEAVE_HIGH_DWORDS(a[511:384], b[511:384]) - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + Unpack and interleave single-precision (32-bit) floating-point elements from the high half of each 128-bit lane in "a" and "b", and store the results in "dst". + +DEFINE INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]) { + dst[31:0] := src1[95:64] + dst[63:32] := src2[95:64] + dst[95:64] := src1[127:96] + dst[127:96] := src2[127:96] + RETURN dst[127:0] +} +dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0]) +dst[255:128] := INTERLEAVE_HIGH_DWORDS(a[255:128], b[255:128]) +dst[383:256] := INTERLEAVE_HIGH_DWORDS(a[383:256], b[383:256]) +dst[511:384] := INTERLEAVE_HIGH_DWORDS(a[511:384], b[511:384]) +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - - - Unpack and interleave double-precision (64-bit) floating-point elements from - the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) { - dst[63:0] := src1[63:0] - dst[127:64] := src2[63:0] - RETURN dst[127:0] - } - tmp_dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0]) - tmp_dst[255:128] := INTERLEAVE_QWORDS(a[255:128], b[255:128]) - tmp_dst[383:256] := INTERLEAVE_QWORDS(a[383:256], b[383:256]) - tmp_dst[511:384] := INTERLEAVE_QWORDS(a[511:384], b[511:384]) - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := tmp_dst[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + + + Unpack and interleave double-precision (64-bit) floating-point elements from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) { + dst[63:0] := src1[63:0] + dst[127:64] := src2[63:0] + RETURN dst[127:0] +} +tmp_dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_QWORDS(a[255:128], b[255:128]) +tmp_dst[383:256] := INTERLEAVE_QWORDS(a[383:256], b[383:256]) +tmp_dst[511:384] := INTERLEAVE_QWORDS(a[511:384], b[511:384]) +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - - Unpack and interleave double-precision (64-bit) floating-point elements from - the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) { - dst[63:0] := src1[63:0] - dst[127:64] := src2[63:0] - RETURN dst[127:0] - } - tmp_dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0]) - tmp_dst[255:128] := INTERLEAVE_QWORDS(a[255:128], b[255:128]) - tmp_dst[383:256] := INTERLEAVE_QWORDS(a[383:256], b[383:256]) - tmp_dst[511:384] := INTERLEAVE_QWORDS(a[511:384], b[511:384]) - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := tmp_dst[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + + Unpack and interleave double-precision (64-bit) floating-point elements from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) { + dst[63:0] := src1[63:0] + dst[127:64] := src2[63:0] + RETURN dst[127:0] +} +tmp_dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_QWORDS(a[255:128], b[255:128]) +tmp_dst[383:256] := INTERLEAVE_QWORDS(a[383:256], b[383:256]) +tmp_dst[511:384] := INTERLEAVE_QWORDS(a[511:384], b[511:384]) +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp_dst[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - Unpack and interleave double-precision (64-bit) floating-point elements from - the low half of each 128-bit lane in "a" and "b", and store the results in "dst". - - DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) { - dst[63:0] := src1[63:0] - dst[127:64] := src2[63:0] - RETURN dst[127:0] - } - dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0]) - dst[255:128] := INTERLEAVE_QWORDS(a[255:128], b[255:128]) - dst[383:256] := INTERLEAVE_QWORDS(a[383:256], b[383:256]) - dst[511:384] := INTERLEAVE_QWORDS(a[511:384], b[511:384]) - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + Unpack and interleave double-precision (64-bit) floating-point elements from the low half of each 128-bit lane in "a" and "b", and store the results in "dst". + +DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) { + dst[63:0] := src1[63:0] + dst[127:64] := src2[63:0] + RETURN dst[127:0] +} +dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0]) +dst[255:128] := INTERLEAVE_QWORDS(a[255:128], b[255:128]) +dst[383:256] := INTERLEAVE_QWORDS(a[383:256], b[383:256]) +dst[511:384] := INTERLEAVE_QWORDS(a[511:384], b[511:384]) +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - - - Unpack and interleave single-precision (32-bit) floating-point elements from - the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) { - dst[31:0] := src1[31:0] - dst[63:32] := src2[31:0] - dst[95:64] := src1[63:32] - dst[127:96] := src2[63:32] - RETURN dst[127:0] - } - tmp_dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0]) - tmp_dst[255:128] := INTERLEAVE_DWORDS(a[255:128], b[255:128]) - tmp_dst[383:256] := INTERLEAVE_DWORDS(a[383:256], b[383:256]) - tmp_dst[511:384] := INTERLEAVE_DWORDS(a[511:384], b[511:384]) - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := tmp_dst[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + + + Unpack and interleave single-precision (32-bit) floating-point elements from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) { + dst[31:0] := src1[31:0] + dst[63:32] := src2[31:0] + dst[95:64] := src1[63:32] + dst[127:96] := src2[63:32] + RETURN dst[127:0] +} +tmp_dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_DWORDS(a[255:128], b[255:128]) +tmp_dst[383:256] := INTERLEAVE_DWORDS(a[383:256], b[383:256]) +tmp_dst[511:384] := INTERLEAVE_DWORDS(a[511:384], b[511:384]) +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - - Unpack and interleave single-precision (32-bit) floating-point elements from - the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) { - dst[31:0] := src1[31:0] - dst[63:32] := src2[31:0] - dst[95:64] := src1[63:32] - dst[127:96] := src2[63:32] - RETURN dst[127:0] - } - tmp_dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0]) - tmp_dst[255:128] := INTERLEAVE_DWORDS(a[255:128], b[255:128]) - tmp_dst[383:256] := INTERLEAVE_DWORDS(a[383:256], b[383:256]) - tmp_dst[511:384] := INTERLEAVE_DWORDS(a[511:384], b[511:384]) - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := tmp_dst[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + + Unpack and interleave single-precision (32-bit) floating-point elements from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) { + dst[31:0] := src1[31:0] + dst[63:32] := src2[31:0] + dst[95:64] := src1[63:32] + dst[127:96] := src2[63:32] + RETURN dst[127:0] +} +tmp_dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0]) +tmp_dst[255:128] := INTERLEAVE_DWORDS(a[255:128], b[255:128]) +tmp_dst[383:256] := INTERLEAVE_DWORDS(a[383:256], b[383:256]) +tmp_dst[511:384] := INTERLEAVE_DWORDS(a[511:384], b[511:384]) +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp_dst[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
- - - - Unpack and interleave single-precision (32-bit) floating-point elements from - the low half of each 128-bit lane in "a" and "b", and store the results in "dst". - - DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) { - dst[31:0] := src1[31:0] - dst[63:32] := src2[31:0] - dst[95:64] := src1[63:32] - dst[127:96] := src2[63:32] - RETURN dst[127:0] - } - dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0]) - dst[255:128] := INTERLEAVE_DWORDS(a[255:128], b[255:128]) - dst[383:256] := INTERLEAVE_DWORDS(a[383:256], b[383:256]) - dst[511:384] := INTERLEAVE_DWORDS(a[511:384], b[511:384]) - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Swizzle + + + + Unpack and interleave single-precision (32-bit) floating-point elements from the low half of each 128-bit lane in "a" and "b", and store the results in "dst". + +DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) { + dst[31:0] := src1[31:0] + dst[63:32] := src2[31:0] + dst[95:64] := src1[63:32] + dst[127:96] := src2[63:32] + RETURN dst[127:0] +} +dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0]) +dst[255:128] := INTERLEAVE_DWORDS(a[255:128], b[255:128]) +dst[383:256] := INTERLEAVE_DWORDS(a[383:256], b[383:256]) +dst[511:384] := INTERLEAVE_DWORDS(a[511:384], b[511:384]) +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Swizzle
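To exercise one of these unpacks end to end, a hedged sketch against core::arch; the `stdarch_x86_avx512` gate is an assumption matching the nightly-only status of these intrinsics at the time of this patch:

```rust
#![feature(stdarch_x86_avx512)]
use std::arch::x86_64::*;

fn main() {
    if is_x86_feature_detected!("avx512f") {
        unsafe { demo() }
    }
}

#[target_feature(enable = "avx512f")]
unsafe fn demo() {
    let a = _mm512_setr_ps(
        0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15.,
    );
    let b = _mm512_setr_ps(
        16., 17., 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31.,
    );
    let lo = _mm512_unpacklo_ps(a, b);
    let mut out = [0.0f32; 16];
    _mm512_storeu_ps(out.as_mut_ptr(), lo);
    // Per INTERLEAVE_DWORDS, lane 0 of the result is [a0, b0, a1, b1].
    assert_eq!(&out[..4], &[0., 16., 1., 17.]);
}
```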
- - - - - - Compare the lower double-precision (64-bit) floating-point element in "a" and - "b" based on the comparison operand specified by "imm8", and store the result in mask - vector "k". [sae_note] - CASE (imm8[4:0]) OF - 0: OP := _CMP_EQ_OQ - 1: OP := _CMP_LT_OS - 2: OP := _CMP_LE_OS - 3: OP := _CMP_UNORD_Q - 4: OP := _CMP_NEQ_UQ - 5: OP := _CMP_NLT_US - 6: OP := _CMP_NLE_US - 7: OP := _CMP_ORD_Q - 8: OP := _CMP_EQ_UQ - 9: OP := _CMP_NGE_US - 10: OP := _CMP_NGT_US - 11: OP := _CMP_FALSE_OQ - 12: OP := _CMP_NEQ_OQ - 13: OP := _CMP_GE_OS - 14: OP := _CMP_GT_OS - 15: OP := _CMP_TRUE_UQ - 16: OP := _CMP_EQ_OS - 17: OP := _CMP_LT_OQ - 18: OP := _CMP_LE_OQ - 19: OP := _CMP_UNORD_S - 20: OP := _CMP_NEQ_US - 21: OP := _CMP_NLT_UQ - 22: OP := _CMP_NLE_UQ - 23: OP := _CMP_ORD_S - 24: OP := _CMP_EQ_US - 25: OP := _CMP_NGE_UQ - 26: OP := _CMP_NGT_UQ - 27: OP := _CMP_FALSE_OS - 28: OP := _CMP_NEQ_OS - 29: OP := _CMP_GE_OQ - 30: OP := _CMP_GT_OQ - 31: OP := _CMP_TRUE_US - ESAC - k[0] := ( a[63:0] OP b[63:0] ) ? 1 : 0 - k[MAX:1] := 0 - - - AVX512F -
immintrin.h
- Compare + + + + + + Compare the lower double-precision (64-bit) floating-point element in "a" and "b" based on the comparison operand specified by "imm8", and store the result in mask vector "k". [sae_note] + CASE (imm8[4:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC +k[0] := ( a[63:0] OP b[63:0] ) ? 1 : 0 +k[MAX:1] := 0 + + + AVX512F +
immintrin.h
+ Compare
- - - - - Compare the lower double-precision (64-bit) floating-point element in "a" and - "b" based on the comparison operand specified by "imm8", and store the result in mask - vector "k". - CASE (imm8[4:0]) OF - 0: OP := _CMP_EQ_OQ - 1: OP := _CMP_LT_OS - 2: OP := _CMP_LE_OS - 3: OP := _CMP_UNORD_Q - 4: OP := _CMP_NEQ_UQ - 5: OP := _CMP_NLT_US - 6: OP := _CMP_NLE_US - 7: OP := _CMP_ORD_Q - 8: OP := _CMP_EQ_UQ - 9: OP := _CMP_NGE_US - 10: OP := _CMP_NGT_US - 11: OP := _CMP_FALSE_OQ - 12: OP := _CMP_NEQ_OQ - 13: OP := _CMP_GE_OS - 14: OP := _CMP_GT_OS - 15: OP := _CMP_TRUE_UQ - 16: OP := _CMP_EQ_OS - 17: OP := _CMP_LT_OQ - 18: OP := _CMP_LE_OQ - 19: OP := _CMP_UNORD_S - 20: OP := _CMP_NEQ_US - 21: OP := _CMP_NLT_UQ - 22: OP := _CMP_NLE_UQ - 23: OP := _CMP_ORD_S - 24: OP := _CMP_EQ_US - 25: OP := _CMP_NGE_UQ - 26: OP := _CMP_NGT_UQ - 27: OP := _CMP_FALSE_OS - 28: OP := _CMP_NEQ_OS - 29: OP := _CMP_GE_OQ - 30: OP := _CMP_GT_OQ - 31: OP := _CMP_TRUE_US - ESAC - k[0] := ( a[63:0] OP b[63:0] ) ? 1 : 0 - k[MAX:1] := 0 - - - AVX512F -
immintrin.h
- Compare + + + + + Compare the lower double-precision (64-bit) floating-point element in "a" and "b" based on the comparison operand specified by "imm8", and store the result in mask vector "k". + CASE (imm8[4:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC +k[0] := ( a[63:0] OP b[63:0] ) ? 1 : 0 +k[MAX:1] := 0 + + + AVX512F +
immintrin.h
+ Compare
- - - - - - - Compare the lower double-precision (64-bit) floating-point element in "a" and - "b" based on the comparison operand specified by "imm8", and store the result in mask - vector "k" using zeromask "k1" (the element is zeroed out when mask bit 0 is not set). - [sae_note] - CASE (imm8[4:0]) OF - 0: OP := _CMP_EQ_OQ - 1: OP := _CMP_LT_OS - 2: OP := _CMP_LE_OS - 3: OP := _CMP_UNORD_Q - 4: OP := _CMP_NEQ_UQ - 5: OP := _CMP_NLT_US - 6: OP := _CMP_NLE_US - 7: OP := _CMP_ORD_Q - 8: OP := _CMP_EQ_UQ - 9: OP := _CMP_NGE_US - 10: OP := _CMP_NGT_US - 11: OP := _CMP_FALSE_OQ - 12: OP := _CMP_NEQ_OQ - 13: OP := _CMP_GE_OS - 14: OP := _CMP_GT_OS - 15: OP := _CMP_TRUE_UQ - 16: OP := _CMP_EQ_OS - 17: OP := _CMP_LT_OQ - 18: OP := _CMP_LE_OQ - 19: OP := _CMP_UNORD_S - 20: OP := _CMP_NEQ_US - 21: OP := _CMP_NLT_UQ - 22: OP := _CMP_NLE_UQ - 23: OP := _CMP_ORD_S - 24: OP := _CMP_EQ_US - 25: OP := _CMP_NGE_UQ - 26: OP := _CMP_NGT_UQ - 27: OP := _CMP_FALSE_OS - 28: OP := _CMP_NEQ_OS - 29: OP := _CMP_GE_OQ - 30: OP := _CMP_GT_OQ - 31: OP := _CMP_TRUE_US - ESAC - IF k1[0] - k[0] := ( a[63:0] OP b[63:0] ) ? 1 : 0 - ELSE - k[0] := 0 - FI - k[MAX:1] := 0 - - - AVX512F -
immintrin.h
- Compare + + + + + + + Compare the lower double-precision (64-bit) floating-point element in "a" and "b" based on the comparison operand specified by "imm8", and store the result in mask vector "k" using zeromask "k1" (the element is zeroed out when mask bit 0 is not set). [sae_note] + CASE (imm8[4:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC +IF k1[0] + k[0] := ( a[63:0] OP b[63:0] ) ? 1 : 0 +ELSE + k[0] := 0 +FI +k[MAX:1] := 0 + + + AVX512F +
immintrin.h
+ Compare
- - - - - - Compare the lower double-precision (64-bit) floating-point element in "a" and - "b" based on the comparison operand specified by "imm8", and store the result in mask - vector "k" using zeromask "k1" (the element is zeroed out when mask bit 0 is not set). - CASE (imm8[4:0]) OF - 0: OP := _CMP_EQ_OQ - 1: OP := _CMP_LT_OS - 2: OP := _CMP_LE_OS - 3: OP := _CMP_UNORD_Q - 4: OP := _CMP_NEQ_UQ - 5: OP := _CMP_NLT_US - 6: OP := _CMP_NLE_US - 7: OP := _CMP_ORD_Q - 8: OP := _CMP_EQ_UQ - 9: OP := _CMP_NGE_US - 10: OP := _CMP_NGT_US - 11: OP := _CMP_FALSE_OQ - 12: OP := _CMP_NEQ_OQ - 13: OP := _CMP_GE_OS - 14: OP := _CMP_GT_OS - 15: OP := _CMP_TRUE_UQ - 16: OP := _CMP_EQ_OS - 17: OP := _CMP_LT_OQ - 18: OP := _CMP_LE_OQ - 19: OP := _CMP_UNORD_S - 20: OP := _CMP_NEQ_US - 21: OP := _CMP_NLT_UQ - 22: OP := _CMP_NLE_UQ - 23: OP := _CMP_ORD_S - 24: OP := _CMP_EQ_US - 25: OP := _CMP_NGE_UQ - 26: OP := _CMP_NGT_UQ - 27: OP := _CMP_FALSE_OS - 28: OP := _CMP_NEQ_OS - 29: OP := _CMP_GE_OQ - 30: OP := _CMP_GT_OQ - 31: OP := _CMP_TRUE_US - ESAC - IF k1[0] - k[0] := ( a[63:0] OP b[63:0] ) ? 1 : 0 - ELSE - k[0] := 0 - FI - k[MAX:1] := 0 - - - AVX512F -
immintrin.h
- Compare + + + + + + Compare the lower double-precision (64-bit) floating-point element in "a" and "b" based on the comparison operand specified by "imm8", and store the result in mask vector "k" using zeromask "k1" (the element is zeroed out when mask bit 0 is not set). + CASE (imm8[4:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC +IF k1[0] + k[0] := ( a[63:0] OP b[63:0] ) ? 1 : 0 +ELSE + k[0] := 0 +FI +k[MAX:1] := 0 + + + AVX512F +
immintrin.h
+ Compare
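All four scalar-double compare entries above share the same 32-entry predicate table; only SAE and masking differ. A scratch evaluator for a sample of the predicates (plain Rust; the O/U letter fixes how NaN operands resolve, while the Q/S suffix only changes exception signaling, which a software model cannot observe, so paired encodings collapse together):

```rust
// Scratch predicate evaluator for the lower-element compares above.
fn cmp_f64_predicate(a: f64, b: f64, imm5: u8) -> bool {
    let unord = a.is_nan() || b.is_nan(); // operands compare "unordered"
    match imm5 & 0x1f {
        0x00 | 0x10 => !unord && a == b,  // _CMP_EQ_OQ    / _CMP_EQ_OS
        0x01 | 0x11 => !unord && a < b,   // _CMP_LT_OS    / _CMP_LT_OQ
        0x02 | 0x12 => !unord && a <= b,  // _CMP_LE_OS    / _CMP_LE_OQ
        0x03 | 0x13 => unord,             // _CMP_UNORD_Q  / _CMP_UNORD_S
        0x04 | 0x14 => unord || a != b,   // _CMP_NEQ_UQ   / _CMP_NEQ_US
        0x07 | 0x17 => !unord,            // _CMP_ORD_Q    / _CMP_ORD_S
        0x0b | 0x1b => false,             // _CMP_FALSE_OQ / _CMP_FALSE_OS
        0x0f | 0x1f => true,              // _CMP_TRUE_UQ  / _CMP_TRUE_US
        _ => unimplemented!("remaining predicates follow the same pattern"),
    }
}

fn main() {
    assert!(!cmp_f64_predicate(f64::NAN, 1.0, 0x01)); // LT_OS: unordered -> 0
    assert!(cmp_f64_predicate(f64::NAN, 1.0, 0x04));  // NEQ_UQ: unordered -> 1
}
```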
- - - - - - Compare the lower single-precision (32-bit) floating-point element in "a" and - "b" based on the comparison operand specified by "imm8", and store the result in mask - vector "k". [sae_note] - CASE (imm8[4:0]) OF - 0: OP := _CMP_EQ_OQ - 1: OP := _CMP_LT_OS - 2: OP := _CMP_LE_OS - 3: OP := _CMP_UNORD_Q - 4: OP := _CMP_NEQ_UQ - 5: OP := _CMP_NLT_US - 6: OP := _CMP_NLE_US - 7: OP := _CMP_ORD_Q - 8: OP := _CMP_EQ_UQ - 9: OP := _CMP_NGE_US - 10: OP := _CMP_NGT_US - 11: OP := _CMP_FALSE_OQ - 12: OP := _CMP_NEQ_OQ - 13: OP := _CMP_GE_OS - 14: OP := _CMP_GT_OS - 15: OP := _CMP_TRUE_UQ - 16: OP := _CMP_EQ_OS - 17: OP := _CMP_LT_OQ - 18: OP := _CMP_LE_OQ - 19: OP := _CMP_UNORD_S - 20: OP := _CMP_NEQ_US - 21: OP := _CMP_NLT_UQ - 22: OP := _CMP_NLE_UQ - 23: OP := _CMP_ORD_S - 24: OP := _CMP_EQ_US - 25: OP := _CMP_NGE_UQ - 26: OP := _CMP_NGT_UQ - 27: OP := _CMP_FALSE_OS - 28: OP := _CMP_NEQ_OS - 29: OP := _CMP_GE_OQ - 30: OP := _CMP_GT_OQ - 31: OP := _CMP_TRUE_US - ESAC - k[0] := ( a[31:0] OP b[31:0] ) ? 1 : 0 - k[MAX:1] := 0 - - - AVX512F -
immintrin.h
- Compare + + + + + + Compare the lower single-precision (32-bit) floating-point element in "a" and "b" based on the comparison operand specified by "imm8", and store the result in mask vector "k". [sae_note] + CASE (imm8[4:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC +k[0] := ( a[31:0] OP b[31:0] ) ? 1 : 0 +k[MAX:1] := 0 + + + AVX512F +
immintrin.h
+ Compare
- - - - - Compare the lower single-precision (32-bit) floating-point element in "a" and - "b" based on the comparison operand specified by "imm8", and store the result in mask - vector "k". - CASE (imm8[4:0]) OF - 0: OP := _CMP_EQ_OQ - 1: OP := _CMP_LT_OS - 2: OP := _CMP_LE_OS - 3: OP := _CMP_UNORD_Q - 4: OP := _CMP_NEQ_UQ - 5: OP := _CMP_NLT_US - 6: OP := _CMP_NLE_US - 7: OP := _CMP_ORD_Q - 8: OP := _CMP_EQ_UQ - 9: OP := _CMP_NGE_US - 10: OP := _CMP_NGT_US - 11: OP := _CMP_FALSE_OQ - 12: OP := _CMP_NEQ_OQ - 13: OP := _CMP_GE_OS - 14: OP := _CMP_GT_OS - 15: OP := _CMP_TRUE_UQ - 16: OP := _CMP_EQ_OS - 17: OP := _CMP_LT_OQ - 18: OP := _CMP_LE_OQ - 19: OP := _CMP_UNORD_S - 20: OP := _CMP_NEQ_US - 21: OP := _CMP_NLT_UQ - 22: OP := _CMP_NLE_UQ - 23: OP := _CMP_ORD_S - 24: OP := _CMP_EQ_US - 25: OP := _CMP_NGE_UQ - 26: OP := _CMP_NGT_UQ - 27: OP := _CMP_FALSE_OS - 28: OP := _CMP_NEQ_OS - 29: OP := _CMP_GE_OQ - 30: OP := _CMP_GT_OQ - 31: OP := _CMP_TRUE_US - ESAC - k[0] := ( a[31:0] OP b[31:0] ) ? 1 : 0 - k[MAX:1] := 0 - - - AVX512F -
immintrin.h
- Compare + + + + + Compare the lower single-precision (32-bit) floating-point element in "a" and "b" based on the comparison operand specified by "imm8", and store the result in mask vector "k". + CASE (imm8[4:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC +k[0] := ( a[31:0] OP b[31:0] ) ? 1 : 0 +k[MAX:1] := 0 + + + AVX512F +
immintrin.h
+ Compare
- - - - - - - Compare the lower single-precision (32-bit) floating-point element in "a" and - "b" based on the comparison operand specified by "imm8", and store the result in mask - vector "k" using zeromask "k1" (the element is zeroed out when mask bit 0 is not set). - [sae_note] - CASE (imm8[4:0]) OF - 0: OP := _CMP_EQ_OQ - 1: OP := _CMP_LT_OS - 2: OP := _CMP_LE_OS - 3: OP := _CMP_UNORD_Q - 4: OP := _CMP_NEQ_UQ - 5: OP := _CMP_NLT_US - 6: OP := _CMP_NLE_US - 7: OP := _CMP_ORD_Q - 8: OP := _CMP_EQ_UQ - 9: OP := _CMP_NGE_US - 10: OP := _CMP_NGT_US - 11: OP := _CMP_FALSE_OQ - 12: OP := _CMP_NEQ_OQ - 13: OP := _CMP_GE_OS - 14: OP := _CMP_GT_OS - 15: OP := _CMP_TRUE_UQ - 16: OP := _CMP_EQ_OS - 17: OP := _CMP_LT_OQ - 18: OP := _CMP_LE_OQ - 19: OP := _CMP_UNORD_S - 20: OP := _CMP_NEQ_US - 21: OP := _CMP_NLT_UQ - 22: OP := _CMP_NLE_UQ - 23: OP := _CMP_ORD_S - 24: OP := _CMP_EQ_US - 25: OP := _CMP_NGE_UQ - 26: OP := _CMP_NGT_UQ - 27: OP := _CMP_FALSE_OS - 28: OP := _CMP_NEQ_OS - 29: OP := _CMP_GE_OQ - 30: OP := _CMP_GT_OQ - 31: OP := _CMP_TRUE_US - ESAC - IF k1[0] - k[0] := ( a[31:0] OP b[31:0] ) ? 1 : 0 - ELSE - k[0] := 0 - FI - k[MAX:1] := 0 - - - AVX512F -
immintrin.h
- Compare + + + + + + + Compare the lower single-precision (32-bit) floating-point element in "a" and "b" based on the comparison operand specified by "imm8", and store the result in mask vector "k" using zeromask "k1" (the element is zeroed out when mask bit 0 is not set). [sae_note] + CASE (imm8[4:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC +IF k1[0] + k[0] := ( a[31:0] OP b[31:0] ) ? 1 : 0 +ELSE + k[0] := 0 +FI +k[MAX:1] := 0 + + + AVX512F +
immintrin.h
+ Compare
- - - - - - Compare the lower single-precision (32-bit) floating-point element in "a" and - "b" based on the comparison operand specified by "imm8", and store the result in mask - vector "k" using zeromask "k1" (the element is zeroed out when mask bit 0 is not set). - CASE (imm8[4:0]) OF - 0: OP := _CMP_EQ_OQ - 1: OP := _CMP_LT_OS - 2: OP := _CMP_LE_OS - 3: OP := _CMP_UNORD_Q - 4: OP := _CMP_NEQ_UQ - 5: OP := _CMP_NLT_US - 6: OP := _CMP_NLE_US - 7: OP := _CMP_ORD_Q - 8: OP := _CMP_EQ_UQ - 9: OP := _CMP_NGE_US - 10: OP := _CMP_NGT_US - 11: OP := _CMP_FALSE_OQ - 12: OP := _CMP_NEQ_OQ - 13: OP := _CMP_GE_OS - 14: OP := _CMP_GT_OS - 15: OP := _CMP_TRUE_UQ - 16: OP := _CMP_EQ_OS - 17: OP := _CMP_LT_OQ - 18: OP := _CMP_LE_OQ - 19: OP := _CMP_UNORD_S - 20: OP := _CMP_NEQ_US - 21: OP := _CMP_NLT_UQ - 22: OP := _CMP_NLE_UQ - 23: OP := _CMP_ORD_S - 24: OP := _CMP_EQ_US - 25: OP := _CMP_NGE_UQ - 26: OP := _CMP_NGT_UQ - 27: OP := _CMP_FALSE_OS - 28: OP := _CMP_NEQ_OS - 29: OP := _CMP_GE_OQ - 30: OP := _CMP_GT_OQ - 31: OP := _CMP_TRUE_US - ESAC - IF k1[0] - k[0] := ( a[31:0] OP b[31:0] ) ? 1 : 0 - ELSE - k[0] := 0 - FI - k[MAX:1] := 0 - - - AVX512F -
immintrin.h
- Compare + + + + + + Compare the lower single-precision (32-bit) floating-point element in "a" and "b" based on the comparison operand specified by "imm8", and store the result in mask vector "k" using zeromask "k1" (the element is zeroed out when mask bit 0 is not set). + CASE (imm8[4:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC +IF k1[0] + k[0] := ( a[31:0] OP b[31:0] ) ? 1 : 0 +ELSE + k[0] := 0 +FI +k[MAX:1] := 0 + + + AVX512F +
immintrin.h
+ Compare
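A usage sketch for the single-precision forms; the const-generic turbofish is an assumption about stdarch's spelling of these mask-returning scalar compares (nightly-only at the time of this patch):

```rust
#![feature(stdarch_x86_avx512)]
use core::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn lane0_lt(a: __m128, b: __m128) -> bool {
    // Only bit 0 of the returned mask is defined for a scalar compare.
    _mm_cmp_ss_mask::<_CMP_LT_OQ>(a, b) & 1 != 0
}

// The masked form threads k1 through: with bit 0 of k1 clear, the result
// is forced to 0 regardless of the operands.
#[target_feature(enable = "avx512f")]
unsafe fn lane0_lt_masked(k1: __mmask8, a: __m128, b: __m128) -> bool {
    _mm_mask_cmp_ss_mask::<_CMP_LT_OQ>(k1, a, b) & 1 != 0
}
```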
- - - - - - Compare the lower double-precision (64-bit) floating-point element in "a" and - "b" based on the comparison operand specified by "imm8", and return the boolean result - (0 or 1). [sae_note] - CASE (imm8[4:0]) OF - 0: OP := _CMP_EQ_OQ - 1: OP := _CMP_LT_OS - 2: OP := _CMP_LE_OS - 3: OP := _CMP_UNORD_Q - 4: OP := _CMP_NEQ_UQ - 5: OP := _CMP_NLT_US - 6: OP := _CMP_NLE_US - 7: OP := _CMP_ORD_Q - 8: OP := _CMP_EQ_UQ - 9: OP := _CMP_NGE_US - 10: OP := _CMP_NGT_US - 11: OP := _CMP_FALSE_OQ - 12: OP := _CMP_NEQ_OQ - 13: OP := _CMP_GE_OS - 14: OP := _CMP_GT_OS - 15: OP := _CMP_TRUE_UQ - 16: OP := _CMP_EQ_OS - 17: OP := _CMP_LT_OQ - 18: OP := _CMP_LE_OQ - 19: OP := _CMP_UNORD_S - 20: OP := _CMP_NEQ_US - 21: OP := _CMP_NLT_UQ - 22: OP := _CMP_NLE_UQ - 23: OP := _CMP_ORD_S - 24: OP := _CMP_EQ_US - 25: OP := _CMP_NGE_UQ - 26: OP := _CMP_NGT_UQ - 27: OP := _CMP_FALSE_OS - 28: OP := _CMP_NEQ_OS - 29: OP := _CMP_GE_OQ - 30: OP := _CMP_GT_OQ - 31: OP := _CMP_TRUE_US - ESAC - RETURN ( a[63:0] OP b[63:0] ) ? 1 : 0 - - - AVX512F -
immintrin.h
- Compare + + + + + + Compare the lower double-precision (64-bit) floating-point element in "a" and "b" based on the comparison operand specified by "imm8", and return the boolean result (0 or 1). [sae_note] + CASE (imm8[4:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC +RETURN ( a[63:0] OP b[63:0] ) ? 1 : 0 + + + AVX512F +
immintrin.h
+ Compare
- - - - - - Compare the lower single-precision (32-bit) floating-point element in "a" and - "b" based on the comparison operand specified by "imm8", and return the boolean result - (0 or 1). [sae_note] - CASE (imm8[4:0]) OF - 0: OP := _CMP_EQ_OQ - 1: OP := _CMP_LT_OS - 2: OP := _CMP_LE_OS - 3: OP := _CMP_UNORD_Q - 4: OP := _CMP_NEQ_UQ - 5: OP := _CMP_NLT_US - 6: OP := _CMP_NLE_US - 7: OP := _CMP_ORD_Q - 8: OP := _CMP_EQ_UQ - 9: OP := _CMP_NGE_US - 10: OP := _CMP_NGT_US - 11: OP := _CMP_FALSE_OQ - 12: OP := _CMP_NEQ_OQ - 13: OP := _CMP_GE_OS - 14: OP := _CMP_GT_OS - 15: OP := _CMP_TRUE_UQ - 16: OP := _CMP_EQ_OS - 17: OP := _CMP_LT_OQ - 18: OP := _CMP_LE_OQ - 19: OP := _CMP_UNORD_S - 20: OP := _CMP_NEQ_US - 21: OP := _CMP_NLT_UQ - 22: OP := _CMP_NLE_UQ - 23: OP := _CMP_ORD_S - 24: OP := _CMP_EQ_US - 25: OP := _CMP_NGE_UQ - 26: OP := _CMP_NGT_UQ - 27: OP := _CMP_FALSE_OS - 28: OP := _CMP_NEQ_OS - 29: OP := _CMP_GE_OQ - 30: OP := _CMP_GT_OQ - 31: OP := _CMP_TRUE_US - ESAC - RETURN ( a[31:0] OP b[31:0] ) ? 1 : 0 - - - - AVX512F -
immintrin.h
- Compare + + + + + + Compare the lower single-precision (32-bit) floating-point element in "a" and "b" based on the comparison operand specified by "imm8", and return the boolean result (0 or 1). [sae_note] + CASE (imm8[4:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC +RETURN ( a[31:0] OP b[31:0] ) ? 1 : 0 + + + + AVX512F +
immintrin.h
+ Compare
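For the boolean-returning comi entries, a hypothetical check; `_mm_comi_round_sd::<IMM5, SAE>` is an assumption about stdarch's spelling, and `_MM_FROUND_NO_EXC` selects the SAE behavior the [sae_note] placeholder above refers to:

```rust
#![feature(stdarch_x86_avx512)]
use core::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn exactly_equal(a: __m128d, b: __m128d) -> bool {
    // Returns the 0/1 int from the RETURN line above; EQ_OQ is false for NaN.
    _mm_comi_round_sd::<_CMP_EQ_OQ, _MM_FROUND_NO_EXC>(a, b) == 1
}
```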
- - - - Compare packed signed 32-bit integers in "a" and "b" for less-than, and store - the results in mask vector "k". - - FOR j := 0 to 15 - i := j*32 - k[j] := ( a[i+31:i] < b[i+31:i] ) ? 1 : 0 - ENDFOR - k[MAX:16] := 0 - - - AVX512F -
immintrin.h
- Compare + + + + Compare packed signed 32-bit integers in "a" and "b" for less-than, and store the results in mask vector "k". + +FOR j := 0 to 15 + i := j*32 + k[j] := ( a[i+31:i] < b[i+31:i] ) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + + AVX512F +
immintrin.h
+ Compare
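The fixed-predicate integer compares produce one mask bit per element; a short sketch (the returned `__mmask16` is a plain `u16` on the Rust side):

```rust
#![feature(stdarch_x86_avx512)]
use core::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn count_less(a: __m512i, b: __m512i) -> u32 {
    // k[j] = 1 exactly when dword j of a is signed-less-than dword j of b.
    _mm512_cmplt_epi32_mask(a, b).count_ones()
}
```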
- - - - - Compare packed signed 32-bit integers in "a" and "b" for less-than-or-equal, - and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out - when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k1[j] - k[j] := ( a[i+31:i] < b[i+31:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:16] := 0 - - - AVX512F -
immintrin.h
- Compare + + + + + Compare packed signed 32-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k1[j] + k[j] := ( a[i+31:i] < b[i+31:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + AVX512F
immintrin.h
+ Compare
- - - - - Compare packed signed 64-bit integers in "a" and "b" based on the comparison - operand specified by "imm8", and store the results in mask vector "k". - CASE (imm8[2:0]) OF - 0: OP := _MM_CMPINT_EQ - 1: OP := _MM_CMPINT_LT - 2: OP := _MM_CMPINT_LE - 3: OP := _MM_CMPINT_FALSE - 4: OP := _MM_CMPINT_NE - 5: OP := _MM_CMPINT_NLT - 6: OP := _MM_CMPINT_NLE - 7: OP := _MM_CMPINT_TRUE - ESAC - FOR j := 0 to 7 - i := j*64 - k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0 - ENDFOR - k[MAX:8] := 0 - - - AVX512F -
immintrin.h
- Compare + + + + + Compare packed signed 64-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + CASE (imm8[2:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 7 + i := j*64 + k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + + AVX512F +
immintrin.h
+ Compare
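The 3-bit predicate form subsumes the fixed cmpeq/cmpge/cmpgt/cmple/cmplt/cmpneq entries that follow; a hedged sketch, assuming stdarch's const-generic spelling:

```rust
#![feature(stdarch_x86_avx512)]
use core::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn le_mask(a: __m512i, b: __m512i) -> __mmask8 {
    // With _MM_CMPINT_LE this matches _mm512_cmple_epi64_mask(a, b).
    _mm512_cmp_epi64_mask::<_MM_CMPINT_LE>(a, b)
}
```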
- - - - Compare packed 64-bit integers in "a" and "b" for equality, and store the - results in mask vector "k". - - FOR j := 0 to 7 - i := j*64 - k[j] := ( a[i+63:i] == b[i+63:i] ) ? 1 : 0 - ENDFOR - k[MAX:8] := 0 - - - AVX512F -
immintrin.h
- Compare + + + + Compare packed 64-bit integers in "a" and "b" for equality, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*64 + k[j] := ( a[i+63:i] == b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + + AVX512F +
immintrin.h
+ Compare
- - - - Compare packed signed 64-bit integers in "a" and "b" for greater-than-or-equal, - and store the results in mask vector "k". - - FOR j := 0 to 7 - i := j*64 - k[j] := ( a[i+63:i] >= b[i+63:i] ) ? 1 : 0 - ENDFOR - k[MAX:8] := 0 - - - AVX512F -
immintrin.h
- Compare + + + + Compare packed signed 64-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*64 + k[j] := ( a[i+63:i] >= b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + + AVX512F +
immintrin.h
+ Compare
- - - - Compare packed signed 64-bit integers in "a" and "b" for greater-than, and - store the results in mask vector "k". - - FOR j := 0 to 7 - i := j*64 - k[j] := ( a[i+63:i] > b[i+63:i] ) ? 1 : 0 - ENDFOR - k[MAX:8] := 0 - - - AVX512F -
immintrin.h
- Compare + + + + Compare packed signed 64-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*64 + k[j] := ( a[i+63:i] > b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + + AVX512F +
immintrin.h
+ Compare
- - - - Compare packed signed 64-bit integers in "a" and "b" for less-than-or-equal, - and store the results in mask vector "k". - - FOR j := 0 to 7 - i := j*64 - k[j] := ( a[i+63:i] <= b[i+63:i] ) ? 1 : 0 - ENDFOR - k[MAX:8] := 0 - - - AVX512F -
immintrin.h
- Compare + + + + Compare packed signed 64-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*64 + k[j] := ( a[i+63:i] <= b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + + AVX512F +
immintrin.h
+ Compare
- - - - Compare packed signed 64-bit integers in "a" and "b" for less-than, and store - the results in mask vector "k". - - FOR j := 0 to 7 - i := j*64 - k[j] := ( a[i+63:i] < b[i+63:i] ) ? 1 : 0 - ENDFOR - k[MAX:8] := 0 - - - AVX512F -
immintrin.h
- Compare + + + + Compare packed signed 64-bit integers in "a" and "b" for less-than, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*64 + k[j] := ( a[i+63:i] < b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + + AVX512F +
immintrin.h
+ Compare
- - - - Compare packed signed 64-bit integers in "a" and "b" for not-equal, and store - the results in mask vector "k". - - FOR j := 0 to 7 - i := j*64 - k[j] := ( a[i+63:i] != b[i+63:i] ) ? 1 : 0 - ENDFOR - k[MAX:8] := 0 - - - AVX512F -
immintrin.h
- Compare + + + + Compare packed signed 64-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k". + +FOR j := 0 to 7 + i := j*64 + k[j] := ( a[i+63:i] != b[i+63:i] ) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + + AVX512F +
immintrin.h
+ Compare
- - - - - - Compare packed signed 64-bit integers in "a" and "b" based on the comparison - operand specified by "imm8", and store the results in mask vector "k" using zeromask - "k1" (elements are zeroed out when the corresponding mask bit is not set). - CASE (imm8[2:0]) OF - 0: OP := _MM_CMPINT_EQ - 1: OP := _MM_CMPINT_LT - 2: OP := _MM_CMPINT_LE - 3: OP := _MM_CMPINT_FALSE - 4: OP := _MM_CMPINT_NE - 5: OP := _MM_CMPINT_NLT - 6: OP := _MM_CMPINT_NLE - 7: OP := _MM_CMPINT_TRUE - ESAC - FOR j := 0 to 7 - i := j*64 - IF k1[j] - k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:8] := 0 - - - AVX512F -
immintrin.h
- Compare + + + + + + Compare packed signed 64-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + CASE (imm8[2:0]) OF +0: OP := _MM_CMPINT_EQ +1: OP := _MM_CMPINT_LT +2: OP := _MM_CMPINT_LE +3: OP := _MM_CMPINT_FALSE +4: OP := _MM_CMPINT_NE +5: OP := _MM_CMPINT_NLT +6: OP := _MM_CMPINT_NLE +7: OP := _MM_CMPINT_TRUE +ESAC +FOR j := 0 to 7 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + + AVX512F +
immintrin.h
+ Compare
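Per its pseudocode, the zeromasked predicate compare equals the unmasked compare ANDed with k1; a sketch asserting that equivalence (same spelling assumptions as above):

```rust
#![feature(stdarch_x86_avx512)]
use core::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn masked_lt(k1: __mmask8, a: __m512i, b: __m512i) -> __mmask8 {
    let m = _mm512_mask_cmp_epi64_mask::<_MM_CMPINT_LT>(k1, a, b);
    // Zeromasking a compare is the same as post-ANDing the full result.
    debug_assert_eq!(m, k1 & _mm512_cmp_epi64_mask::<_MM_CMPINT_LT>(a, b));
    m
}
```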
- - - - - Compare packed 64-bit integers in "a" and "b" for equality, and store the - results in mask vector "k" using zeromask "k1" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k1[j] - k[j] := ( a[i+63:i] == b[i+63:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:8] := 0 - - - AVX512F -
immintrin.h
- Compare + + + + + Compare packed 64-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] == b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + + AVX512F +
immintrin.h
+ Compare
- - - - - Compare packed signed 64-bit integers in "a" and "b" for greater-than-or-equal, - and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out - when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k1[j] - k[j] := ( a[i+63:i] >= b[i+63:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:8] := 0 - - - AVX512F -
immintrin.h
- Compare + + + + + Compare packed signed 64-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] >= b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + + AVX512F +
immintrin.h
+ Compare
- - - - - Compare packed signed 64-bit integers in "a" and "b" for greater-than, and - store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when - the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k1[j] - k[j] := ( a[i+63:i] > b[i+63:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:8] := 0 - - - AVX512F -
immintrin.h
- Compare + + + + + Compare packed signed 64-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] > b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + + AVX512F +
immintrin.h
+ Compare
- - - - - Compare packed signed 64-bit integers in "a" and "b" for less-than-or-equal, - and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out - when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k1[j] - k[j] := ( a[i+63:i] <= b[i+63:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:8] := 0 - - - AVX512F -
immintrin.h
- Compare + + + + + Compare packed signed 64-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] <= b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + + AVX512F +
immintrin.h
+ Compare
- - - - - Compare packed signed 64-bit integers in "a" and "b" for less-than, and store - the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k1[j] - k[j] := ( a[i+63:i] < b[i+63:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:8] := 0 - - - AVX512F -
immintrin.h
- Compare + + + + + Compare packed signed 64-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] < b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + + AVX512F +
immintrin.h
+ Compare
- - - - - Compare packed signed 64-bit integers in "a" and "b" for not-equal, and store - the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k1[j] - k[j] := ( a[i+63:i] != b[i+63:i] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:8] := 0 - - - AVX512F -
immintrin.h
- Compare + + + + + Compare packed signed 64-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k1[j] + k[j] := ( a[i+63:i] != b[i+63:i] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + + AVX512F +
immintrin.h
+ Compare
Compare packed unsigned 64-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".

CASE (imm8[2:0]) OF
0: OP := _MM_CMPINT_EQ
1: OP := _MM_CMPINT_LT
2: OP := _MM_CMPINT_LE
3: OP := _MM_CMPINT_FALSE
4: OP := _MM_CMPINT_NE
5: OP := _MM_CMPINT_NLT
6: OP := _MM_CMPINT_NLE
7: OP := _MM_CMPINT_TRUE
ESAC
FOR j := 0 to 7
    i := j*64
    k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0
ENDFOR
k[MAX:8] := 0

AVX512F / immintrin.h / Compare

Compare packed unsigned 64-bit integers in "a" and "b" for equality, and store the results in mask vector "k".

FOR j := 0 to 7
    i := j*64
    k[j] := ( a[i+63:i] == b[i+63:i] ) ? 1 : 0
ENDFOR
k[MAX:8] := 0

AVX512F / immintrin.h / Compare

Compare packed unsigned 64-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k".

FOR j := 0 to 7
    i := j*64
    k[j] := ( a[i+63:i] >= b[i+63:i] ) ? 1 : 0
ENDFOR
k[MAX:8] := 0

AVX512F / immintrin.h / Compare

Compare packed unsigned 64-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k".

FOR j := 0 to 7
    i := j*64
    k[j] := ( a[i+63:i] > b[i+63:i] ) ? 1 : 0
ENDFOR
k[MAX:8] := 0

AVX512F / immintrin.h / Compare

Compare packed unsigned 64-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k".

FOR j := 0 to 7
    i := j*64
    k[j] := ( a[i+63:i] <= b[i+63:i] ) ? 1 : 0
ENDFOR
k[MAX:8] := 0

AVX512F / immintrin.h / Compare

Compare packed unsigned 64-bit integers in "a" and "b" for less-than, and store the results in mask vector "k".

FOR j := 0 to 7
    i := j*64
    k[j] := ( a[i+63:i] < b[i+63:i] ) ? 1 : 0
ENDFOR
k[MAX:8] := 0

AVX512F / immintrin.h / Compare

Compare packed unsigned 64-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k".

FOR j := 0 to 7
    i := j*64
    k[j] := ( a[i+63:i] != b[i+63:i] ) ? 1 : 0
ENDFOR
k[MAX:8] := 0

AVX512F / immintrin.h / Compare
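
The imm8 form selects one of eight predicates from imm8[2:0]; the named equality/greater-than/less-than entries are its fixed-predicate special cases. A minimal Rust model of the dispatch (hypothetical helper; unsigned lanes):

    // Scalar model of the imm8-selected unsigned 64-bit compare.
    // imm8[2:0] picks the predicate; all eight lanes use the same one.
    fn cmp_epu64_model(a: [u64; 8], b: [u64; 8], imm8: u8) -> u8 {
        let mut k = 0u8;
        for j in 0..8 {
            let bit = match imm8 & 0b111 {
                0 => a[j] == b[j], // _MM_CMPINT_EQ
                1 => a[j] < b[j],  // _MM_CMPINT_LT
                2 => a[j] <= b[j], // _MM_CMPINT_LE
                3 => false,        // _MM_CMPINT_FALSE
                4 => a[j] != b[j], // _MM_CMPINT_NE
                5 => a[j] >= b[j], // _MM_CMPINT_NLT
                6 => a[j] > b[j],  // _MM_CMPINT_NLE
                _ => true,         // _MM_CMPINT_TRUE
            };
            k |= (bit as u8) << j;
        }
        k
    }
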
Compare packed unsigned 64-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

CASE (imm8[2:0]) OF
0: OP := _MM_CMPINT_EQ
1: OP := _MM_CMPINT_LT
2: OP := _MM_CMPINT_LE
3: OP := _MM_CMPINT_FALSE
4: OP := _MM_CMPINT_NE
5: OP := _MM_CMPINT_NLT
6: OP := _MM_CMPINT_NLE
7: OP := _MM_CMPINT_TRUE
ESAC
FOR j := 0 to 7
    i := j*64
    IF k1[j]
        k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:8] := 0

AVX512F / immintrin.h / Compare

Compare packed unsigned 64-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := j*64
    IF k1[j]
        k[j] := ( a[i+63:i] == b[i+63:i] ) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:8] := 0

AVX512F / immintrin.h / Compare

Compare packed unsigned 64-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := j*64
    IF k1[j]
        k[j] := ( a[i+63:i] >= b[i+63:i] ) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:8] := 0

AVX512F / immintrin.h / Compare

Compare packed unsigned 64-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := j*64
    IF k1[j]
        k[j] := ( a[i+63:i] > b[i+63:i] ) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:8] := 0

AVX512F / immintrin.h / Compare

Compare packed unsigned 64-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := j*64
    IF k1[j]
        k[j] := ( a[i+63:i] <= b[i+63:i] ) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:8] := 0

AVX512F / immintrin.h / Compare

Compare packed unsigned 64-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := j*64
    IF k1[j]
        k[j] := ( a[i+63:i] < b[i+63:i] ) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:8] := 0

AVX512F / immintrin.h / Compare

Compare packed unsigned 64-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := j*64
    IF k1[j]
        k[j] := ( a[i+63:i] != b[i+63:i] ) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:8] := 0

AVX512F / immintrin.h / Compare
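
The zeromasked variants combine the two mechanisms: the selected predicate is evaluated per lane, and lanes whose "k1" bit is clear produce 0. Since that is equivalent to ANDing the unmasked result with "k1", a one-line sketch suffices (reusing the hypothetical cmp_epu64_model above):

    // Zeromasked variant: lanes with a clear `k1` bit yield 0.
    fn mask_cmp_epu64_model(k1: u8, a: [u64; 8], b: [u64; 8], imm8: u8) -> u8 {
        cmp_epu64_model(a, b, imm8) & k1
    }
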
Convert packed signed 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst".

FOR j := 0 to 7
    i := j*32
    m := j*64
    dst[m+63:m] := Convert_Int32_To_FP64(a[i+31:i])
ENDFOR
dst[MAX:512] := 0

AVX512F / immintrin.h / Convert

Convert packed signed 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := j*32
    m := j*64
    IF k[j]
        dst[m+63:m] := Convert_Int32_To_FP64(a[i+31:i])
    ELSE
        dst[m+63:m] := src[m+63:m]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F / immintrin.h / Convert

Convert packed signed 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := j*32
    m := j*64
    IF k[j]
        dst[m+63:m] := Convert_Int32_To_FP64(a[i+31:i])
    ELSE
        dst[m+63:m] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F / immintrin.h / Convert
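
Every i32 value is exactly representable as f64, so this conversion is lossless and needs no rounding control. A scalar Rust sketch of the plain and writemask forms (hypothetical model functions; the zeromask form simply substitutes 0.0 for the src lane):

    // Plain conversion: eight i32 lanes widen exactly to eight f64 lanes.
    fn cvtepi32_pd_model(a: [i32; 8]) -> [f64; 8] {
        core::array::from_fn(|j| a[j] as f64)
    }

    // Writemask: converted lane where the k bit is set, else the src lane.
    fn mask_cvtepi32_pd_model(src: [f64; 8], k: u8, a: [i32; 8]) -> [f64; 8] {
        core::array::from_fn(|j| if (k >> j) & 1 != 0 { a[j] as f64 } else { src[j] })
    }
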
Convert packed signed 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".
[round_note]

FOR j := 0 to 15
    i := 32*j
    dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i])
ENDFOR
dst[MAX:512] := 0

AVX512F / immintrin.h / Convert

Convert packed signed 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".

FOR j := 0 to 15
    i := 32*j
    dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i])
ENDFOR
dst[MAX:512] := 0

AVX512F / immintrin.h / Convert

Convert packed signed 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
[round_note]

FOR j := 0 to 15
    i := j*32
    IF k[j]
        dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i])
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F / immintrin.h / Convert

Convert packed signed 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*32
    IF k[j]
        dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i])
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F / immintrin.h / Convert

Convert packed signed 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
[round_note]

FOR j := 0 to 15
    i := 32*j
    IF k[j]
        dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i])
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F / immintrin.h / Convert

Convert packed signed 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := 32*j
    IF k[j]
        dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i])
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F / immintrin.h / Convert
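
Unlike the i32-to-f64 case, i32-to-f32 is inexact for magnitudes above 2^24, which is why these entries also come in [round_note] variants. A sketch of the zeromask form under the default round-to-nearest-even (which is what Rust's integer-to-float `as` cast uses):

    // Zeromask i32 -> f32 conversion over 16 lanes (hypothetical model).
    fn maskz_cvtepi32_ps_model(k: u16, a: [i32; 16]) -> [f32; 16] {
        core::array::from_fn(|j| if (k >> j) & 1 != 0 { a[j] as f32 } else { 0.0 })
    }
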
Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst".
[round_note]

FOR j := 0 to 7
    i := 32*j
    k := 64*j
    dst[i+31:i] := Convert_FP64_To_Int32(a[k+63:k])
ENDFOR
dst[MAX:256] := 0

AVX512F / immintrin.h / Convert

Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst".

FOR j := 0 to 7
    i := 32*j
    k := 64*j
    dst[i+31:i] := Convert_FP64_To_Int32(a[k+63:k])
ENDFOR
dst[MAX:256] := 0

AVX512F / immintrin.h / Convert

Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
[round_note]

FOR j := 0 to 7
    i := j*32
    l := j*64
    IF k[j]
        dst[i+31:i] := Convert_FP64_To_Int32(a[l+63:l])
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:256] := 0

AVX512F / immintrin.h / Convert

Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := j*32
    l := j*64
    IF k[j]
        dst[i+31:i] := Convert_FP64_To_Int32(a[l+63:l])
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:256] := 0

AVX512F / immintrin.h / Convert

Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
[round_note]

FOR j := 0 to 7
    i := 32*j
    l := 64*j
    IF k[j]
        dst[i+31:i] := Convert_FP64_To_Int32(a[l+63:l])
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

AVX512F / immintrin.h / Convert

Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := 32*j
    l := 64*j
    IF k[j]
        dst[i+31:i] := Convert_FP64_To_Int32(a[l+63:l])
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

AVX512F / immintrin.h / Convert
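
Convert_FP64_To_Int32 rounds according to the current (or explicitly supplied) rounding mode, whereas Rust's float-to-int `as` cast always truncates toward zero, so a faithful sketch has to round first. Assuming round-to-nearest-even and ignoring the out-of-range case (hardware returns the integer indefinite value 0x80000000 there, while `as` saturates):

    // f64 -> i32 over eight lanes, round-to-nearest-even (hypothetical
    // model; `round_ties_even` is stable Rust, `as` saturates out-of-range).
    fn cvtpd_epi32_model(a: [f64; 8]) -> [i32; 8] {
        core::array::from_fn(|j| a[j].round_ties_even() as i32)
    }
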
Convert packed double-precision (64-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".
[round_note]

FOR j := 0 to 7
    i := 32*j
    k := 64*j
    dst[i+31:i] := Convert_FP64_To_FP32(a[k+63:k])
ENDFOR
dst[MAX:256] := 0

AVX512F / immintrin.h / Convert

Convert packed double-precision (64-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".

FOR j := 0 to 7
    i := 32*j
    k := 64*j
    dst[i+31:i] := Convert_FP64_To_FP32(a[k+63:k])
ENDFOR
dst[MAX:256] := 0

AVX512F / immintrin.h / Convert

Convert packed double-precision (64-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
[round_note]

FOR j := 0 to 7
    i := j*32
    l := j*64
    IF k[j]
        dst[i+31:i] := Convert_FP64_To_FP32(a[l+63:l])
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:256] := 0

AVX512F / immintrin.h / Convert

Convert packed double-precision (64-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := 32*j
    l := 64*j
    IF k[j]
        dst[i+31:i] := Convert_FP64_To_FP32(a[l+63:l])
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:256] := 0

AVX512F / immintrin.h / Convert

Convert packed double-precision (64-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
[round_note]

FOR j := 0 to 7
    i := j*32
    l := j*64
    IF k[j]
        dst[i+31:i] := Convert_FP64_To_FP32(a[l+63:l])
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

AVX512F / immintrin.h / Convert

Convert packed double-precision (64-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := j*32
    l := j*64
    IF k[j]
        dst[i+31:i] := Convert_FP64_To_FP32(a[l+63:l])
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

AVX512F / immintrin.h / Convert
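
Narrowing f64 to f32 rounds to the nearest representable single; Rust's float-to-float `as` cast implements exactly that (round-to-nearest-even), matching these entries' default rounding mode:

    // Narrow eight f64 lanes to f32 (hypothetical model; the real
    // instruction also zeroes the upper 256 bits of the destination).
    fn cvtpd_ps_model(a: [f64; 8]) -> [f32; 8] {
        core::array::from_fn(|j| a[j] as f32)
    }
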
Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst".
[round_note]

FOR j := 0 to 7
    i := 32*j
    k := 64*j
    dst[i+31:i] := Convert_FP64_To_UInt32(a[k+63:k])
ENDFOR
dst[MAX:256] := 0

AVX512F / immintrin.h / Convert

Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst".

FOR j := 0 to 7
    i := 32*j
    k := 64*j
    dst[i+31:i] := Convert_FP64_To_UInt32(a[k+63:k])
ENDFOR
dst[MAX:256] := 0

AVX512F / immintrin.h / Convert

Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
[round_note]

FOR j := 0 to 7
    i := j*32
    l := j*64
    IF k[j]
        dst[i+31:i] := Convert_FP64_To_UInt32(a[l+63:l])
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:256] := 0

AVX512F / immintrin.h / Convert

Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := j*32
    l := j*64
    IF k[j]
        dst[i+31:i] := Convert_FP64_To_UInt32(a[l+63:l])
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:256] := 0

AVX512F / immintrin.h / Convert

Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
[round_note]

FOR j := 0 to 7
    i := 32*j
    l := 64*j
    IF k[j]
        dst[i+31:i] := Convert_FP64_To_UInt32(a[l+63:l])
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

AVX512F / immintrin.h / Convert

Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := 32*j
    l := 64*j
    IF k[j]
        dst[i+31:i] := Convert_FP64_To_UInt32(a[l+63:l])
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

AVX512F / immintrin.h / Convert
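
The unsigned conversion diverges from Rust semantics on out-of-range input: Convert_FP64_To_UInt32 produces the integer indefinite value and signals an invalid-operation exception, while Rust's `as` saturates (negative inputs to 0, oversized ones to u32::MAX). With that caveat, a sketch:

    // f64 -> u32, round-to-nearest-even; saturates where hardware would
    // return the indefinite value (an intentional simplification).
    fn cvtpd_epu32_model(a: [f64; 8]) -> [u32; 8] {
        core::array::from_fn(|j| a[j].round_ties_even() as u32)
    }
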
Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". [sae_note]

FOR j := 0 to 15
    i := j*32
    m := j*16
    dst[i+31:i] := Convert_FP16_To_FP32(a[m+15:m])
ENDFOR
dst[MAX:512] := 0

AVX512F / immintrin.h / Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".

FOR j := 0 to 15
    i := j*32
    m := j*16
    dst[i+31:i] := Convert_FP16_To_FP32(a[m+15:m])
ENDFOR
dst[MAX:512] := 0

AVX512F / immintrin.h / Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [sae_note]

FOR j := 0 to 15
    i := j*32
    m := j*16
    IF k[j]
        dst[i+31:i] := Convert_FP16_To_FP32(a[m+15:m])
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F / immintrin.h / Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*32
    m := j*16
    IF k[j]
        dst[i+31:i] := Convert_FP16_To_FP32(a[m+15:m])
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F / immintrin.h / Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note]

FOR j := 0 to 15
    i := j*32
    m := j*16
    IF k[j]
        dst[i+31:i] := Convert_FP16_To_FP32(a[m+15:m])
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F / immintrin.h / Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*32
    m := j*16
    IF k[j]
        dst[i+31:i] := Convert_FP16_To_FP32(a[m+15:m])
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F / immintrin.h / Convert
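
Widening f16 to f32 is exact (every half-precision value is representable in single precision), which is why these entries offer only [sae_note], suppress-all-exceptions, rather than a rounding override. With the nightly f16 primitive this patch series builds on, a sketch:

    // Widen 16 half-precision lanes to single precision (lossless).
    // Assumes a nightly toolchain with #![feature(f16)] enabled.
    fn cvtph_ps_model(a: [f16; 16]) -> [f32; 16] {
        core::array::from_fn(|j| a[j] as f32)
    }
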
Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst".
[round_note]

FOR j := 0 to 15
    i := 32*j
    dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i])
ENDFOR
dst[MAX:512] := 0

AVX512F / immintrin.h / Convert

Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst".

FOR j := 0 to 15
    i := 32*j
    dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i])
ENDFOR
dst[MAX:512] := 0

AVX512F / immintrin.h / Convert

Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
[round_note]

FOR j := 0 to 15
    i := j*32
    IF k[j]
        dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i])
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F / immintrin.h / Convert

Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*32
    IF k[j]
        dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i])
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F / immintrin.h / Convert

Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
[round_note]

FOR j := 0 to 15
    i := 32*j
    IF k[j]
        dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i])
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F / immintrin.h / Convert

Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := 32*j
    IF k[j]
        dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i])
    ELSE
        dst[i+31:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F / immintrin.h / Convert
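
Same rounding considerations as the f64-to-i32 entries, but over sixteen lanes into a full 512-bit destination. A sketch of the writemask form under round-to-nearest-even (hypothetical model; out-of-range handling again differs from the saturating `as`):

    // f32 -> i32; the src lane is kept wherever the k bit is clear.
    fn mask_cvtps_epi32_model(src: [i32; 16], k: u16, a: [f32; 16]) -> [i32; 16] {
        core::array::from_fn(|j| {
            if (k >> j) & 1 != 0 { a[j].round_ties_even() as i32 } else { src[j] }
        })
    }
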
Convert packed single-precision (32-bit) floating-point elements in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst". [sae_note]

FOR j := 0 to 7
    i := 64*j
    k := 32*j
    dst[i+63:i] := Convert_FP32_To_FP64(a[k+31:k])
ENDFOR
dst[MAX:512] := 0

AVX512F / immintrin.h / Convert

Convert packed single-precision (32-bit) floating-point elements in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst".

FOR j := 0 to 7
    i := 64*j
    k := 32*j
    dst[i+63:i] := Convert_FP32_To_FP64(a[k+31:k])
ENDFOR
dst[MAX:512] := 0

AVX512F / immintrin.h / Convert

Convert packed single-precision (32-bit) floating-point elements in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [sae_note]

FOR j := 0 to 7
    i := 64*j
    l := 32*j
    IF k[j]
        dst[i+63:i] := Convert_FP32_To_FP64(a[l+31:l])
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F / immintrin.h / Convert

Convert packed single-precision (32-bit) floating-point elements in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := 64*j
    l := 32*j
    IF k[j]
        dst[i+63:i] := Convert_FP32_To_FP64(a[l+31:l])
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F / immintrin.h / Convert

Convert packed single-precision (32-bit) floating-point elements in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note]

FOR j := 0 to 7
    i := 64*j
    l := 32*j
    IF k[j]
        dst[i+63:i] := Convert_FP32_To_FP64(a[l+31:l])
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F / immintrin.h / Convert

Convert packed single-precision (32-bit) floating-point elements in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := 64*j
    l := 32*j
    IF k[j]
        dst[i+63:i] := Convert_FP32_To_FP64(a[l+31:l])
    ELSE
        dst[i+63:i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F / immintrin.h / Convert
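
Like f16-to-f32, widening f32 to f64 is exact, so only [sae_note] is offered:

    // Widen the lower eight f32 lanes to f64 (lossless; hypothetical model).
    fn cvtps_pd_model(a: [f32; 8]) -> [f64; 8] {
        core::array::from_fn(|j| a[j] as f64)
    }
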
Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst". [round2_note]

FOR j := 0 to 15
    i := 16*j
    l := 32*j
    dst[i+15:i] := Convert_FP32_To_FP16(a[l+31:l])
ENDFOR
dst[MAX:256] := 0

AVX512F / immintrin.h / Convert

Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst". [round2_note]

FOR j := 0 to 15
    i := 16*j
    l := 32*j
    dst[i+15:i] := Convert_FP32_To_FP16(a[l+31:l])
ENDFOR
dst[MAX:256] := 0

AVX512F / immintrin.h / Convert

Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round2_note]

FOR j := 0 to 15
    i := 16*j
    l := 32*j
    IF k[j]
        dst[i+15:i] := Convert_FP32_To_FP16(a[l+31:l])
    ELSE
        dst[i+15:i] := src[i+15:i]
    FI
ENDFOR
dst[MAX:256] := 0

AVX512F / immintrin.h / Convert

Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round2_note]

FOR j := 0 to 15
    i := 16*j
    l := 32*j
    IF k[j]
        dst[i+15:i] := Convert_FP32_To_FP16(a[l+31:l])
    ELSE
        dst[i+15:i] := src[i+15:i]
    FI
ENDFOR
dst[MAX:256] := 0

AVX512F / immintrin.h / Convert

Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round2_note]

FOR j := 0 to 15
    i := 16*j
    l := 32*j
    IF k[j]
        dst[i+15:i] := Convert_FP32_To_FP16(a[l+31:l])
    ELSE
        dst[i+15:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

AVX512F / immintrin.h / Convert

Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round2_note]

FOR j := 0 to 15
    i := 16*j
    l := 32*j
    IF k[j]
        dst[i+15:i] := Convert_FP32_To_FP16(a[l+31:l])
    ELSE
        dst[i+15:i] := 0
    FI
ENDFOR
dst[MAX:256] := 0

AVX512F / immintrin.h / Convert
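
Narrowing f32 to f16 is lossy, and these entries take an explicit rounding operand ([round2_note]). A nightly-f16 sketch of the plain form under round-to-nearest-even (Rust's float-to-float `as` cast rounds to nearest, ties to even):

    // Narrow 16 f32 lanes to f16; assumes #![feature(f16)] on nightly.
    fn cvtps_ph_model(a: [f32; 16]) -> [f16; 16] {
        core::array::from_fn(|j| a[j] as f16)
    }
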
- - - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed unsigned 32-bit integers, and store the results in "dst". - [round_note] - - FOR j := 0 to 15 - i := 32*j - dst[i+31:i] := Convert_FP32_To_UInt32(a[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst". + [round_note] + +FOR j := 0 to 15 + i := 32*j + dst[i+31:i] := Convert_FP32_To_UInt32(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed unsigned 32-bit integers, and store the results in "dst". - - FOR j := 0 to 15 - i := 32*j - dst[i+31:i] := Convert_FP32_To_UInt32(a[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst". + +FOR j := 0 to 15 + i := 32*j + dst[i+31:i] := Convert_FP32_To_UInt32(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed unsigned 32-bit integers, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - [round_note] - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := Convert_FP32_To_UInt32(a[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := Convert_FP32_To_UInt32(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed unsigned 32-bit integers, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := 32*j - IF k[j] - dst[i+31:i] := Convert_FP32_To_UInt32(a[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := Convert_FP32_To_UInt32(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed unsigned 32-bit integers, and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - [round_note] - - FOR j := 0 to 15 - i := 32*j - IF k[j] - dst[i+31:i] := Convert_FP32_To_UInt32(a[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := Convert_FP32_To_UInt32(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed unsigned 32-bit integers, and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := 32*j - IF k[j] - dst[i+31:i] := Convert_FP32_To_UInt32(a[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
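The six entries above differ only in masking (none, writemask, zeromask) and in whether an explicit rounding mode is taken ([round_note]). A minimal Rust sketch of the shared mask semantics, written as a reference model over plain arrays rather than the real __m512/__m512i types (the helper names are ours, and Rust's `as` cast saturates where the hardware honors the selected rounding mode and signals #I on out-of-range inputs):

    // Writemask: lanes with k[j] == 0 are copied through from `src`.
    fn cvtps_epu32_mask(src: [u32; 16], k: u16, a: [f32; 16]) -> [u32; 16] {
        let mut dst = src;
        for j in 0..16 {
            if (k >> j) & 1 != 0 {
                dst[j] = a[j] as u32; // reference only: `as` truncates and saturates
            }
        }
        dst
    }

    // Zeromask: lanes with k[j] == 0 are zeroed instead.
    fn cvtps_epu32_maskz(k: u16, a: [f32; 16]) -> [u32; 16] {
        let mut dst = [0u32; 16];
        for j in 0..16 {
            if (k >> j) & 1 != 0 {
                dst[j] = a[j] as u32;
            }
        }
        dst
    }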
- Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := Convert_FP32_To_UInt32(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - Convert the lower double-precision (64-bit) floating-point element in "a" to a - 32-bit integer, and store the result in "dst". - [round_note] - - dst[31:0] := Convert_FP64_To_Int32(a[63:0]) - - - AVX512F -
immintrin.h
- Convert + + + + Convert the lower double-precision (64-bit) floating-point element in "a" to a 32-bit integer, and store the result in "dst". + [round_note] + +dst[31:0] := Convert_FP64_To_Int32(a[63:0]) + + + AVX512F +
immintrin.h
+ Convert
- - - - Convert the lower double-precision (64-bit) floating-point element in "a" to a - 64-bit integer, and store the result in "dst". - [round_note] - - dst[63:0] := Convert_FP64_To_Int64(a[63:0]) - - - AVX512F -
immintrin.h
- Convert + + + + Convert the lower double-precision (64-bit) floating-point element in "a" to a 64-bit integer, and store the result in "dst". + [round_note] + +dst[63:0] := Convert_FP64_To_Int64(a[63:0]) + + + AVX512F +
immintrin.h
+ Convert
- - - - Convert the lower double-precision (64-bit) floating-point element in "a" to a - 32-bit integer, and store the result in "dst". - [round_note] - - dst[31:0] := Convert_FP64_To_Int32(a[63:0]) - - - AVX512F -
immintrin.h
- Convert + + + + Convert the lower double-precision (64-bit) floating-point element in "a" to a 32-bit integer, and store the result in "dst". + [round_note] + +dst[31:0] := Convert_FP64_To_Int32(a[63:0]) + + + AVX512F +
immintrin.h
+ Convert
- - - - Convert the lower double-precision (64-bit) floating-point element in "a" to a - 64-bit integer, and store the result in "dst". - [round_note] - - dst[63:0] := Convert_FP64_To_Int64(a[63:0]) - - - AVX512F -
immintrin.h
- Convert + + + + Convert the lower double-precision (64-bit) floating-point element in "a" to a 64-bit integer, and store the result in "dst". + [round_note] + +dst[63:0] := Convert_FP64_To_Int64(a[63:0]) + + + AVX512F +
immintrin.h
+ Convert
- - - Convert the lower double-precision (64-bit) floating-point element in "a" to a - 32-bit integer, and store the result in "dst". - - dst[31:0] := Convert_FP64_To_Int32(a[63:0]) - - - AVX512F -
immintrin.h
- Convert + + + Convert the lower double-precision (64-bit) floating-point element in "a" to a 32-bit integer, and store the result in "dst". + +dst[31:0] := Convert_FP64_To_Int32(a[63:0]) + + + AVX512F +
immintrin.h
+ Convert
- - - Convert the lower double-precision (64-bit) floating-point element in "a" to a - 64-bit integer, and store the result in "dst". - - dst[63:0] := Convert_FP64_To_Int64(a[63:0]) - - - AVX512F -
immintrin.h
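The [round_note] forms above encode the rounding mode as an immediate instead of reading MXCSR. A hedged usage sketch, assuming these entries correspond to the `_mm_cvt_roundsd_si32`/`_mm_cvt_roundsd_si64` intrinsics exposed by stdarch on nightly behind its unstable AVX512 feature gate (the intrinsic names are not stated in this data excerpt):

    #[cfg(target_arch = "x86_64")]
    use core::arch::x86_64::*;

    #[cfg(target_arch = "x86_64")]
    #[target_feature(enable = "avx512f")]
    unsafe fn nearest_i32(a: __m128d) -> i32 {
        // Round to nearest-even and suppress exceptions, per [round_note].
        _mm_cvt_roundsd_si32::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a)
    }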
- Convert + + + Convert the lower double-precision (64-bit) floating-point element in "a" to a 64-bit integer, and store the result in "dst". + +dst[63:0] := Convert_FP64_To_Int64(a[63:0]) + + + AVX512F +
immintrin.h
+ Convert
- - - - - Convert the lower double-precision (64-bit) floating-point element in "b" to a - single-precision (32-bit) floating-point element, store the result in the lower element - of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". - [round_note] - - dst[31:0] := Convert_FP64_To_FP32(b[63:0]) - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + + Convert the lower double-precision (64-bit) floating-point element in "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + +dst[31:0] := Convert_FP64_To_FP32(b[63:0]) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - - - - Convert the lower double-precision (64-bit) floating-point element in "b" to a - single-precision (32-bit) floating-point element, store the result in the lower element - of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not - set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". - [round_note] - - IF k[0] - dst[31:0] := Convert_FP64_To_FP32(b[63:0]) - ELSE - dst[31:0] := src[31:0] - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + + + + Convert the lower double-precision (64-bit) floating-point element in "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + +IF k[0] + dst[31:0] := Convert_FP64_To_FP32(b[63:0]) +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - - - Convert the lower double-precision (64-bit) floating-point element in "b" to a - single-precision (32-bit) floating-point element, store the result in the lower element - of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not - set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". - - IF k[0] - dst[31:0] := Convert_FP64_To_FP32(b[63:0]) - ELSE - dst[31:0] := src[31:0] - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + + + Convert the lower double-precision (64-bit) floating-point element in "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst[31:0] := Convert_FP64_To_FP32(b[63:0]) +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - - - Convert the lower double-precision (64-bit) floating-point element in "b" to a - single-precision (32-bit) floating-point element, store the result in the lower element - of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and - copy the upper 3 packed elements from "a" to the upper elements of "dst". - [round_note] - - IF k[0] - dst[31:0] := Convert_FP64_To_FP32(b[63:0]) - ELSE - dst[31:0] := 0 - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + + + Convert the lower double-precision (64-bit) floating-point element in "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + +IF k[0] + dst[31:0] := Convert_FP64_To_FP32(b[63:0]) +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - - Convert the lower double-precision (64-bit) floating-point element in "b" to a - single-precision (32-bit) floating-point element, store the result in the lower element - of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and - copy the upper 3 packed elements from "a" to the upper elements of "dst". - - IF k[0] - dst[31:0] := Convert_FP64_To_FP32(b[63:0]) - ELSE - dst[31:0] := 0 - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
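In the masked scalar converts above only bit 0 of "k" participates; the upper three single-precision lanes always come from "a". A reference sketch of that merge rule in plain Rust (hypothetical helper, arrays standing in for __m128/__m128d):

    fn cvtsd_ss_mask(src: [f32; 4], k: u8, a: [f32; 4], b: f64) -> [f32; 4] {
        let mut dst = a; // upper 3 lanes copied from `a`
        dst[0] = if k & 1 != 0 { b as f32 } else { src[0] };
        dst
    }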
- Convert + + + + + Convert the lower double-precision (64-bit) floating-point element in "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst[31:0] := Convert_FP64_To_FP32(b[63:0]) +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - Convert the lower double-precision (64-bit) floating-point element in "a" to an - unsigned 32-bit integer, and store the result in "dst". - [round_note] - - dst[31:0] := Convert_FP64_To_UInt32(a[63:0]) - - - AVX512F -
immintrin.h
- Convert + + + + Convert the lower double-precision (64-bit) floating-point element in "a" to an unsigned 32-bit integer, and store the result in "dst". + [round_note] + +dst[31:0] := Convert_FP64_To_UInt32(a[63:0]) + + + AVX512F +
immintrin.h
+ Convert
- - - - Convert the lower double-precision (64-bit) floating-point element in "a" to an - unsigned 64-bit integer, and store the result in "dst". - [round_note] - - dst[63:0] := Convert_FP64_To_UInt64(a[63:0]) - - - AVX512F -
immintrin.h
- Convert + + + + Convert the lower double-precision (64-bit) floating-point element in "a" to an unsigned 64-bit integer, and store the result in "dst". + [round_note] + +dst[63:0] := Convert_FP64_To_UInt64(a[63:0]) + + + AVX512F +
immintrin.h
+ Convert
- - - Convert the lower double-precision (64-bit) floating-point element in "a" to an - unsigned 32-bit integer, and store the result in "dst". - - dst[31:0] := Convert_FP64_To_UInt32(a[63:0]) - - - AVX512F -
immintrin.h
- Convert + + + Convert the lower double-precision (64-bit) floating-point element in "a" to an unsigned 32-bit integer, and store the result in "dst". + +dst[31:0] := Convert_FP64_To_UInt32(a[63:0]) + + + AVX512F +
immintrin.h
+ Convert
- - - Convert the lower double-precision (64-bit) floating-point element in "a" to an - unsigned 64-bit integer, and store the result in "dst". - - dst[63:0] := Convert_FP64_To_UInt64(a[63:0]) - - - AVX512F -
immintrin.h
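The unsigned scalar converts exist for values in [2^31, 2^32) and [2^63, 2^64) that the signed forms cannot represent. A small runnable illustration in Rust; note that an `as` cast saturates out-of-range and negative inputs, whereas the instructions signal #I:

    fn demo() {
        let x = 3.0e9_f64; // exceeds i32::MAX but fits in u32
        assert_eq!(x as u32, 3_000_000_000);
        let y = 1.5e19_f64; // exceeds i64::MAX but fits in u64
        assert_eq!(y as u64, 15_000_000_000_000_000_000);
    }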
- Convert + + + Convert the lower double-precision (64-bit) floating-point element in "a" to an unsigned 64-bit integer, and store the result in "dst". + +dst[63:0] := Convert_FP64_To_UInt64(a[63:0]) + + + AVX512F +
immintrin.h
+ Convert
- - - - - Convert the signed 64-bit integer "b" to a double-precision (64-bit) - floating-point element, store the result in the lower element of "dst", and copy the - upper element from "a" to the upper element of "dst". - [round_note] - - dst[63:0] := Convert_Int64_To_FP64(b[63:0]) - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + + Convert the signed 64-bit integer "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + [round_note] + +dst[63:0] := Convert_Int64_To_FP64(b[63:0]) +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - - Convert the signed 64-bit integer "b" to a double-precision (64-bit) - floating-point element, store the result in the lower element of "dst", and copy the - upper element from "a" to the upper element of "dst". - [round_note] - - dst[63:0] := Convert_Int64_To_FP64(b[63:0]) - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + + Convert the signed 64-bit integer "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + [round_note] + +dst[63:0] := Convert_Int64_To_FP64(b[63:0]) +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - Convert the signed 32-bit integer "b" to a double-precision (64-bit) - floating-point element, store the result in the lower element of "dst", and copy the - upper element from "a" to the upper element of "dst". - - dst[63:0] := Convert_Int32_To_FP64(b[31:0]) - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + Convert the signed 32-bit integer "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := Convert_Int32_To_FP64(b[31:0]) +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - Convert the signed 64-bit integer "b" to a double-precision (64-bit) - floating-point element, store the result in the lower element of "dst", and copy the - upper element from "a" to the upper element of "dst". - - dst[63:0] := Convert_Int64_To_FP64(b[63:0]) - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
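These integer-to-double converts replace only the low lane. The 64-bit form carries [round_note] because an i64 with magnitude above 2^53 cannot be represented exactly in an f64, while the 32-bit form is always exact and so takes no rounding mode. A reference sketch (hypothetical helper; the array stands in for __m128d):

    fn cvti64_sd(a: [f64; 2], b: i64) -> [f64; 2] {
        [b as f64, a[1]] // low lane converted, high lane kept from `a`
    }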
- Convert + + + + Convert the signed 64-bit integer "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := Convert_Int64_To_FP64(b[63:0]) +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - - Convert the signed 32-bit integer "b" to a single-precision (32-bit) - floating-point element, store the result in the lower element of "dst", and copy the - upper 3 packed elements from "a" to the upper elements of "dst". - [round_note] - - dst[31:0] := Convert_Int32_To_FP32(b[31:0]) - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + + Convert the signed 32-bit integer "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + +dst[31:0] := Convert_Int32_To_FP32(b[31:0]) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - - Convert the signed 64-bit integer "b" to a single-precision (32-bit) - floating-point element, store the result in the lower element of "dst", and copy the - upper 3 packed elements from "a" to the upper elements of "dst". - [round_note] - - dst[31:0] := Convert_Int64_To_FP32(b[63:0]) - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + + Convert the signed 64-bit integer "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + +dst[31:0] := Convert_Int64_To_FP32(b[63:0]) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - - Convert the signed 32-bit integer "b" to a single-precision (32-bit) - floating-point element, store the result in the lower element of "dst", and copy the - upper 3 packed elements from "a" to the upper elements of "dst". - [round_note] - - dst[31:0] := Convert_Int32_To_FP32(b[31:0]) - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + + Convert the signed 32-bit integer "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + +dst[31:0] := Convert_Int32_To_FP32(b[31:0]) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - - Convert the signed 64-bit integer "b" to a single-precision (32-bit) - floating-point element, store the result in the lower element of "dst", and copy the - upper 3 packed elements from "a" to the upper elements of "dst". - [round_note] - - dst[31:0] := Convert_Int64_To_FP32(b[63:0]) - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + + Convert the signed 64-bit integer "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + +dst[31:0] := Convert_Int64_To_FP32(b[63:0]) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - Convert the signed 32-bit integer "b" to a single-precision (32-bit) - floating-point element, store the result in the lower element of "dst", and copy the - upper 3 packed elements from "a" to the upper elements of "dst". - - dst[31:0] := Convert_Int32_To_FP32(b[31:0]) - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + Convert the signed 32-bit integer "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := Convert_Int32_To_FP32(b[31:0]) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - Convert the signed 64-bit integer "b" to a single-precision (32-bit) - floating-point element, store the result in the lower element of "dst", and copy the - upper 3 packed elements from "a" to the upper elements of "dst". - - dst[31:0] := Convert_Int64_To_FP32(b[63:0]) - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
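For single precision even 32-bit integers can be inexact (an f32 holds 24 significand bits), which is why both the 32- and 64-bit variants above carry [round_note]. A runnable Rust check of the default nearest-even behavior (the immediate forms can select other modes):

    fn demo() {
        let n: i32 = 16_777_217; // 2^24 + 1, not representable in f32
        assert_eq!(n as f32, 16_777_216.0); // ties round to even
    }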
- Convert + + + + Convert the signed 64-bit integer "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := Convert_Int64_To_FP32(b[63:0]) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - - Convert the lower single-precision (32-bit) floating-point element in "b" to a - double-precision (64-bit) floating-point element, store the result in the lower element - of "dst", and copy the upper element from "a" to the upper element of "dst". - [sae_note] - - dst[63:0] := Convert_FP32_To_FP64(b[31:0]) - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + + Convert the lower single-precision (32-bit) floating-point element in "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + [sae_note] + +dst[63:0] := Convert_FP32_To_FP64(b[31:0]) +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - - - - Convert the lower single-precision (32-bit) floating-point element in "b" to a - double-precision (64-bit) floating-point element, store the result in the lower element - of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not - set), and copy the upper element from "a" to the upper element of "dst". - [sae_note] - - IF k[0] - dst[63:0] := Convert_FP32_To_FP64(b[31:0]) - ELSE - dst[63:0] := src[63:0] - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + + + + Convert the lower single-precision (32-bit) floating-point element in "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + [sae_note] + +IF k[0] + dst[63:0] := Convert_FP32_To_FP64(b[31:0]) +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - - - Convert the lower single-precision (32-bit) floating-point element in "b" to a - double-precision (64-bit) floating-point element, store the result in the lower element - of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not - set), and copy the upper element from "a" to the upper element of "dst". - - IF k[0] - dst[63:0] := Convert_FP32_To_FP64(b[31:0]) - ELSE - dst[63:0] := src[63:0] - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + + + Convert the lower single-precision (32-bit) floating-point element in "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := Convert_FP32_To_FP64(b[31:0]) +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - - - Convert the lower single-precision (32-bit) floating-point element in "b" to a - double-precision (64-bit) floating-point element, store the result in the lower element - of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and - copy the upper element from "a" to the upper element of "dst". - [sae_note] - - IF k[0] - dst[63:0] := Convert_FP32_To_FP64(b[31:0]) - ELSE - dst[63:0] := 0 - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + + + Convert the lower single-precision (32-bit) floating-point element in "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + [sae_note] + +IF k[0] + dst[63:0] := Convert_FP32_To_FP64(b[31:0]) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - - Convert the lower single-precision (32-bit) floating-point element in "b" to a - double-precision (64-bit) floating-point element, store the result in the lower element - of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and - copy the upper element from "a" to the upper element of "dst". - - IF k[0] - dst[63:0] := Convert_FP32_To_FP64(b[31:0]) - ELSE - dst[63:0] := 0 - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
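Note the [sae_note] here instead of [round_note]: widening f32 to f64 is always exact, so no rounding mode applies and only exception suppression is offered. Masking still follows the usual rule on lane 0, sketched below for the zeromask form (hypothetical helper, arrays in place of the vector types):

    fn cvtss_sd_maskz(k: u8, a: [f64; 2], b: [f32; 4]) -> [f64; 2] {
        let lo = if k & 1 != 0 { b[0] as f64 } else { 0.0 };
        [lo, a[1]] // upper lane copied from `a`
    }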
- Convert + + + + + Convert the lower single-precision (32-bit) floating-point element in "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := Convert_FP32_To_FP64(b[31:0]) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - Convert the lower single-precision (32-bit) floating-point element in "a" to a - 32-bit integer, and store the result in "dst". - [round_note] - - dst[31:0] := Convert_FP32_To_Int32(a[31:0]) - - - AVX512F -
immintrin.h
- Convert + + + + Convert the lower single-precision (32-bit) floating-point element in "a" to a 32-bit integer, and store the result in "dst". + [round_note] + +dst[31:0] := Convert_FP32_To_Int32(a[31:0]) + + + AVX512F +
immintrin.h
+ Convert
- - - - Convert the lower single-precision (32-bit) floating-point element in "a" to a - 64-bit integer, and store the result in "dst". - [round_note] - - dst[63:0] := Convert_FP32_To_Int64(a[31:0]) - - - AVX512F -
immintrin.h
- Convert + + + + Convert the lower single-precision (32-bit) floating-point element in "a" to a 64-bit integer, and store the result in "dst". + [round_note] + +dst[63:0] := Convert_FP32_To_Int64(a[31:0]) + + + AVX512F +
immintrin.h
+ Convert
- - - - Convert the lower single-precision (32-bit) floating-point element in "a" to a - 32-bit integer, and store the result in "dst". - [round_note] - - dst[31:0] := Convert_FP32_To_Int32(a[31:0]) - - - AVX512F -
immintrin.h
- Convert + + + + Convert the lower single-precision (32-bit) floating-point element in "a" to a 32-bit integer, and store the result in "dst". + [round_note] + +dst[31:0] := Convert_FP32_To_Int32(a[31:0]) + + + AVX512F +
immintrin.h
+ Convert
- - - - Convert the lower single-precision (32-bit) floating-point element in "a" to a - 64-bit integer, and store the result in "dst". - [round_note] - - dst[63:0] := Convert_FP32_To_Int64(a[31:0]) - - - AVX512F -
immintrin.h
- Convert + + + + Convert the lower single-precision (32-bit) floating-point element in "a" to a 64-bit integer, and store the result in "dst". + [round_note] + +dst[63:0] := Convert_FP32_To_Int64(a[31:0]) + + + AVX512F +
immintrin.h
+ Convert
- - - Convert the lower single-precision (32-bit) floating-point element in "a" to a - 32-bit integer, and store the result in "dst". - - dst[31:0] := Convert_FP32_To_Int32(a[31:0]) - - - AVX512F -
immintrin.h
- Convert + + + Convert the lower single-precision (32-bit) floating-point element in "a" to a 32-bit integer, and store the result in "dst". + +dst[31:0] := Convert_FP32_To_Int32(a[31:0]) + + + AVX512F +
immintrin.h
+ Convert
- - - Convert the lower single-precision (32-bit) floating-point element in "a" to a - 64-bit integer, and store the result in "dst". - - dst[63:0] := Convert_FP32_To_Int64(a[31:0]) - - - AVX512F -
immintrin.h
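A second hedged usage sketch, this time selecting a non-default rounding direction (again assuming the nightly stdarch name `_mm_cvt_roundss_si32` for the 32-bit entry above):

    #[cfg(target_arch = "x86_64")]
    use core::arch::x86_64::*;

    #[cfg(target_arch = "x86_64")]
    #[target_feature(enable = "avx512f")]
    unsafe fn ceil_to_i32(a: __m128) -> i32 {
        // Round toward +infinity, exceptions suppressed.
        _mm_cvt_roundss_si32::<{ _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC }>(a)
    }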
- Convert + + + Convert the lower single-precision (32-bit) floating-point element in "a" to a 64-bit integer, and store the result in "dst". + +dst[63:0] := Convert_FP32_To_Int64(a[31:0]) + + + AVX512F +
immintrin.h
+ Convert
- - - - Convert the lower single-precision (32-bit) floating-point element in "a" to an - unsigned 32-bit integer, and store the result in "dst". - [round_note] - - dst[31:0] := Convert_FP32_To_UInt32(a[31:0]) - - - AVX512F -
immintrin.h
- Convert + + + + Convert the lower single-precision (32-bit) floating-point element in "a" to an unsigned 32-bit integer, and store the result in "dst". + [round_note] + +dst[31:0] := Convert_FP32_To_UInt32(a[31:0]) + + + AVX512F +
immintrin.h
+ Convert
- - - - Convert the lower single-precision (32-bit) floating-point element in "a" to an - unsigned 64-bit integer, and store the result in "dst". - [round_note] - - dst[63:0] := Convert_FP32_To_UInt64(a[31:0]) - - - AVX512F -
immintrin.h
- Convert + + + + Convert the lower single-precision (32-bit) floating-point element in "a" to an unsigned 64-bit integer, and store the result in "dst". + [round_note] + +dst[63:0] := Convert_FP32_To_UInt64(a[31:0]) + + + AVX512F +
immintrin.h
+ Convert
- - - Convert the lower single-precision (32-bit) floating-point element in "a" to an - unsigned 32-bit integer, and store the result in "dst". - - dst[31:0] := Convert_FP32_To_UInt32(a[31:0]) - - - AVX512F -
immintrin.h
- Convert + + + Convert the lower single-precision (32-bit) floating-point element in "a" to an unsigned 32-bit integer, and store the result in "dst". + +dst[31:0] := Convert_FP32_To_UInt32(a[31:0]) + + + AVX512F +
immintrin.h
+ Convert
- - - Convert the lower single-precision (32-bit) floating-point element in "a" to an - unsigned 64-bit integer, and store the result in "dst". - - dst[63:0] := Convert_FP32_To_UInt64(a[31:0]) - - - AVX512F -
immintrin.h
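As with the double-precision forms, the unsigned single-precision converts extend the range upward; negative inputs are invalid for the hardware, while Rust's reference cast clamps them:

    fn demo() {
        assert_eq!((-1.0_f32) as u32, 0); // Rust saturates; hardware signals #I
        assert_eq!(4.0e9_f32 as u32, 4_000_000_000); // in range for u32 only
    }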
- Convert + + + Convert the lower single-precision (32-bit) floating-point element in "a" to an unsigned 64-bit integer, and store the result in "dst". + +dst[63:0] := Convert_FP32_To_UInt64(a[31:0]) + + + AVX512F +
immintrin.h
+ Convert
- - - - Convert packed double-precision (64-bit) floating-point elements in "a" to - packed 32-bit integers with truncation, and store the results in "dst". [sae_note] - - FOR j := 0 to 7 - i := 32*j - k := 64*j - dst[i+31:i] := Convert_FP64_To_Int32_Truncate(a[k+63:k]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst". [sae_note] + +FOR j := 0 to 7 + i := 32*j + k := 64*j + dst[i+31:i] := Convert_FP64_To_Int32_Truncate(a[k+63:k]) +ENDFOR +dst[MAX:256] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - Convert packed double-precision (64-bit) floating-point elements in "a" to - packed 32-bit integers with truncation, and store the results in "dst". - - FOR j := 0 to 7 - i := 32*j - k := 64*j - dst[i+31:i] := Convert_FP64_To_Int32_Truncate(a[k+63:k]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512F -
immintrin.h
- Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 7 + i := 32*j + k := 64*j + dst[i+31:i] := Convert_FP64_To_Int32_Truncate(a[k+63:k]) +ENDFOR +dst[MAX:256] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - - - Convert packed double-precision (64-bit) floating-point elements in "a" to - packed 32-bit integers with truncation, and store the results in "dst" using writemask - "k" (elements are copied from "src" when the corresponding mask bit is not set). - [sae_note] - - FOR j := 0 to 7 - i := 32*j - l := 64*j - IF k[j] - dst[i+31:i] := Convert_FP64_To_Int32_Truncate(a[l+63:l]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [sae_note] + +FOR j := 0 to 7 + i := 32*j + l := 64*j + IF k[j] + dst[i+31:i] := Convert_FP64_To_Int32_Truncate(a[l+63:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - - Convert packed double-precision (64-bit) floating-point elements in "a" to - packed 32-bit integers with truncation, and store the results in "dst" using writemask - "k" (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := 32*j - l := 64*j - IF k[j] - dst[i+31:i] := Convert_FP64_To_Int32_Truncate(a[l+63:l]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 64*j + IF k[j] + dst[i+31:i] := Convert_FP64_To_Int32_Truncate(a[l+63:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - - Convert packed double-precision (64-bit) floating-point elements in "a" to - packed 32-bit integers with truncation, and store the results in "dst" using zeromask - "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note] - - FOR j := 0 to 7 - i := 32*j - l := 64*j - IF k[j] - dst[i+31:i] := Convert_FP64_To_Int32_Truncate(a[l+63:l]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note] + +FOR j := 0 to 7 + i := 32*j + l := 64*j + IF k[j] + dst[i+31:i] := Convert_FP64_To_Int32_Truncate(a[l+63:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - Convert packed double-precision (64-bit) floating-point elements in "a" to - packed 32-bit integers with truncation, and store the results in "dst" using zeromask - "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := 32*j - l := 64*j - IF k[j] - dst[i+31:i] := Convert_FP64_To_Int32_Truncate(a[l+63:l]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F -
immintrin.h
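The truncating packed converts narrow eight doubles into the low 256 bits of the destination. A reference model of the writemask form over plain arrays (hypothetical helper; Rust's `as` truncates toward zero exactly like these _Truncate operations for in-range inputs):

    fn cvttpd_epi32_mask(src: [i32; 8], k: u8, a: [f64; 8]) -> [i32; 8] {
        let mut dst = src;
        for j in 0..8 {
            if (k >> j) & 1 != 0 {
                dst[j] = a[j] as i32; // toward zero
            }
        }
        dst
    }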
- Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 64*j + IF k[j] + dst[i+31:i] := Convert_FP64_To_Int32_Truncate(a[l+63:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - Convert packed double-precision (64-bit) floating-point elements in "a" to - packed unsigned 32-bit integers with truncation, and store the results in "dst". - [sae_note] - - FOR j := 0 to 7 - i := 32*j - k := 64*j - dst[i+31:i] := Convert_FP64_To_UInt32_Truncate(a[k+63:k]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst". [sae_note] + +FOR j := 0 to 7 + i := 32*j + k := 64*j + dst[i+31:i] := Convert_FP64_To_UInt32_Truncate(a[k+63:k]) +ENDFOR +dst[MAX:256] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - Convert packed double-precision (64-bit) floating-point elements in "a" to - packed unsigned 32-bit integers with truncation, and store the results in "dst". - - FOR j := 0 to 7 - i := 32*j - k := 64*j - dst[i+31:i] := Convert_FP64_To_UInt32_Truncate(a[k+63:k]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512F -
immintrin.h
- Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 7 + i := 32*j + k := 64*j + dst[i+31:i] := Convert_FP64_To_UInt32_Truncate(a[k+63:k]) +ENDFOR +dst[MAX:256] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - - - Convert packed double-precision (64-bit) floating-point elements in "a" to - packed unsigned 32-bit integers with truncation, and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). [sae_note] - - FOR j := 0 to 7 - i := 32*j - l := 64*j - IF k[j] - dst[i+31:i] := Convert_FP64_To_UInt32_Truncate(a[l+63:l]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [sae_note] + +FOR j := 0 to 7 + i := 32*j + l := 64*j + IF k[j] + dst[i+31:i] := Convert_FP64_To_UInt32_Truncate(a[l+63:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - - Convert packed double-precision (64-bit) floating-point elements in "a" to - packed unsigned 32-bit integers with truncation, and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - FOR j := 0 to 7 - i := 32*j - l := 64*j - IF k[j] - dst[i+31:i] := Convert_FP64_To_UInt32_Truncate(a[l+63:l]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 64*j + IF k[j] + dst[i+31:i] := Convert_FP64_To_UInt32_Truncate(a[l+63:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - - Convert packed double-precision (64-bit) floating-point elements in "a" to - packed unsigned 32-bit integers with truncation, and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - [sae_note] - - FOR j := 0 to 7 - i := 32*j - l := 64*j - IF k[j] - dst[i+31:i] := Convert_FP64_To_UInt32_Truncate(a[l+63:l]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note] + +FOR j := 0 to 7 + i := 32*j + l := 64*j + IF k[j] + dst[i+31:i] := Convert_FP64_To_UInt32_Truncate(a[l+63:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - Convert packed double-precision (64-bit) floating-point elements in "a" to - packed unsigned 32-bit integers with truncation, and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := 32*j - l := 64*j - IF k[j] - dst[i+31:i] := Convert_FP64_To_UInt32_Truncate(a[l+63:l]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F -
immintrin.h
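Truncation versus rounding is easy to see on scalar values; this minimal check mirrors the difference between the cvt and cvtt entry families (plain Rust, with `f64::round_ties_even` standing in for the default rounding mode):

    fn demo() {
        let x = 2.9_f64;
        assert_eq!(x as u32, 2); // cvtt-style: toward zero
        assert_eq!(x.round_ties_even() as u32, 3); // cvt-style: nearest-even
    }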
- Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 32*j + l := 64*j + IF k[j] + dst[i+31:i] := Convert_FP64_To_UInt32_Truncate(a[l+63:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed 32-bit integers with truncation, and store the results in "dst". [sae_note] - - FOR j := 0 to 15 - i := 32*j - dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst". [sae_note] + +FOR j := 0 to 15 + i := 32*j + dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed 32-bit integers with truncation, and store the results in "dst". - - FOR j := 0 to 15 - i := 32*j - dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 15 + i := 32*j + dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed 32-bit integers with truncation, and store the results in "dst" using writemask - "k" (elements are copied from "src" when the corresponding mask bit is not set). - [sae_note] - - FOR j := 0 to 15 - i := 32*j - IF k[j] - dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [sae_note] + +FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed 32-bit integers with truncation, and store the results in "dst" using writemask - "k" (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := 32*j - IF k[j] - dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed 32-bit integers with truncation, and store the results in "dst" using zeromask - "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note] - - FOR j := 0 to 15 - i := 32*j - IF k[j] - dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note] + +FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed 32-bit integers with truncation, and store the results in "dst" using zeromask - "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := 32*j - IF k[j] - dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
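On a truncating convert, [sae_note] only suppresses exception reporting; it cannot affect the result, since truncation already fixes the rounding direction. The zeromask rule for the 16-lane form, as a reference sketch:

    fn cvttps_epi32_maskz(k: u16, a: [f32; 16]) -> [i32; 16] {
        let mut dst = [0i32; 16];
        for j in 0..16 {
            if (k >> j) & 1 != 0 {
                dst[j] = a[j] as i32; // toward zero
            }
        }
        dst
    }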
- Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed unsigned 32-bit integers with truncation, and store the results in "dst". - [sae_note] - - FOR j := 0 to 15 - i := 32*j - dst[i+31:i] := Convert_FP32_To_UInt32_Truncate(a[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst". [sae_note] + +FOR j := 0 to 15 + i := 32*j + dst[i+31:i] := Convert_FP32_To_UInt32_Truncate(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed unsigned 32-bit integers with truncation, and store the results in "dst". - - FOR j := 0 to 15 - i := 32*j - dst[i+31:i] := Convert_FP32_To_UInt32_Truncate(a[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst". + +FOR j := 0 to 15 + i := 32*j + dst[i+31:i] := Convert_FP32_To_UInt32_Truncate(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed unsigned 32-bit integers with truncation, and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). [sae_note] - - FOR j := 0 to 15 - i := 32*j - IF k[j] - dst[i+31:i] := Convert_FP32_To_UInt32_Truncate(a[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [sae_note] + +FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := Convert_FP32_To_UInt32_Truncate(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - - Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := Convert_FP32_To_UInt32_Truncate(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F
immintrin.h
- Convert + + + + + Convert packed double-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := Convert_FP64_To_UInt32_Truncate(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed unsigned 32-bit integers with truncation, and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - [sae_note] - - FOR j := 0 to 15 - i := 32*j - IF k[j] - dst[i+31:i] := Convert_FP32_To_UInt32_Truncate(a[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note] + +FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := Convert_FP32_To_UInt32_Truncate(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := Convert_FP32_To_UInt32_Truncate(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F
immintrin.h
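A hedged usage sketch for the masked truncating form, assuming the entries above correspond to `_mm512_mask_cvttps_epu32` in nightly stdarch (the intrinsic names are not given in this excerpt):

    #[cfg(target_arch = "x86_64")]
    use core::arch::x86_64::*;

    #[cfg(target_arch = "x86_64")]
    #[target_feature(enable = "avx512f")]
    unsafe fn trunc_low_half(src: __m512i, a: __m512) -> __m512i {
        // Convert only lanes 0..=7; lanes 8..=15 are copied from `src`.
        _mm512_mask_cvttps_epu32(src, 0x00ff, a)
    }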
- Convert + + + + Convert packed double-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + IF k[j] + dst[i+31:i] := Convert_FP64_To_UInt32_Truncate(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - Convert the lower double-precision (64-bit) floating-point element in "a" to a - 32-bit integer with truncation, and store the result in "dst". - [sae_note] - - dst[31:0] := Convert_FP64_To_Int32_Truncate(a[63:0]) - - - AVX512F -
immintrin.h
- Convert + + + + Convert the lower double-precision (64-bit) floating-point element in "a" to a 32-bit integer with truncation, and store the result in "dst". + [sae_note] + +dst[31:0] := Convert_FP64_To_Int32_Truncate(a[63:0]) + + + AVX512F +
immintrin.h
+ Convert
- - - - Convert the lower double-precision (64-bit) floating-point element in "a" to a - 64-bit integer with truncation, and store the result in "dst". - [sae_note] - - dst[63:0] := Convert_FP64_To_Int64_Truncate(a[63:0]) - - - AVX512F -
immintrin.h
- Convert + + + + Convert the lower double-precision (64-bit) floating-point element in "a" to a 64-bit integer with truncation, and store the result in "dst". + [sae_note] + +dst[63:0] := Convert_FP64_To_Int64_Truncate(a[63:0]) + + + AVX512F +
immintrin.h
+ Convert
- - - - Convert the lower double-precision (64-bit) floating-point element in "a" to a - 32-bit integer with truncation, and store the result in "dst". - [sae_note] - - dst[31:0] := Convert_FP64_To_Int32_Truncate(a[63:0]) - - - AVX512F -
immintrin.h
- Convert + + + + Convert the lower double-precision (64-bit) floating-point element in "a" to a 32-bit integer with truncation, and store the result in "dst". + [sae_note] + +dst[31:0] := Convert_FP64_To_Int32_Truncate(a[63:0]) + + + AVX512F +
immintrin.h
+ Convert
- - - - Convert the lower double-precision (64-bit) floating-point element in "a" to a - 64-bit integer with truncation, and store the result in "dst". - [sae_note] - - dst[63:0] := Convert_FP64_To_Int64_Truncate(a[63:0]) - - - AVX512F -
immintrin.h
- Convert + + + + Convert the lower double-precision (64-bit) floating-point element in "a" to a 64-bit integer with truncation, and store the result in "dst". + [sae_note] + +dst[63:0] := Convert_FP64_To_Int64_Truncate(a[63:0]) + + + AVX512F +
immintrin.h
+ Convert
- - - Convert the lower double-precision (64-bit) floating-point element in "a" to a - 32-bit integer with truncation, and store the result in "dst". - - dst[31:0] := Convert_FP64_To_Int32_Truncate(a[63:0]) - - - AVX512F -
immintrin.h
- Convert + + + Convert the lower double-precision (64-bit) floating-point element in "a" to a 32-bit integer with truncation, and store the result in "dst". + +dst[31:0] := Convert_FP64_To_Int32_Truncate(a[63:0]) + + + AVX512F +
immintrin.h
+ Convert
- - - Convert the lower double-precision (64-bit) floating-point element in "a" to a - 64-bit integer with truncation, and store the result in "dst". - - dst[63:0] := Convert_FP64_To_Int64_Truncate(a[63:0]) - - - AVX512F -
immintrin.h
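The scalar truncating converts pair with the rounding forms earlier in this file; the difference shows clearly on a negative input, in plain Rust:

    fn demo() {
        let x = -7.9_f64;
        assert_eq!(x as i32, -7); // cvtt: toward zero
        assert_eq!(x.round_ties_even() as i32, -8); // cvt: nearest-even
    }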
- Convert + + + Convert the lower double-precision (64-bit) floating-point element in "a" to a 64-bit integer with truncation, and store the result in "dst". + +dst[63:0] := Convert_FP64_To_Int64_Truncate(a[63:0]) + + + AVX512F +
immintrin.h
+ Convert
- - - - Convert the lower double-precision (64-bit) floating-point element in "a" to an - unsigned 32-bit integer with truncation, and store the result in "dst". - [sae_note] - - dst[31:0] := Convert_FP64_To_UInt32_Truncate(a[63:0]) - - - AVX512F -
immintrin.h
- Convert + + + + Convert the lower double-precision (64-bit) floating-point element in "a" to an unsigned 32-bit integer with truncation, and store the result in "dst". + [sae_note] + +dst[31:0] := Convert_FP64_To_UInt32_Truncate(a[63:0]) + + + AVX512F +
immintrin.h
+ Convert
- - - - Convert the lower double-precision (64-bit) floating-point element in "a" to an - unsigned 64-bit integer with truncation, and store the result in "dst". - [sae_note] - - dst[63:0] := Convert_FP64_To_UInt64_Truncate(a[63:0]) - - - AVX512F -
immintrin.h
- Convert + + + + Convert the lower double-precision (64-bit) floating-point element in "a" to an unsigned 64-bit integer with truncation, and store the result in "dst". + [sae_note] + +dst[63:0] := Convert_FP64_To_UInt64_Truncate(a[63:0]) + + + AVX512F +
immintrin.h
+ Convert
- - - Convert the lower double-precision (64-bit) floating-point element in "a" to an - unsigned 32-bit integer with truncation, and store the result in "dst". - - dst[31:0] := Convert_FP64_To_UInt32_Truncate(a[63:0]) - - - AVX512F -
immintrin.h
- Convert + + + Convert the lower double-precision (64-bit) floating-point element in "a" to an unsigned 32-bit integer with truncation, and store the result in "dst". + +dst[31:0] := Convert_FP64_To_UInt32_Truncate(a[63:0]) + + + AVX512F +
immintrin.h
+ Convert
- - - Convert the lower double-precision (64-bit) floating-point element in "a" to an - unsigned 64-bit integer with truncation, and store the result in "dst". - - dst[63:0] := Convert_FP64_To_UInt64_Truncate(a[63:0]) - - - AVX512F -
immintrin.h
- Convert + + + Convert the lower double-precision (64-bit) floating-point element in "a" to an unsigned 64-bit integer with truncation, and store the result in "dst". + +dst[63:0] := Convert_FP64_To_UInt64_Truncate(a[63:0]) + + + AVX512F +
immintrin.h
+ Convert
- - - - Convert the lower single-precision (32-bit) floating-point element in "a" to a - 32-bit integer with truncation, and store the result in "dst". - [sae_note] - - dst[31:0] := Convert_FP32_To_Int32_Truncate(a[31:0]) - - - AVX512F -
immintrin.h
- Convert + + + + Convert the lower single-precision (32-bit) floating-point element in "a" to a 32-bit integer with truncation, and store the result in "dst". + [sae_note] + +dst[31:0] := Convert_FP32_To_Int32_Truncate(a[31:0]) + + + AVX512F +
immintrin.h
+ Convert
- - - - Convert the lower single-precision (32-bit) floating-point element in "a" to a - 64-bit integer with truncation, and store the result in "dst". - [sae_note] - - dst[63:0] := Convert_FP32_To_Int64_Truncate(a[31:0]) - - - AVX512F -
immintrin.h
- Convert + + + + Convert the lower single-precision (32-bit) floating-point element in "a" to a 64-bit integer with truncation, and store the result in "dst". + [sae_note] + +dst[63:0] := Convert_FP32_To_Int64_Truncate(a[31:0]) + + + AVX512F +
immintrin.h
+ Convert
- - - - Convert the lower single-precision (32-bit) floating-point element in "a" to a - 32-bit integer with truncation, and store the result in "dst". - [sae_note] - - dst[31:0] := Convert_FP32_To_Int32_Truncate(a[31:0]) - - - AVX512F -
immintrin.h
- Convert + + + + Convert the lower single-precision (32-bit) floating-point element in "a" to a 32-bit integer with truncation, and store the result in "dst". + [sae_note] + +dst[31:0] := Convert_FP32_To_Int32_Truncate(a[31:0]) + + + AVX512F +
immintrin.h
+ Convert
- - - - Convert the lower single-precision (32-bit) floating-point element in "a" to a - 64-bit integer with truncation, and store the result in "dst". - [sae_note] - - dst[63:0] := Convert_FP32_To_Int64_Truncate(a[31:0]) - - - AVX512F -
immintrin.h
- Convert + + + + Convert the lower single-precision (32-bit) floating-point element in "a" to a 64-bit integer with truncation, and store the result in "dst". + [sae_note] + +dst[63:0] := Convert_FP32_To_Int64_Truncate(a[31:0]) + + + AVX512F +
immintrin.h
+ Convert
- - - Convert the lower single-precision (32-bit) floating-point element in "a" to a - 32-bit integer with truncation, and store the result in "dst". - - dst[31:0] := Convert_FP32_To_Int32_Truncate(a[31:0]) - - - AVX512F -
immintrin.h
- Convert + + + Convert the lower single-precision (32-bit) floating-point element in "a" to a 32-bit integer with truncation, and store the result in "dst". + +dst[31:0] := Convert_FP32_To_Int32_Truncate(a[31:0]) + + + AVX512F +
immintrin.h
+ Convert
- - - Convert the lower single-precision (32-bit) floating-point element in "a" to a - 64-bit integer with truncation, and store the result in "dst". - - dst[63:0] := Convert_FP32_To_Int64_Truncate(a[31:0]) - - - AVX512F -
immintrin.h
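The 64-bit destination matters once an f32 magnitude passes i32 range, even though the f32 itself stores only 24 significand bits. A runnable Rust illustration (`as` saturates where the instruction would signal #I):

    fn demo() {
        let big = 3.0e10_f32; // exceeds i32::MAX
        assert!(big as i64 > i32::MAX as i64);
        assert_eq!(big as i32, i32::MAX); // Rust saturates on overflow
    }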
- Convert + + + Convert the lower single-precision (32-bit) floating-point element in "a" to a 64-bit integer with truncation, and store the result in "dst". + +dst[63:0] := Convert_FP32_To_Int64_Truncate(a[31:0]) + + + AVX512F +
immintrin.h
+ Convert
+ Convert the lower single-precision (32-bit) floating-point element in "a" to an unsigned 32-bit integer with truncation, and store the result in "dst".
+ [sae_note]
+
+ dst[31:0] := Convert_FP32_To_UInt32_Truncate(a[31:0])
+
+ AVX512F
+ immintrin.h
+ Convert

+ Convert the lower single-precision (32-bit) floating-point element in "a" to an unsigned 64-bit integer with truncation, and store the result in "dst".
+ [sae_note]
+
+ dst[63:0] := Convert_FP32_To_UInt64_Truncate(a[31:0])
+
+ AVX512F
+ immintrin.h
+ Convert

+ Convert the lower single-precision (32-bit) floating-point element in "a" to an unsigned 32-bit integer with truncation, and store the result in "dst".
+
+ dst[31:0] := Convert_FP32_To_UInt32_Truncate(a[31:0])
+
+ AVX512F
+ immintrin.h
+ Convert

+ Convert the lower single-precision (32-bit) floating-point element in "a" to an unsigned 64-bit integer with truncation, and store the result in "dst".
+
+ dst[63:0] := Convert_FP32_To_UInt64_Truncate(a[31:0])
+
+ AVX512F
+ immintrin.h
+ Convert
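The unsigned forms differ only in the destination type. A sketch under the same in-range assumption (negative and out-of-range inputs are exactly where the hardware's integer-indefinite result and Rust's saturating `as` diverge):

    fn convert_fp32_to_uint32_truncate(a: f32) -> u32 {
        a as u32 // truncate toward zero into an unsigned integer
    }

    fn main() {
        assert_eq!(convert_fp32_to_uint32_truncate(3.9), 3);
        assert_eq!(convert_fp32_to_uint32_truncate(4_000_000_000.0), 4_000_000_000);
    }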
+ Convert packed unsigned 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst".
+
+ FOR j := 0 to 7
+     i := j*64
+     l := j*32
+     dst[i+63:i] := Convert_Int64_To_FP64(a[l+31:l])
+ ENDFOR
+ dst[MAX:512] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Convert packed unsigned 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+ FOR j := 0 to 7
+     i := j*64
+     l := j*32
+     IF k[j]
+         dst[i+63:i] := Convert_Int64_To_FP64(a[l+31:l])
+     ELSE
+         dst[i+63:i] := src[i+63:i]
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Convert packed unsigned 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+ FOR j := 0 to 7
+     i := j*64
+     l := j*32
+     IF k[j]
+         dst[i+63:i] := Convert_Int64_To_FP64(a[l+31:l])
+     ELSE
+         dst[i+63:i] := 0
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+
+ AVX512F
+ immintrin.h
+ Convert
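The writemask/zeromask pair above shares one pattern: each destination lane is either the converted value, the corresponding lane of "src", or zero, selected by bit j of "k". A minimal scalar model (function names are illustrative, not stdarch's):

    // Scalar model of the masked u32 -> f64 conversions above.
    fn mask_cvtepu32_pd(src: [f64; 8], k: u8, a: [u32; 8]) -> [f64; 8] {
        let mut dst = src; // writemask: unselected lanes keep src
        for j in 0..8 {
            if k & (1 << j) != 0 {
                dst[j] = a[j] as f64; // u32 -> f64 is always exact
            }
        }
        dst
    }

    fn maskz_cvtepu32_pd(k: u8, a: [u32; 8]) -> [f64; 8] {
        mask_cvtepu32_pd([0.0; 8], k, a) // zeromask: unselected lanes are 0
    }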
+ Convert packed unsigned 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".
+ [round_note]
+
+ FOR j := 0 to 15
+     i := 32*j
+     dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i])
+ ENDFOR
+ dst[MAX:512] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Convert packed unsigned 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".
+
+ FOR j := 0 to 15
+     i := 32*j
+     dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i])
+ ENDFOR
+ dst[MAX:512] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Convert packed unsigned 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]
+
+ FOR j := 0 to 15
+     i := j*32
+     IF k[j]
+         dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i])
+     ELSE
+         dst[i+31:i] := src[i+31:i]
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Convert packed unsigned 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+ FOR j := 0 to 15
+     i := j*32
+     IF k[j]
+         dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i])
+     ELSE
+         dst[i+31:i] := src[i+31:i]
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Convert packed unsigned 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note]
+
+ FOR j := 0 to 15
+     i := 32*j
+     IF k[j]
+         dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i])
+     ELSE
+         dst[i+31:i] := 0
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Convert packed unsigned 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+ FOR j := 0 to 15
+     i := 32*j
+     IF k[j]
+         dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i])
+     ELSE
+         dst[i+31:i] := 0
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+
+ AVX512F
+ immintrin.h
+ Convert
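The [round_note] variants exist because u32 -> f32 is inexact above 2^24, so the result depends on the selected rounding mode; the plain variants use the current MXCSR rounding instead. A quick illustration of why the rounding mode matters here:

    // 2^24 + 1 has no exact f32 representation, so the conversion must
    // round; Rust's `as` uses round-to-nearest-even.
    fn main() {
        let a: u32 = 16_777_217; // 2^24 + 1
        assert_eq!(a as f32, 16_777_216.0); // the +1 is lost to rounding
    }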
+ Convert the unsigned 64-bit integer "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".
+ [round_note]
+
+ dst[63:0] := Convert_Int64_To_FP64(b[63:0])
+ dst[127:64] := a[127:64]
+ dst[MAX:128] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Convert the unsigned 32-bit integer "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".
+
+ dst[63:0] := Convert_Int32_To_FP64(b[31:0])
+ dst[127:64] := a[127:64]
+ dst[MAX:128] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Convert the unsigned 64-bit integer "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".
+
+ dst[63:0] := Convert_Int64_To_FP64(b[63:0])
+ dst[127:64] := a[127:64]
+ dst[MAX:128] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Convert the unsigned 32-bit integer "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ [round_note]
+
+ dst[31:0] := Convert_Int32_To_FP32(b[31:0])
+ dst[127:32] := a[127:32]
+ dst[MAX:128] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Convert the unsigned 64-bit integer "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ [round_note]
+
+ dst[31:0] := Convert_Int64_To_FP32(b[63:0])
+ dst[127:32] := a[127:32]
+ dst[MAX:128] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Convert the unsigned 32-bit integer "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+
+ dst[31:0] := Convert_Int32_To_FP32(b[31:0])
+ dst[127:32] := a[127:32]
+ dst[MAX:128] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Convert the unsigned 64-bit integer "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+
+ dst[31:0] := Convert_Int64_To_FP32(b[63:0])
+ dst[127:32] := a[127:32]
+ dst[MAX:128] := 0
+
+ AVX512F
+ immintrin.h
+ Convert
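All of these scalar converts follow the same shape: the low lane of "dst" receives the converted integer and the remaining lanes pass through from "a". A sketch in which an array stands in for the __m128 register (illustrative names only):

    // Low lane converted, upper three lanes copied from `a`.
    fn cvtu32_ss(a: [f32; 4], b: u32) -> [f32; 4] {
        let mut dst = a;
        dst[0] = b as f32;
        dst
    }

    fn main() {
        let a = [9.0, 1.0, 2.0, 3.0];
        assert_eq!(cvtu32_ss(a, 7), [7.0, 1.0, 2.0, 3.0]);
    }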
+ Convert packed 32-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst".
+
+ FOR j := 0 to 15
+     i := 32*j
+     k := 8*j
+     dst[k+7:k] := Truncate8(a[i+31:i])
+ ENDFOR
+ dst[MAX:128] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Convert packed 32-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+ FOR j := 0 to 15
+     i := 32*j
+     l := 8*j
+     IF k[j]
+         dst[l+7:l] := Truncate8(a[i+31:i])
+     ELSE
+         dst[l+7:l] := src[l+7:l]
+     FI
+ ENDFOR
+ dst[MAX:128] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Store
+ Convert packed 32-bit integers in "a" to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".
+
+ FOR j := 0 to 15
+     i := 32*j
+     l := 8*j
+     IF k[j]
+         MEM[base_addr+l+7:base_addr+l] := Truncate8(a[i+31:i])
+     FI
+ ENDFOR
+
+ AVX512F
+ immintrin.h
+ Convert
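The Store variant writes only the active narrowed lanes to memory and leaves every other byte at "base_addr" untouched, which is the property the masked-store form exists for. A scalar model (illustrative names, not the stdarch API):

    // Only lanes whose mask bit is set are written; all other bytes keep
    // their previous memory contents.
    fn mask_cvtepi32_storeu_epi8(base: &mut [u8; 16], k: u16, a: [i32; 16]) {
        for j in 0..16 {
            if k & (1 << j) != 0 {
                base[j] = a[j] as u8; // Truncate8: keep the low byte
            }
        }
    }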
+ Convert packed 32-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+ FOR j := 0 to 15
+     i := 32*j
+     l := 8*j
+     IF k[j]
+         dst[l+7:l] := Truncate8(a[i+31:i])
+     ELSE
+         dst[l+7:l] := 0
+     FI
+ ENDFOR
+ dst[MAX:128] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Convert packed 32-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst".
+
+ FOR j := 0 to 15
+     i := 32*j
+     k := 16*j
+     dst[k+15:k] := Truncate16(a[i+31:i])
+ ENDFOR
+ dst[MAX:256] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Convert packed 32-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+ FOR j := 0 to 15
+     i := 32*j
+     l := 16*j
+     IF k[j]
+         dst[l+15:l] := Truncate16(a[i+31:i])
+     ELSE
+         dst[l+15:l] := src[l+15:l]
+     FI
+ ENDFOR
+ dst[MAX:256] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Store
+ Convert packed 32-bit integers in "a" to packed 16-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".
+
+ FOR j := 0 to 15
+     i := 32*j
+     l := 16*j
+     IF k[j]
+         MEM[base_addr+l+15:base_addr+l] := Truncate16(a[i+31:i])
+     FI
+ ENDFOR
+
+ AVX512F
+ immintrin.h
+ Convert

+ Convert packed 32-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+ FOR j := 0 to 15
+     i := 32*j
+     l := 16*j
+     IF k[j]
+         dst[l+15:l] := Truncate16(a[i+31:i])
+     ELSE
+         dst[l+15:l] := 0
+     FI
+ ENDFOR
+ dst[MAX:256] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Convert packed 64-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst".
+
+ FOR j := 0 to 7
+     i := 64*j
+     k := 8*j
+     dst[k+7:k] := Truncate8(a[i+63:i])
+ ENDFOR
+ dst[MAX:128] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Convert packed 64-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+ FOR j := 0 to 7
+     i := 64*j
+     l := 8*j
+     IF k[j]
+         dst[l+7:l] := Truncate8(a[i+63:i])
+     ELSE
+         dst[l+7:l] := src[l+7:l]
+     FI
+ ENDFOR
+ dst[MAX:128] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Store
+ Convert packed 64-bit integers in "a" to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".
+
+ FOR j := 0 to 7
+     i := 64*j
+     l := 8*j
+     IF k[j]
+         MEM[base_addr+l+7:base_addr+l] := Truncate8(a[i+63:i])
+     FI
+ ENDFOR
+
+ AVX512F
+ immintrin.h
+ Convert

+ Convert packed 64-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+ FOR j := 0 to 7
+     i := 64*j
+     l := 8*j
+     IF k[j]
+         dst[l+7:l] := Truncate8(a[i+63:i])
+     ELSE
+         dst[l+7:l] := 0
+     FI
+ ENDFOR
+ dst[MAX:128] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Convert packed 64-bit integers in "a" to packed 32-bit integers with truncation, and store the results in "dst".
+
+ FOR j := 0 to 7
+     i := 64*j
+     k := 32*j
+     dst[k+31:k] := Truncate32(a[i+63:i])
+ ENDFOR
+ dst[MAX:256] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Convert packed 64-bit integers in "a" to packed 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+ FOR j := 0 to 7
+     i := 64*j
+     l := 32*j
+     IF k[j]
+         dst[l+31:l] := Truncate32(a[i+63:i])
+     ELSE
+         dst[l+31:l] := src[l+31:l]
+     FI
+ ENDFOR
+ dst[MAX:256] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Store
+ Convert packed 64-bit integers in "a" to packed 32-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".
+
+ FOR j := 0 to 7
+     i := 64*j
+     l := 32*j
+     IF k[j]
+         MEM[base_addr+l+31:base_addr+l] := Truncate32(a[i+63:i])
+     FI
+ ENDFOR
+
+ AVX512F
+ immintrin.h
+ Convert

+ Convert packed 64-bit integers in "a" to packed 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+ FOR j := 0 to 7
+     i := 64*j
+     l := 32*j
+     IF k[j]
+         dst[l+31:l] := Truncate32(a[i+63:i])
+     ELSE
+         dst[l+31:l] := 0
+     FI
+ ENDFOR
+ dst[MAX:256] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Convert packed 64-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst".
+
+ FOR j := 0 to 7
+     i := 64*j
+     k := 16*j
+     dst[k+15:k] := Truncate16(a[i+63:i])
+ ENDFOR
+ dst[MAX:128] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Convert packed 64-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+ FOR j := 0 to 7
+     i := 64*j
+     l := 16*j
+     IF k[j]
+         dst[l+15:l] := Truncate16(a[i+63:i])
+     ELSE
+         dst[l+15:l] := src[l+15:l]
+     FI
+ ENDFOR
+ dst[MAX:128] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Store
+ Convert packed 64-bit integers in "a" to packed 16-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".
+
+ FOR j := 0 to 7
+     i := 64*j
+     l := 16*j
+     IF k[j]
+         MEM[base_addr+l+15:base_addr+l] := Truncate16(a[i+63:i])
+     FI
+ ENDFOR
+
+ AVX512F
+ immintrin.h
+ Convert

+ Convert packed 64-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+ FOR j := 0 to 7
+     i := 64*j
+     l := 16*j
+     IF k[j]
+         dst[l+15:l] := Truncate16(a[i+63:i])
+     ELSE
+         dst[l+15:l] := 0
+     FI
+ ENDFOR
+ dst[MAX:128] := 0
+
+ AVX512F
+ immintrin.h
+ Convert
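Every Truncate8/Truncate16/Truncate32 above simply keeps the low bits of the wider element, which is exactly what Rust's narrowing `as` cast does. A quick sketch:

    fn truncate8(x: i32) -> i8 { x as i8 }     // keep bits 7:0
    fn truncate32(x: i64) -> i32 { x as i32 }  // keep bits 31:0

    fn main() {
        assert_eq!(truncate8(0x1234), 0x34);
        assert_eq!(truncate32(0x1_0000_0001), 1);
    }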
+ Convert packed signed 32-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst".
+
+ FOR j := 0 to 15
+     i := 32*j
+     k := 8*j
+     dst[k+7:k] := Saturate8(a[i+31:i])
+ ENDFOR
+ dst[MAX:128] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Convert packed signed 32-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+ FOR j := 0 to 15
+     i := 32*j
+     l := 8*j
+     IF k[j]
+         dst[l+7:l] := Saturate8(a[i+31:i])
+     ELSE
+         dst[l+7:l] := src[l+7:l]
+     FI
+ ENDFOR
+ dst[MAX:128] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Store
+ Convert packed signed 32-bit integers in "a" to packed 8-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".
+
+ FOR j := 0 to 15
+     i := 32*j
+     l := 8*j
+     IF k[j]
+         MEM[base_addr+l+7:base_addr+l] := Saturate8(a[i+31:i])
+     FI
+ ENDFOR
+
+ AVX512F
+ immintrin.h
+ Convert

+ Convert packed signed 32-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+ FOR j := 0 to 15
+     i := 32*j
+     l := 8*j
+     IF k[j]
+         dst[l+7:l] := Saturate8(a[i+31:i])
+     ELSE
+         dst[l+7:l] := 0
+     FI
+ ENDFOR
+ dst[MAX:128] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Convert packed signed 32-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst".
+
+ FOR j := 0 to 15
+     i := 32*j
+     k := 16*j
+     dst[k+15:k] := Saturate16(a[i+31:i])
+ ENDFOR
+ dst[MAX:256] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Convert packed signed 32-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+ FOR j := 0 to 15
+     i := 32*j
+     l := 16*j
+     IF k[j]
+         dst[l+15:l] := Saturate16(a[i+31:i])
+     ELSE
+         dst[l+15:l] := src[l+15:l]
+     FI
+ ENDFOR
+ dst[MAX:256] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Store
+ Convert packed signed 32-bit integers in "a" to packed 16-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".
+
+ FOR j := 0 to 15
+     i := 32*j
+     l := 16*j
+     IF k[j]
+         MEM[base_addr+l+15:base_addr+l] := Saturate16(a[i+31:i])
+     FI
+ ENDFOR
+
+ AVX512F
+ immintrin.h
+ Convert

+ Convert packed signed 32-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+ FOR j := 0 to 15
+     i := 32*j
+     l := 16*j
+     IF k[j]
+         dst[l+15:l] := Saturate16(a[i+31:i])
+     ELSE
+         dst[l+15:l] := 0
+     FI
+ ENDFOR
+ dst[MAX:256] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Convert packed signed 64-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst".
+
+ FOR j := 0 to 7
+     i := 64*j
+     k := 8*j
+     dst[k+7:k] := Saturate8(a[i+63:i])
+ ENDFOR
+ dst[MAX:64] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Convert packed signed 64-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+ FOR j := 0 to 7
+     i := 64*j
+     l := 8*j
+     IF k[j]
+         dst[l+7:l] := Saturate8(a[i+63:i])
+     ELSE
+         dst[l+7:l] := src[l+7:l]
+     FI
+ ENDFOR
+ dst[MAX:64] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Store
+ Convert packed signed 64-bit integers in "a" to packed 8-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".
+
+ FOR j := 0 to 7
+     i := 64*j
+     l := 8*j
+     IF k[j]
+         MEM[base_addr+l+7:base_addr+l] := Saturate8(a[i+63:i])
+     FI
+ ENDFOR
+
+ AVX512F
+ immintrin.h
+ Convert

+ Convert packed signed 64-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+ FOR j := 0 to 7
+     i := 64*j
+     l := 8*j
+     IF k[j]
+         dst[l+7:l] := Saturate8(a[i+63:i])
+     ELSE
+         dst[l+7:l] := 0
+     FI
+ ENDFOR
+ dst[MAX:64] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Convert packed signed 64-bit integers in "a" to packed 32-bit integers with signed saturation, and store the results in "dst".
+
+ FOR j := 0 to 7
+     i := 64*j
+     k := 32*j
+     dst[k+31:k] := Saturate32(a[i+63:i])
+ ENDFOR
+ dst[MAX:256] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Convert packed signed 64-bit integers in "a" to packed 32-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+ FOR j := 0 to 7
+     i := 64*j
+     l := 32*j
+     IF k[j]
+         dst[l+31:l] := Saturate32(a[i+63:i])
+     ELSE
+         dst[l+31:l] := src[l+31:l]
+     FI
+ ENDFOR
+ dst[MAX:256] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Store
+ Convert packed signed 64-bit integers in "a" to packed 32-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".
+
+ FOR j := 0 to 7
+     i := 64*j
+     l := 32*j
+     IF k[j]
+         MEM[base_addr+l+31:base_addr+l] := Saturate32(a[i+63:i])
+     FI
+ ENDFOR
+
+ AVX512F
+ immintrin.h
+ Convert

+ Convert packed signed 64-bit integers in "a" to packed 32-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+ FOR j := 0 to 7
+     i := 64*j
+     l := 32*j
+     IF k[j]
+         dst[l+31:l] := Saturate32(a[i+63:i])
+     ELSE
+         dst[l+31:l] := 0
+     FI
+ ENDFOR
+ dst[MAX:256] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Convert packed signed 64-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst".
+
+ FOR j := 0 to 7
+     i := 64*j
+     k := 16*j
+     dst[k+15:k] := Saturate16(a[i+63:i])
+ ENDFOR
+ dst[MAX:128] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Convert packed signed 64-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+ FOR j := 0 to 7
+     i := 64*j
+     l := 16*j
+     IF k[j]
+         dst[l+15:l] := Saturate16(a[i+63:i])
+     ELSE
+         dst[l+15:l] := src[l+15:l]
+     FI
+ ENDFOR
+ dst[MAX:128] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Store
+ Convert packed signed 64-bit integers in "a" to packed 16-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".
+
+ FOR j := 0 to 7
+     i := 64*j
+     l := 16*j
+     IF k[j]
+         MEM[base_addr+l+15:base_addr+l] := Saturate16(a[i+63:i])
+     FI
+ ENDFOR
+
+ AVX512F
+ immintrin.h
+ Convert

+ Convert packed signed 64-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+ FOR j := 0 to 7
+     i := 64*j
+     l := 16*j
+     IF k[j]
+         dst[l+15:l] := Saturate16(a[i+63:i])
+     ELSE
+         dst[l+15:l] := 0
+     FI
+ ENDFOR
+ dst[MAX:128] := 0
+
+ AVX512F
+ immintrin.h
+ Convert
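Unlike the truncating family, Saturate8/Saturate16/Saturate32 clamp out-of-range values to the bounds of the narrower signed type instead of discarding high bits. A scalar sketch:

    fn saturate8(x: i32) -> i8 {
        x.clamp(i8::MIN as i32, i8::MAX as i32) as i8
    }

    fn main() {
        assert_eq!(saturate8(300), 127);   // clamped to i8::MAX
        assert_eq!(saturate8(-300), -128); // clamped to i8::MIN
        assert_eq!(saturate8(5), 5);       // in range: unchanged
    }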
+ Sign extend packed 8-bit integers in "a" to packed 32-bit integers, and store the results in "dst".
+
+ FOR j := 0 to 15
+     i := 32*j
+     k := 8*j
+     dst[i+31:i] := SignExtend32(a[k+7:k])
+ ENDFOR
+ dst[MAX:512] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Sign extend packed 8-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+ FOR j := 0 to 15
+     i := 32*j
+     l := 8*j
+     IF k[j]
+         dst[i+31:i] := SignExtend32(a[l+7:l])
+     ELSE
+         dst[i+31:i] := src[i+31:i]
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Sign extend packed 8-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+ FOR j := 0 to 15
+     i := 32*j
+     l := 8*j
+     IF k[j]
+         dst[i+31:i] := SignExtend32(a[l+7:l])
+     ELSE
+         dst[i+31:i] := 0
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Sign extend packed 8-bit integers in the low 8 bytes of "a" to packed 64-bit integers, and store the results in "dst".
+
+ FOR j := 0 to 7
+     i := 64*j
+     k := 8*j
+     dst[i+63:i] := SignExtend64(a[k+7:k])
+ ENDFOR
+ dst[MAX:512] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Sign extend packed 8-bit integers in the low 8 bytes of "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+ FOR j := 0 to 7
+     i := 64*j
+     l := 8*j
+     IF k[j]
+         dst[i+63:i] := SignExtend64(a[l+7:l])
+     ELSE
+         dst[i+63:i] := src[i+63:i]
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Sign extend packed 8-bit integers in the low 8 bytes of "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+ FOR j := 0 to 7
+     i := 64*j
+     l := 8*j
+     IF k[j]
+         dst[i+63:i] := SignExtend64(a[l+7:l])
+     ELSE
+         dst[i+63:i] := 0
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Sign extend packed 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst".
+
+ FOR j := 0 to 7
+     i := 64*j
+     k := 32*j
+     dst[i+63:i] := SignExtend64(a[k+31:k])
+ ENDFOR
+ dst[MAX:512] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Sign extend packed 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+ FOR j := 0 to 7
+     i := 64*j
+     l := 32*j
+     IF k[j]
+         dst[i+63:i] := SignExtend64(a[l+31:l])
+     ELSE
+         dst[i+63:i] := src[i+63:i]
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Sign extend packed 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+ FOR j := 0 to 7
+     i := 64*j
+     l := 32*j
+     IF k[j]
+         dst[i+63:i] := SignExtend64(a[l+31:l])
+     ELSE
+         dst[i+63:i] := 0
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Sign extend packed 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst".
+
+ FOR j := 0 to 15
+     i := 32*j
+     k := 16*j
+     dst[i+31:i] := SignExtend32(a[k+15:k])
+ ENDFOR
+ dst[MAX:512] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Sign extend packed 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+ FOR j := 0 to 15
+     i := j*32
+     l := j*16
+     IF k[j]
+         dst[i+31:i] := SignExtend32(a[l+15:l])
+     ELSE
+         dst[i+31:i] := src[i+31:i]
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Sign extend packed 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+ FOR j := 0 to 15
+     i := 32*j
+     l := 16*j
+     IF k[j]
+         dst[i+31:i] := SignExtend32(a[l+15:l])
+     ELSE
+         dst[i+31:i] := 0
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Sign extend packed 16-bit integers in "a" to packed 64-bit integers, and store the results in "dst".
+
+ FOR j := 0 to 7
+     i := 64*j
+     k := 16*j
+     dst[i+63:i] := SignExtend64(a[k+15:k])
+ ENDFOR
+ dst[MAX:512] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Sign extend packed 16-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+ FOR j := 0 to 7
+     i := 64*j
+     l := 16*j
+     IF k[j]
+         dst[i+63:i] := SignExtend64(a[l+15:l])
+     ELSE
+         dst[i+63:i] := src[i+63:i]
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+
+ AVX512F
+ immintrin.h
+ Convert

+ Sign extend packed 16-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+ FOR j := 0 to 7
+     i := 64*j
+     l := 16*j
+     IF k[j]
+         dst[i+63:i] := SignExtend64(a[l+15:l])
+     ELSE
+         dst[i+63:i] := 0
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+
+ AVX512F
+ immintrin.h
+ Convert
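SignExtend32/SignExtend64 replicate the source's sign bit into the new high bits; in Rust, a widening cast from a signed integer does the same thing:

    fn sign_extend32(x: i8) -> i32 { x as i32 }

    fn main() {
        assert_eq!(sign_extend32(-1), -1);     // 0xFF -> 0xFFFF_FFFF
        assert_eq!(sign_extend32(0x7F), 0x7F); // sign bit clear: zero-fill
    }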
- - - Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit - integers with unsigned saturation, and store the results in "dst". - - FOR j := 0 to 15 - i := 32*j - k := 8*j - dst[k+7:k] := SaturateU8(a[i+31:i]) - ENDFOR - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Convert + + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst". + +FOR j := 0 to 15 + i := 32*j + k := 8*j + dst[k+7:k] := SaturateU8(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - - Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit - integers with unsigned saturation, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := 32*j - l := 8*j - IF k[j] - dst[l+7:l] := SaturateU8(a[i+31:i]) - ELSE - dst[l+7:l] := src[l+7:l] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + l := 8*j + IF k[j] + dst[l+7:l] := SaturateU8(a[i+31:i]) + ELSE + dst[l+7:l] := src[l+7:l] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Convert
- Store - - - - - Convert packed unsigned 32-bit integers in "a" to packed 8-bit integers with - unsigned saturation, and store the active results (those with their respective bit set - in writemask "k") to unaligned memory at "base_addr". - - FOR j := 0 to 15 - i := 32*j - l := 8*j - IF k[j] - MEM[base_addr+l+7:base_addr+l] := SaturateU8(a[i+31:i]) - FI - ENDFOR - - - AVX512F -
immintrin.h
- Convert + Store + + + + + Convert packed unsigned 32-bit integers in "a" to packed 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 15 + i := 32*j + l := 8*j + IF k[j] + MEM[base_addr+l+7:base_addr+l] := SaturateU8(a[i+31:i]) + FI +ENDFOR + + + AVX512F +
immintrin.h
+ Convert
- - - - Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit - integers with unsigned saturation, and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := 32*j - l := 8*j - IF k[j] - dst[l+7:l] := SaturateU8(a[i+31:i]) - ELSE - dst[l+7:l] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + l := 8*j + IF k[j] + dst[l+7:l] := SaturateU8(a[i+31:i]) + ELSE + dst[l+7:l] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Convert
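The four u32-to-u8 entries above (plain, writemask, masked store, zeromask) all apply the same SaturateU8 rule and differ only in how lanes are merged. A sketch of the plain and writemask forms via stdarch's `_mm512_cvtusepi32_epi8` / `_mm512_mask_cvtusepi32_epi8`, with example values of my own choosing:

use std::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn demo() {
    let a = _mm512_set1_epi32(1000); // 1000 saturates to 255 in every u8 lane
    let bytes: [u8; 16] = core::mem::transmute(_mm512_cvtusepi32_epi8(a));
    assert_eq!(bytes, [255u8; 16]);

    // Writemask form: lanes whose mask bit is clear keep the value from `src`.
    let src = _mm_set1_epi8(7);
    let merged: [u8; 16] =
        core::mem::transmute(_mm512_mask_cvtusepi32_epi8(src, 0b00000000_11111111, a));
    assert_eq!(&merged[..8], &[255u8; 8]);
    assert_eq!(&merged[8..], &[7u8; 8]);
}

fn main() {
    if is_x86_feature_detected!("avx512f") {
        unsafe { demo() }
    }
}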
- - - Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit - integers with unsigned saturation, and store the results in "dst". - - FOR j := 0 to 15 - i := 32*j - k := 16*j - dst[k+15:k] := SaturateU16(a[i+31:i]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512F -
immintrin.h
- Convert + + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst". + +FOR j := 0 to 15 + i := 32*j + k := 16*j + dst[k+15:k] := SaturateU16(a[i+31:i]) +ENDFOR +dst[MAX:256] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - - Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit - integers with unsigned saturation, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := 32*j - l := 16*j - IF k[j] - dst[l+15:l] := SaturateU16(a[i+31:i]) - ELSE - dst[l+15:l] := src[l+15:l] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + l := 16*j + IF k[j] + dst[l+15:l] := SaturateU16(a[i+31:i]) + ELSE + dst[l+15:l] := src[l+15:l] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F +
immintrin.h
+ Convert
- Store - - - - - Convert packed unsigned 32-bit integers in "a" to packed 16-bit integers with - unsigned saturation, and store the active results (those with their respective bit set - in writemask "k") to unaligned memory at "base_addr". - - FOR j := 0 to 15 - i := 32*j - l := 16*j - IF k[j] - MEM[base_addr+l+15:base_addr+l] := SaturateU16(a[i+31:i]) - FI - ENDFOR - - - AVX512F -
immintrin.h
- Convert + Store + + + + + Convert packed unsigned 32-bit integers in "a" to packed 16-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 15 + i := 32*j + l := 16*j + IF k[j] + MEM[base_addr+l+15:base_addr+l] := SaturateU16(a[i+31:i]) + FI +ENDFOR + + + AVX512F +
immintrin.h
+ Convert
- - - - Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit - integers with unsigned saturation, and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := 32*j - l := 16*j - IF k[j] - dst[l+15:l] := SaturateU16(a[i+31:i]) - ELSE - dst[l+15:l] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + l := 16*j + IF k[j] + dst[l+15:l] := SaturateU16(a[i+31:i]) + ELSE + dst[l+15:l] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F +
immintrin.h
+ Convert
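The u32-to-u16 family follows the same shape, only the result now fills a 256-bit vector. A small sketch using stdarch's `_mm512_cvtusepi32_epi16` (illustrative values, not from this patch):

use std::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn narrow(a: __m512i) -> __m256i {
    // u32 -> u16 with unsigned saturation; sixteen lanes fill a 256-bit vector.
    _mm512_cvtusepi32_epi16(a)
}

fn main() {
    if is_x86_feature_detected!("avx512f") {
        let r = unsafe { narrow(_mm512_set1_epi32(70_000)) };
        let lanes: [u16; 16] = unsafe { core::mem::transmute(r) };
        assert_eq!(lanes, [u16::MAX; 16]); // 70_000 > 65_535, so every lane saturates
    }
}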
- - - Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit - integers with unsigned saturation, and store the results in "dst". - - FOR j := 0 to 7 - i := 64*j - k := 8*j - dst[k+7:k] := SaturateU8(a[i+63:i]) - ENDFOR - dst[MAX:64] := 0 - - - AVX512F -
immintrin.h
- Convert + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst". + +FOR j := 0 to 7 + i := 64*j + k := 8*j + dst[k+7:k] := SaturateU8(a[i+63:i]) +ENDFOR +dst[MAX:64] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - - Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit - integers with unsigned saturation, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := 64*j - l := 8*j - IF k[j] - dst[l+7:l] := SaturateU8(a[i+63:i]) - ELSE - dst[l+7:l] := src[l+7:l] - FI - ENDFOR - dst[MAX:64] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 8*j + IF k[j] + dst[l+7:l] := SaturateU8(a[i+63:i]) + ELSE + dst[l+7:l] := src[l+7:l] + FI +ENDFOR +dst[MAX:64] := 0 + + + AVX512F +
immintrin.h
+ Convert
- Store - - - - - Convert packed unsigned 64-bit integers in "a" to packed 8-bit integers with - unsigned saturation, and store the active results (those with their respective bit set - in writemask "k") to unaligned memory at "base_addr". - - FOR j := 0 to 7 - i := 64*j - l := 8*j - IF k[j] - MEM[base_addr+l+7:base_addr+l] := SaturateU8(a[i+63:i]) - FI - ENDFOR - - - AVX512F -
immintrin.h
- Convert + Store + + + + + Convert packed unsigned 64-bit integers in "a" to packed 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 7 + i := 64*j + l := 8*j + IF k[j] + MEM[base_addr+l+7:base_addr+l] := SaturateU8(a[i+63:i]) + FI +ENDFOR + + + AVX512F +
immintrin.h
+ Convert
- - - - Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit - integers with unsigned saturation, and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := 64*j - l := 8*j - IF k[j] - dst[l+7:l] := SaturateU8(a[i+63:i]) - ELSE - dst[l+7:l] := 0 - FI - ENDFOR - dst[MAX:64] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 8*j + IF k[j] + dst[l+7:l] := SaturateU8(a[i+63:i]) + ELSE + dst[l+7:l] := 0 + FI +ENDFOR +dst[MAX:64] := 0 + + + AVX512F +
immintrin.h
+ Convert
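For the u64-to-u8 case, note the dst[MAX:64] := 0 step: only the low 8 bytes of the 128-bit result carry data. A sketch with stdarch's `_mm512_cvtusepi64_epi8` (helper name and values are mine):

use std::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn narrow(a: __m512i) -> __m128i {
    // Eight u64 lanes saturate into the low 8 bytes; bits 127:64 of dst are
    // zeroed, matching the dst[MAX:64] := 0 step in the pseudocode.
    _mm512_cvtusepi64_epi8(a)
}

fn main() {
    if is_x86_feature_detected!("avx512f") {
        let bytes: [u8; 16] = unsafe { core::mem::transmute(narrow(_mm512_set1_epi64(300))) };
        assert_eq!(&bytes[..8], &[255u8; 8]); // 300 saturates to u8::MAX
        assert_eq!(&bytes[8..], &[0u8; 8]);
    }
}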
- - - Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit - integers with unsigned saturation, and store the results in "dst". - - FOR j := 0 to 7 - i := 64*j - k := 32*j - dst[k+31:k] := SaturateU32(a[i+63:i]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512F -
immintrin.h
- Convert + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit integers with unsigned saturation, and store the results in "dst". + +FOR j := 0 to 7 + i := 64*j + k := 32*j + dst[k+31:k] := SaturateU32(a[i+63:i]) +ENDFOR +dst[MAX:256] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - - Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit - integers with unsigned saturation, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := 64*j - l := 32*j - IF k[j] - dst[l+31:l] := SaturateU32(a[i+63:i]) - ELSE - dst[l+31:l] := src[l+31:l] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 32*j + IF k[j] + dst[l+31:l] := SaturateU32(a[i+63:i]) + ELSE + dst[l+31:l] := src[l+31:l] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F +
immintrin.h
+ Convert
- Store - - - - - Convert packed unsigned 64-bit integers in "a" to packed 32-bit integers with - unsigned saturation, and store the active results (those with their respective bit set - in writemask "k") to unaligned memory at "base_addr". - - FOR j := 0 to 7 - i := 64*j - l := 32*j - IF k[j] - MEM[base_addr+l+31:base_addr+l] := SaturateU32(a[i+63:i]) - FI - ENDFOR - - - AVX512F -
immintrin.h
- Convert + Store + + + + + Convert packed unsigned 64-bit integers in "a" to packed 32-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 7 + i := 64*j + l := 32*j + IF k[j] + MEM[base_addr+l+31:base_addr+l] := SaturateU32(a[i+63:i]) + FI +ENDFOR + + + AVX512F +
immintrin.h
+ Convert
- - - - Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit - integers with unsigned saturation, and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := 64*j - l := 32*j - IF k[j] - dst[l+31:l] := SaturateU32(a[i+63:i]) - ELSE - dst[l+31:l] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 32*j + IF k[j] + dst[l+31:l] := SaturateU32(a[i+63:i]) + ELSE + dst[l+31:l] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit - integers with unsigned saturation, and store the results in "dst". - - FOR j := 0 to 7 - i := 64*j - k := 16*j - dst[k+15:k] := SaturateU16(a[i+63:i]) - ENDFOR - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Convert + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst". + +FOR j := 0 to 7 + i := 64*j + k := 16*j + dst[k+15:k] := SaturateU16(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - - Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit - integers with unsigned saturation, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := 64*j - l := 16*j - IF k[j] - dst[l+15:l] := SaturateU16(a[i+63:i]) - ELSE - dst[l+15:l] := src[l+15:l] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 16*j + IF k[j] + dst[l+15:l] := SaturateU16(a[i+63:i]) + ELSE + dst[l+15:l] := src[l+15:l] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Convert
- Store - - - - - Convert packed unsigned 64-bit integers in "a" to packed 16-bit integers with - unsigned saturation, and store the active results (those with their respective bit set - in writemask "k") to unaligned memory at "base_addr". - - FOR j := 0 to 7 - i := 64*j - l := 16*j - IF k[j] - MEM[base_addr+l+15:base_addr+l] := SaturateU16(a[i+63:i]) - FI - ENDFOR - - - AVX512F -
immintrin.h
- Convert + Store + + + + + Convert packed unsigned 64-bit integers in "a" to packed 16-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +FOR j := 0 to 7 + i := 64*j + l := 16*j + IF k[j] + MEM[base_addr+l+15:base_addr+l] := SaturateU16(a[i+63:i]) + FI +ENDFOR + + + AVX512F +
immintrin.h
+ Convert
- - - - Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit - integers with unsigned saturation, and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := 64*j - l := 16*j - IF k[j] - dst[l+15:l] := SaturateU16(a[i+63:i]) - ELSE - dst[l+15:l] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 16*j + IF k[j] + dst[l+15:l] := SaturateU16(a[i+63:i]) + ELSE + dst[l+15:l] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Convert
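The remaining u64 down-conversions (to u32 and to u16) behave the same way at wider lane sizes. A combined sketch of stdarch's `_mm512_cvtusepi64_epi32` and the zeromask `_mm512_maskz_cvtusepi64_epi16`, under assumed illustrative inputs:

use std::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn demo() {
    let a = _mm512_set1_epi64(1_i64 << 40);
    // u64 -> u32: 2^40 exceeds u32::MAX, so all eight lanes saturate.
    let d32: [u32; 8] = core::mem::transmute(_mm512_cvtusepi64_epi32(a));
    assert_eq!(d32, [u32::MAX; 8]);
    // u64 -> u16 with a zeromask: only the low four lanes survive.
    let d16: [u16; 8] = core::mem::transmute(_mm512_maskz_cvtusepi64_epi16(0b0000_1111, a));
    assert_eq!(d16, [u16::MAX, u16::MAX, u16::MAX, u16::MAX, 0, 0, 0, 0]);
}

fn main() {
    if is_x86_feature_detected!("avx512f") {
        unsafe { demo() }
    }
}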
- - - Zero extend packed unsigned 8-bit integers in "a" to packed 32-bit integers, - and store the results in "dst". - - FOR j := 0 to 15 - i := 32*j - k := 8*j - dst[i+31:i] := ZeroExtend32(a[k+7:k]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Convert + + + Zero extend packed unsigned 8-bit integers in "a" to packed 32-bit integers, and store the results in "dst". + +FOR j := 0 to 15 + i := 32*j + k := 8*j + dst[i+31:i] := ZeroExtend32(a[k+7:k]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - - Zero extend packed unsigned 8-bit integers in "a" to packed 32-bit integers, - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := 32*j - l := 8*j - IF k[j] - dst[i+31:i] := ZeroExtend32(a[l+7:l]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + + Zero extend packed unsigned 8-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + l := 8*j + IF k[j] + dst[i+31:i] := ZeroExtend32(a[l+7:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - Zero extend packed unsigned 8-bit integers in "a" to packed 32-bit integers, - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := 32*j - l := 8*j - IF k[j] - dst[i+31:i] := ZeroExtend32(a[l+7:l]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + Zero extend packed unsigned 8-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + l := 8*j + IF k[j] + dst[i+31:i] := ZeroExtend32(a[l+7:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Convert
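ZeroExtend32, unlike the SignExtend operations earlier, treats the source lanes as unsigned, which is easiest to see with an all-ones byte. A sketch via stdarch's `_mm512_cvtepu8_epi32` (the mask variants merge lanes exactly as in the conversions above):

use std::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn widen(a: __m128i) -> __m512i {
    // Zero extension: the byte 0xFF becomes 255, not -1.
    _mm512_cvtepu8_epi32(a)
}

fn main() {
    if is_x86_feature_detected!("avx512f") {
        let lanes: [i32; 16] = unsafe { core::mem::transmute(widen(_mm_set1_epi8(-1))) };
        assert_eq!(lanes, [255; 16]);
    }
}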
- - - Zero extend packed unsigned 8-bit integers in the low 8 byte sof "a" to packed - 64-bit integers, and store the results in "dst". - - FOR j := 0 to 7 - i := 64*j - k := 8*j - dst[i+63:i] := ZeroExtend64(a[k+7:k]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Convert
+
+
+ Zero extend packed unsigned 8-bit integers in the low 8 bytes of "a" to packed 64-bit integers, and store the results in "dst".
+
+FOR j := 0 to 7
+ i := 64*j
+ k := 8*j
+ dst[i+63:i] := ZeroExtend64(a[k+7:k])
+ENDFOR
+dst[MAX:512] := 0
+
+
+ AVX512F
+
immintrin.h
+ Convert
- - - - - Zero extend packed unsigned 8-bit integers in the low 8 bytes of "a" to packed - 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := 64*j - l := 8*j - IF k[j] - dst[i+63:i] := ZeroExtend64(a[l+7:l]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + + Zero extend packed unsigned 8-bit integers in the low 8 bytes of "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 8*j + IF k[j] + dst[i+63:i] := ZeroExtend64(a[l+7:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - Zero extend packed unsigned 8-bit integers in the low 8 bytes of "a" to packed - 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := 64*j - l := 8*j - IF k[j] - dst[i+63:i] := ZeroExtend64(a[l+7:l]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + Zero extend packed unsigned 8-bit integers in the low 8 bytes of "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 8*j + IF k[j] + dst[i+63:i] := ZeroExtend64(a[l+7:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Convert
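Because the 8-to-64-bit widening produces only eight lanes, just the low 8 bytes of the source participate. A sketch with stdarch's `_mm512_cvtepu8_epi64` (input values are illustrative):

use std::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn widen(a: __m128i) -> __m512i {
    // Only the low 8 bytes of `a` participate; each becomes one 64-bit lane.
    _mm512_cvtepu8_epi64(a)
}

fn main() {
    if is_x86_feature_detected!("avx512f") {
        let src = unsafe { _mm_setr_epi8(1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0) };
        let lanes: [i64; 8] = unsafe { core::mem::transmute(widen(src)) };
        assert_eq!(lanes, [1, 2, 3, 4, 5, 6, 7, 8]);
    }
}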
- - - Zero extend packed unsigned 32-bit integers in "a" to packed 64-bit integers, - and store the results in "dst". - - FOR j := 0 to 7 - i := 64*j - k := 32*j - dst[i+63:i] := ZeroExtend64(a[k+31:k]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Convert + + + Zero extend packed unsigned 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst". + +FOR j := 0 to 7 + i := 64*j + k := 32*j + dst[i+63:i] := ZeroExtend64(a[k+31:k]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - - Zero extend packed unsigned 32-bit integers in "a" to packed 64-bit integers, - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := 64*j - l := 32*j - IF k[j] - dst[i+63:i] := ZeroExtend64(a[l+31:l]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + + Zero extend packed unsigned 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 32*j + IF k[j] + dst[i+63:i] := ZeroExtend64(a[l+31:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - Zero extend packed unsigned 32-bit integers in "a" to packed 64-bit integers, - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := 64*j - l := 32*j - IF k[j] - dst[i+63:i] := ZeroExtend64(a[l+31:l]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + Zero extend packed unsigned 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 32*j + IF k[j] + dst[i+63:i] := ZeroExtend64(a[l+31:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Convert
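The u32-to-u64 widening with a writemask shows the merge behaviour clearly: unselected lanes pass `src` through untouched. A sketch via stdarch's `_mm512_mask_cvtepu32_epi64` (mask and values chosen for illustration):

use std::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn demo() {
    let a = _mm256_set1_epi32(-1); // bit pattern 0xFFFF_FFFF, i.e. u32::MAX
    let src = _mm512_set1_epi64(-7);
    // Writemask: selected lanes get the zero-extended value, the rest keep `src`.
    let lanes: [i64; 8] = core::mem::transmute(_mm512_mask_cvtepu32_epi64(src, 0b1111_0000, a));
    assert_eq!(&lanes[..4], &[-7_i64; 4]);
    assert_eq!(&lanes[4..], &[0xFFFF_FFFF_i64; 4]);
}

fn main() {
    if is_x86_feature_detected!("avx512f") {
        unsafe { demo() }
    }
}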
- - - Zero extend packed unsigned 16-bit integers in "a" to packed 32-bit integers, - and store the results in "dst". - - FOR j := 0 to 15 - i := 32*j - k := 16*j - dst[i+31:i] := ZeroExtend32(a[k+15:k]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Convert + + + Zero extend packed unsigned 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst". + +FOR j := 0 to 15 + i := 32*j + k := 16*j + dst[i+31:i] := ZeroExtend32(a[k+15:k]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - - Zero extend packed unsigned 16-bit integers in "a" to packed 32-bit integers, - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := 32*j - l := 16*j - IF k[j] - dst[i+31:i] := ZeroExtend32(a[l+15:l]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + + Zero extend packed unsigned 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + l := 16*j + IF k[j] + dst[i+31:i] := ZeroExtend32(a[l+15:l]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - Zero extend packed unsigned 16-bit integers in "a" to packed 32-bit integers, - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := 32*j - l := 16*j - IF k[j] - dst[i+31:i] := ZeroExtend32(a[l+15:l]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + Zero extend packed unsigned 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := 32*j + l := 16*j + IF k[j] + dst[i+31:i] := ZeroExtend32(a[l+15:l]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - Zero extend packed unsigned 16-bit integers in "a" to packed 64-bit integers, - and store the results in "dst". - - FOR j := 0 to 7 - i := 64*j - k := 16*j - dst[i+63:i] := ZeroExtend64(a[k+15:k]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Convert + + + Zero extend packed unsigned 16-bit integers in "a" to packed 64-bit integers, and store the results in "dst". + +FOR j := 0 to 7 + i := 64*j + k := 16*j + dst[i+63:i] := ZeroExtend64(a[k+15:k]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - - Zero extend packed unsigned 16-bit integers in "a" to packed 64-bit integers, - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := 64*j - l := 16*j - IF k[j] - dst[i+63:i] := ZeroExtend64(a[l+15:l]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + + Zero extend packed unsigned 16-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 16*j + IF k[j] + dst[i+63:i] := ZeroExtend64(a[l+15:l]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Convert
- - - - Zero extend packed unsigned 16-bit integers in "a" to packed 64-bit integers, - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := 64*j - l := 16*j - IF k[j] - dst[i+63:i] := ZeroExtend64(a[l+15:l]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Convert + + + + Zero extend packed unsigned 16-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := 64*j + l := 16*j + IF k[j] + dst[i+63:i] := ZeroExtend64(a[l+15:l]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Convert
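The two u16 widenings differ in how much of the source they read: 16 lanes from a 256-bit vector for 32-bit results, 8 lanes from a 128-bit vector for 64-bit results. A sketch with stdarch's `_mm512_cvtepu16_epi32` / `_mm512_cvtepu16_epi64` (inputs are illustrative):

use std::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn demo() {
    // u16 -> u32 reads sixteen lanes from a 256-bit source...
    let d32: [i32; 16] = core::mem::transmute(_mm512_cvtepu16_epi32(_mm256_set1_epi16(-1)));
    assert_eq!(d32, [65_535; 16]);
    // ...while u16 -> u64 consumes only eight lanes, from a 128-bit source.
    let d64: [i64; 8] = core::mem::transmute(_mm512_cvtepu16_epi64(_mm_set1_epi16(-1)));
    assert_eq!(d64, [65_535; 8]);
}

fn main() {
    if is_x86_feature_detected!("avx512f") {
        unsafe { demo() }
    }
}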
- - - Copy the lower single-precision (32-bit) floating-point element of "a" to - "dst". - - dst[31:0] := a[31:0] - - - AVX512F -
immintrin.h
- Convert + + + Copy the lower single-precision (32-bit) floating-point element of "a" to "dst". + +dst[31:0] := a[31:0] + + + AVX512F +
immintrin.h
+ Convert
- - - Copy the lower double-precision (64-bit) floating-point element of "a" to - "dst". - - dst[63:0] := a[63:0] - - - AVX512F -
immintrin.h
- Convert + + + Copy the lower double-precision (64-bit) floating-point element of "a" to "dst". + +dst[63:0] := a[63:0] + + + AVX512F +
immintrin.h
+ Convert
- - - Copy the lower 32-bit integer in "a" to "dst". - - dst[31:0] := a[31:0] - - - AVX512F -
immintrin.h
- Convert + + + Copy the lower 32-bit integer in "a" to "dst". + +dst[31:0] := a[31:0] + + + AVX512F +
immintrin.h
+ Convert
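These three lower-element copies surface in stdarch as `_mm512_cvtss_f32`, `_mm512_cvtsd_f64`, and `_mm512_cvtsi512_si32`; each is a cheap way to pull element 0 out of a 512-bit register. A minimal sketch (values are illustrative):

use std::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn demo() {
    // Each intrinsic reads only the lowest element of the 512-bit source.
    assert_eq!(_mm512_cvtss_f32(_mm512_set1_ps(1.5)), 1.5);
    assert_eq!(_mm512_cvtsd_f64(_mm512_set1_pd(2.5)), 2.5);
    assert_eq!(_mm512_cvtsi512_si32(_mm512_set1_epi32(42)), 42);
}

fn main() {
    if is_x86_feature_detected!("avx512f") {
        unsafe { demo() }
    }
}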
- - - - - - Compare packed double-precision (64-bit) floating-point elements in "a" and - "b", and store packed maximum values in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). [max_float_note] - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := MAX(a[i+63:i], b[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [max_float_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := MAX(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - - - Compare packed double-precision (64-bit) floating-point elements in "a" and - "b", and store packed maximum values in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). [sae_note][max_float_note] - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := MAX(a[i+63:i], b[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [sae_note][max_float_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := MAX(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - Compare packed double-precision (64-bit) floating-point elements in "a" and - "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). [max_float_note] - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := MAX(a[i+63:i], b[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [max_float_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := MAX(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - - Compare packed double-precision (64-bit) floating-point elements in "a" and - "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). [sae_note][max_float_note] - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := MAX(a[i+63:i], b[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note][max_float_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := MAX(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - Compare packed double-precision (64-bit) floating-point elements in "a" and - "b", and store packed maximum values in "dst". [max_float_note] - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := MAX(a[i+63:i], b[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst". [max_float_note] + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := MAX(a[i+63:i], b[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - Compare packed double-precision (64-bit) floating-point elements in "a" and - "b", and store packed maximum values in "dst". [sae_note][max_float_note] - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := MAX(a[i+63:i], b[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst". [sae_note][max_float_note] + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := MAX(a[i+63:i], b[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - - Compare packed single-precision (32-bit) floating-point elements in "a" and - "b", and store packed maximum values in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). [max_float_note] - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := MAX(a[i+31:i], b[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [max_float_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := MAX(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - - - Compare packed single-precision (32-bit) floating-point elements in "a" and - "b", and store packed maximum values in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). [sae_note][max_float_note] - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := MAX(a[i+31:i], b[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [sae_note][max_float_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := MAX(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - Compare packed single-precision (32-bit) floating-point elements in "a" and - "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). [max_float_note] - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := MAX(a[i+31:i], b[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [max_float_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := MAX(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - - Compare packed single-precision (32-bit) floating-point elements in "a" and - "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). [sae_note][max_float_note] - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := MAX(a[i+31:i], b[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note][max_float_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := MAX(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - Compare packed single-precision (32-bit) floating-point elements in "a" and - "b", and store packed maximum values in "dst". [max_float_note] - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := MAX(a[i+31:i], b[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst". [max_float_note] + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := MAX(a[i+31:i], b[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - Compare packed single-precision (32-bit) floating-point elements in "a" and - "b", and store packed maximum values in "dst". [sae_note][max_float_note] - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := MAX(a[i+31:i], b[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst". [sae_note][max_float_note] + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := MAX(a[i+31:i], b[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
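The packed-max block above spans plain, writemask, zeromask, and the [sae_note] forms (which in stdarch take the suppress-all-exceptions flag as a const parameter, e.g. `_mm512_max_round_pd::<_MM_FROUND_NO_EXC>`). A sketch of the plain and zeromask `f64` variants, with values of my own choosing:

use std::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn demo() {
    let a = _mm512_setr_pd(1.0, 5.0, 1.0, 5.0, 1.0, 5.0, 1.0, 5.0);
    let b = _mm512_set1_pd(3.0);
    // Plain packed max: lane-wise MAX(a, b).
    let m: [f64; 8] = core::mem::transmute(_mm512_max_pd(a, b));
    assert_eq!(m, [3.0, 5.0, 3.0, 5.0, 3.0, 5.0, 3.0, 5.0]);
    // Zeromask form: lanes with a clear mask bit become 0.0.
    let z: [f64; 8] = core::mem::transmute(_mm512_maskz_max_pd(0b0000_1111, a, b));
    assert_eq!(z, [3.0, 5.0, 3.0, 5.0, 0.0, 0.0, 0.0, 0.0]);
}

fn main() {
    if is_x86_feature_detected!("avx512f") {
        unsafe { demo() }
    }
}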
- - - - - - - Compare the lower double-precision (64-bit) floating-point elements in "a" and - "b", store the maximum value in the lower element of "dst" using writemask "k" (the - element is copied from "src" when mask bit 0 is not set), and copy the upper element - from "a" to the upper element of "dst". [sae_note][max_float_note] - - IF k[0] - dst[63:0] := MAX(a[63:0], b[63:0]) - ELSE - dst[63:0] := src[63:0] - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + + + Compare the lower double-precision (64-bit) floating-point elements in "a" and "b", store the maximum value in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". [sae_note][max_float_note] + +IF k[0] + dst[63:0] := MAX(a[63:0], b[63:0]) +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - - Compare the lower double-precision (64-bit) floating-point elements in "a" and - "b", store the maximum value in the lower element of "dst" using writemask "k" (the - element is copied from "src" when mask bit 0 is not set), and copy the upper element - from "a" to the upper element of "dst". - - IF k[0] - dst[63:0] := MAX(a[63:0], b[63:0]) - ELSE - dst[63:0] := src[63:0] - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + + Compare the lower double-precision (64-bit) floating-point elements in "a" and "b", store the maximum value in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := MAX(a[63:0], b[63:0]) +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - - Compare the lower double-precision (64-bit) floating-point elements in "a" and - "b", store the maximum value in the lower element of "dst" using zeromask "k" (the - element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" - to the upper element of "dst". [sae_note][max_float_note] - - IF k[0] - dst[63:0] := MAX(a[63:0], b[63:0]) - ELSE - dst[63:0] := 0 - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + + Compare the lower double-precision (64-bit) floating-point elements in "a" and "b", store the maximum value in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". [sae_note][max_float_note] + +IF k[0] + dst[63:0] := MAX(a[63:0], b[63:0]) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - Compare the lower double-precision (64-bit) floating-point elements in "a" and - "b", store the maximum value in the lower element of "dst" using zeromask "k" (the - element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" - to the upper element of "dst". - - IF k[0] - dst[63:0] := MAX(a[63:0], b[63:0]) - ELSE - dst[63:0] := 0 - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + Compare the lower double-precision (64-bit) floating-point elements in "a" and "b", store the maximum value in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := MAX(a[63:0], b[63:0]) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - Compare the lower double-precision (64-bit) floating-point elements in "a" and - "b", store the maximum value in the lower element of "dst", and copy the upper element - from "a" to the upper element of "dst". [sae_note][max_float_note] - - dst[63:0] := MAX(a[63:0], b[63:0]) - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + Compare the lower double-precision (64-bit) floating-point elements in "a" and "b", store the maximum value in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". [sae_note][max_float_note] + +dst[63:0] := MAX(a[63:0], b[63:0]) +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - - - Compare the lower single-precision (32-bit) floating-point elements in "a" and - "b", store the maximum value in the lower element of "dst" using writemask "k" (the - element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed - elements from "a" to the upper elements of "dst". [sae_note][max_float_note] - - IF k[0] - dst[31:0] := MAX(a[31:0], b[31:0]) - ELSE - dst[31:0] := src[31:0] - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b", store the maximum value in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". [sae_note][max_float_note] + +IF k[0] + dst[31:0] := MAX(a[31:0], b[31:0]) +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - - Compare the lower single-precision (32-bit) floating-point elements in "a" and - "b", store the maximum value in the lower element of "dst" using writemask "k" (the - element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed - elements from "a" to the upper elements of "dst". - - IF k[0] - dst[31:0] := MAX(a[31:0], b[31:0]) - ELSE - dst[31:0] := src[31:0] - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b", store the maximum value in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst[31:0] := MAX(a[31:0], b[31:0]) +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - - Compare the lower single-precision (32-bit) floating-point elements in "a" and - "b", store the maximum value in the lower element of "dst" using zeromask "k" (the - element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements - from "a" to the upper elements of "dst". [sae_note][max_float_note] - - IF k[0] - dst[31:0] := MAX(a[31:0], b[31:0]) - ELSE - dst[31:0] := 0 - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b", store the maximum value in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". [sae_note][max_float_note] + +IF k[0] + dst[31:0] := MAX(a[31:0], b[31:0]) +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - Compare the lower single-precision (32-bit) floating-point elements in "a" and - "b", store the maximum value in the lower element of "dst" using zeromask "k" (the - element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements - from "a" to the upper elements of "dst". - - IF k[0] - dst[31:0] := MAX(a[31:0], b[31:0]) - ELSE - dst[31:0] := 0 - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b", store the maximum value in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst[31:0] := MAX(a[31:0], b[31:0]) +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - Compare the lower single-precision (32-bit) floating-point elements in "a" and - "b", store the maximum value in the lower element of "dst", and copy the upper 3 packed - elements from "a" to the upper elements of "dst". [sae_note][max_float_note] - - dst[31:0] := MAX(a[31:0], b[31:0]) - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b", store the maximum value in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". [sae_note][max_float_note] + +dst[31:0] := MAX(a[31:0], b[31:0]) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
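For the scalar max forms, only lane 0 is computed; the remaining lanes are always copied from "a" regardless of the mask. A sketch of the writemask single-precision variant, assuming stdarch's `_mm_mask_max_ss` signature (values are illustrative):

use std::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn demo() {
    let a = _mm_setr_ps(1.0, 10.0, 20.0, 30.0);
    let b = _mm_set1_ps(2.0);
    let src = _mm_set1_ps(-1.0);
    // Mask bit 0 clear: lane 0 comes from `src`; lanes 1..=3 always copy `a`.
    let r: [f32; 4] = core::mem::transmute(_mm_mask_max_ss(src, 0b0, a, b));
    assert_eq!(r, [-1.0, 10.0, 20.0, 30.0]);
    // Mask bit 0 set: lane 0 is MAX(a[0], b[0]) = 2.0.
    let r: [f32; 4] = core::mem::transmute(_mm_mask_max_ss(src, 0b1, a, b));
    assert_eq!(r, [2.0, 10.0, 20.0, 30.0]);
}

fn main() {
    if is_x86_feature_detected!("avx512f") {
        unsafe { demo() }
    }
}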
- - - - - - Compare packed double-precision (64-bit) floating-point elements in "a" and - "b", and store packed minimum values in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). [min_float_note] - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := MIN(a[i+63:i], b[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [min_float_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := MIN(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - - - Compare packed double-precision (64-bit) floating-point elements in "a" and - "b", and store packed minimum values in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). [sae_note][min_float_note] - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := MIN(a[i+63:i], b[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [sae_note][min_float_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := MIN(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - Compare packed double-precision (64-bit) floating-point elements in "a" and - "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). [min_float_note] - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := MIN(a[i+63:i], b[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [min_float_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := MIN(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - - Compare packed double-precision (64-bit) floating-point elements in "a" and - "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). [sae_note][min_float_note] - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := MIN(a[i+63:i], b[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note][min_float_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := MIN(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - Compare packed double-precision (64-bit) floating-point elements in "a" and - "b", and store packed minimum values in "dst". [min_float_note] - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := MIN(a[i+63:i], b[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst". [min_float_note] + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := MIN(a[i+63:i], b[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - Compare packed double-precision (64-bit) floating-point elements in "a" and - "b", and store packed minimum values in "dst". [sae_note][min_float_note] - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := MIN(a[i+63:i], b[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst". [sae_note][min_float_note] + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := MIN(a[i+63:i], b[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - - Compare packed single-precision (32-bit) floating-point elements in "a" and - "b", and store packed minimum values in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). [min_float_note] - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := MIN(a[i+31:i], b[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [min_float_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := MIN(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - - - Compare packed single-precision (32-bit) floating-point elements in "a" and - "b", and store packed minimum values in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). [sae_note][min_float_note] - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := MIN(a[i+31:i], b[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [sae_note][min_float_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := MIN(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - Compare packed single-precision (32-bit) floating-point elements in "a" and - "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). [min_float_note] - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := MIN(a[i+31:i], b[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [min_float_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := MIN(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - - Compare packed single-precision (32-bit) floating-point elements in "a" and - "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). [sae_note][min_float_note] - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := MIN(a[i+31:i], b[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note][min_float_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := MIN(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - Compare packed single-precision (32-bit) floating-point elements in "a" and - "b", and store packed minimum values in "dst". [min_float_note] - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := MIN(a[i+31:i], b[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst". [min_float_note] + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := MIN(a[i+31:i], b[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - Compare packed single-precision (32-bit) floating-point elements in "a" and - "b", and store packed minimum values in "dst". [sae_note][min_float_note] - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := MIN(a[i+31:i], b[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst". [sae_note][min_float_note] + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := MIN(a[i+31:i], b[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
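The packed-min entries mirror the max family lane for lane. A sketch of the plain and writemask single-precision forms via stdarch's `_mm512_min_ps` / `_mm512_mask_min_ps` (mask and inputs are illustrative):

use std::arch::x86_64::*;

#[target_feature(enable = "avx512f")]
unsafe fn demo() {
    let a = _mm512_set1_ps(4.0);
    let b = _mm512_set1_ps(-4.0);
    let r: [f32; 16] = core::mem::transmute(_mm512_min_ps(a, b));
    assert_eq!(r, [-4.0; 16]);
    // Writemask form: unselected lanes are copied from `src` (here reusing `a`).
    let r: [f32; 16] = core::mem::transmute(_mm512_mask_min_ps(a, 0b11111111_00000000, a, b));
    assert_eq!(&r[..8], &[4.0; 8]);
    assert_eq!(&r[8..], &[-4.0; 8]);
}

fn main() {
    if is_x86_feature_detected!("avx512f") {
        unsafe { demo() }
    }
}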
- - - - - - - Compare the lower double-precision (64-bit) floating-point elements in "a" and - "b", store the minimum value in the lower element of "dst" using writemask "k" (the - element is copied from "src" when mask bit 0 is not set), and copy the upper element - from "a" to the upper element of "dst". [sae_note][min_float_note] - - IF k[0] - dst[63:0] := MIN(a[63:0], b[63:0]) - ELSE - dst[63:0] := src[63:0] - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + + + Compare the lower double-precision (64-bit) floating-point elements in "a" and "b", store the minimum value in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". [sae_note][min_float_note] + +IF k[0] + dst[63:0] := MIN(a[63:0], b[63:0]) +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - - Compare the lower double-precision (64-bit) floating-point elements in "a" and - "b", store the minimum value in the lower element of "dst" using writemask "k" (the - element is copied from "src" when mask bit 0 is not set), and copy the upper element - from "a" to the upper element of "dst". - - IF k[0] - dst[63:0] := MIN(a[63:0], b[63:0]) - ELSE - dst[63:0] := src[63:0] - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + + Compare the lower double-precision (64-bit) floating-point elements in "a" and "b", store the minimum value in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := MIN(a[63:0], b[63:0]) +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - - Compare the lower double-precision (64-bit) floating-point elements in "a" and - "b", store the minimum value in the lower element of "dst" using zeromask "k" (the - element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" - to the upper element of "dst". [sae_note][min_float_note] - - IF k[0] - dst[63:0] := MIN(a[63:0], b[63:0]) - ELSE - dst[63:0] := 0 - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + + Compare the lower double-precision (64-bit) floating-point elements in "a" and "b", store the minimum value in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". [sae_note][min_float_note] + +IF k[0] + dst[63:0] := MIN(a[63:0], b[63:0]) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - Compare the lower double-precision (64-bit) floating-point elements in "a" and - "b", store the minimum value in the lower element of "dst" using zeromask "k" (the - element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" - to the upper element of "dst". - - IF k[0] - dst[63:0] := MIN(a[63:0], b[63:0]) - ELSE - dst[63:0] := 0 - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + Compare the lower double-precision (64-bit) floating-point elements in "a" and "b", store the minimum value in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := MIN(a[63:0], b[63:0]) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - Compare the lower double-precision (64-bit) floating-point elements in "a" and - "b", store the minimum value in the lower element of "dst" , and copy the upper element - from "a" to the upper element of "dst". [sae_note][min_float_note] - - dst[63:0] := MIN(a[63:0], b[63:0]) - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + Compare the lower double-precision (64-bit) floating-point elements in "a" and "b", store the minimum value in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". [sae_note][min_float_note] +
+dst[63:0] := MIN(a[63:0], b[63:0])
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+
+
+ AVX512F
+
immintrin.h
+ Special Math Functions
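The [min_float_note] placeholder above refers to the asymmetry of the x86 MIN operation: it is defined as `a < b ? a : b`, so when either operand is NaN, or when the operands compare equal (including +0.0 versus -0.0), the second source operand is returned. A hedged scalar sketch of the unmasked form, with an illustrative name:

// Model of the unmasked scalar MIN above; deliberately not f64::min,
// which handles NaN symmetrically, unlike the MINSD-style operation
// the pseudocode describes.
fn min_sd_model(a: [f64; 2], b: [f64; 2]) -> [f64; 2] {
    [if a[0] < b[0] { a[0] } else { b[0] }, a[1]]
}
// e.g. min_sd_model([f64::NAN, 0.0], [1.0, 0.0]) yields [1.0, 0.0]:
// the second operand wins on any unordered compare.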
- - - - - - - Compare the lower single-precision (32-bit) floating-point elements in "a" and - "b", store the minimum value in the lower element of "dst" using writemask "k" (the - element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed - elements from "a" to the upper elements of "dst". [sae_note][min_float_note] - - IF k[0] - dst[31:0] := MIN(a[31:0], b[31:0]) - ELSE - dst[31:0] := src[31:0] - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b", store the minimum value in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". [sae_note][min_float_note] + +IF k[0] + dst[31:0] := MIN(a[31:0], b[31:0]) +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - - Compare the lower single-precision (32-bit) floating-point elements in "a" and - "b", store the minimum value in the lower element of "dst" using writemask "k" (the - element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed - elements from "a" to the upper elements of "dst". - - IF k[0] - dst[31:0] := MIN(a[31:0], b[31:0]) - ELSE - dst[31:0] := src[31:0] - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b", store the minimum value in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst[31:0] := MIN(a[31:0], b[31:0]) +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - - Compare the lower single-precision (32-bit) floating-point elements in "a" and - "b", store the minimum value in the lower element of "dst" using zeromask "k" (the - element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements - from "a" to the upper elements of "dst". [sae_note][min_float_note] - - IF k[0] - dst[31:0] := MIN(a[31:0], b[31:0]) - ELSE - dst[31:0] := 0 - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b", store the minimum value in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". [sae_note][min_float_note] + +IF k[0] + dst[31:0] := MIN(a[31:0], b[31:0]) +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - Compare the lower single-precision (32-bit) floating-point elements in "a" and - "b", store the minimum value in the lower element of "dst" using zeromask "k" (the - element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements - from "a" to the upper elements of "dst". - - IF k[0] - dst[31:0] := MIN(a[31:0], b[31:0]) - ELSE - dst[31:0] := 0 - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b", store the minimum value in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst[31:0] := MIN(a[31:0], b[31:0]) +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - Compare the lower single-precision (32-bit) floating-point elements in "a" and - "b", store the minimum value in the lower element of "dst", and copy the upper 3 packed - elements from "a" to the upper elements of "dst". [sae_note][min_float_note] - - dst[31:0] := MIN(a[31:0], b[31:0]) - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b", store the minimum value in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". [sae_note][min_float_note] + +dst[31:0] := MIN(a[31:0], b[31:0]) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - Compute the absolute value of packed signed 32-bit integers in "a", and store - the unsigned results in "dst". - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := ABS(a[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + Compute the absolute value of packed signed 32-bit integers in "a", and store the unsigned results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := ABS(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
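The ABS pseudocode above stores "unsigned results", which matters for the one input with no signed counterpart: the absolute value of i32::MIN only fits as 0x8000_0000 unsigned. A minimal scalar sketch, assuming plain arrays rather than __m512i (the name is illustrative):

// Scalar model of the 16-lane packed absolute value above. unsigned_abs
// keeps |i32::MIN| bit-exact instead of panicking or wrapping.
fn abs_epi32_model(a: [i32; 16]) -> [u32; 16] {
    a.map(|x| x.unsigned_abs())
}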
- - - - - Compute the absolute value of packed signed 32-bit integers in "a", and store - the unsigned results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := ABS(a[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + Compute the absolute value of packed signed 32-bit integers in "a", and store the unsigned results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ABS(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
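Every writemask/zeromask pair in this block reduces to the same per-lane select; only the computed operation differs. A generic sketch of that select, under the assumption of plain arrays and a mask of at most 16 lanes (names illustrative):

// Per-lane mask select shared by the writemask and zeromask forms above:
// lane j takes computed[j] when mask bit j is set, otherwise the
// passthrough lane (src for writemask; pass [0; N] for zeromask).
fn mask_select<const N: usize>(k: u16, computed: [i32; N], passthru: [i32; N]) -> [i32; N] {
    let mut dst = passthru;
    for j in 0..N {
        if (k >> j) & 1 != 0 {
            dst[j] = computed[j];
        }
    }
    dst
}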
- - - - Compute the absolute value of packed signed 32-bit integers in "a", and store - the unsigned results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := ABS(a[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + Compute the absolute value of packed signed 32-bit integers in "a", and store the unsigned results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ABS(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - Compute the absolute value of packed signed 64-bit integers in "a", and store - the unsigned results in "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := ABS(a[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + Compute the absolute value of packed signed 64-bit integers in "a", and store the unsigned results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := ABS(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - Compute the absolute value of packed signed 64-bit integers in "a", and store - the unsigned results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := ABS(a[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + Compute the absolute value of packed signed 64-bit integers in "a", and store the unsigned results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ABS(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - Compute the absolute value of packed signed 64-bit integers in "a", and store - the unsigned results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := ABS(a[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + Compute the absolute value of packed signed 64-bit integers in "a", and store the unsigned results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ABS(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - Compare packed signed 32-bit integers in "a" and "b", and store packed maximum - values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask - bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := MAX(a[i+31:i], b[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + Compare packed signed 32-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := MAX(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - - Compare packed signed 64-bit integers in "a" and "b", and store packed maximum - values in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := MAX(a[i+63:i], b[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + + Compare packed signed 64-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := MAX(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - Compare packed signed 64-bit integers in "a" and "b", and store packed maximum - values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask - bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := MAX(a[i+63:i], b[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + Compare packed signed 64-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := MAX(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - Compare packed signed 64-bit integers in "a" and "b", and store packed maximum - values in "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := MAX(a[i+63:i], b[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + Compare packed signed 64-bit integers in "a" and "b", and store packed maximum values in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := MAX(a[i+63:i], b[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
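The unmasked packed maximum above is eight independent signed lane compares; a scalar sketch under the same plain-array assumption (illustrative name):

// Scalar model of the unmasked signed 64-bit packed maximum above.
fn max_epi64_model(a: [i64; 8], b: [i64; 8]) -> [i64; 8] {
    core::array::from_fn(|j| a[j].max(b[j]))
}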
- - - - - Compare packed unsigned 32-bit integers in "a" and "b", and store packed - maximum values in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := MAX(a[i+31:i], b[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + Compare packed unsigned 32-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := MAX(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - - Compare packed unsigned 64-bit integers in "a" and "b", and store packed - maximum values in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := MAX(a[i+63:i], b[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + + Compare packed unsigned 64-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := MAX(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - Compare packed unsigned 64-bit integers in "a" and "b", and store packed - maximum values in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := MAX(a[i+63:i], b[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + Compare packed unsigned 64-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := MAX(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - Compare packed unsigned 64-bit integers in "a" and "b", and store packed - maximum values in "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := MAX(a[i+63:i], b[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + Compare packed unsigned 64-bit integers in "a" and "b", and store packed maximum values in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := MAX(a[i+63:i], b[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
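The epu64 entries differ from the epi64 ones only in treating each lane as unsigned, so an all-ones pattern compares as u64::MAX rather than -1. A scalar sketch under the same assumptions as the signed model:

// Same shape as the signed model, but with unsigned lane comparisons.
fn max_epu64_model(a: [u64; 8], b: [u64; 8]) -> [u64; 8] {
    core::array::from_fn(|j| a[j].max(b[j]))
}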
- - - - - Compare packed signed 32-bit integers in "a" and "b", and store packed minimum - values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask - bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := MIN(a[i+31:i], b[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + Compare packed signed 32-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := MIN(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - - Compare packed signed 64-bit integers in "a" and "b", and store packed minimum - values in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := MIN(a[i+63:i], b[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + + Compare packed signed 64-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := MIN(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - Compare packed signed 64-bit integers in "a" and "b", and store packed minimum - values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask - bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := MIN(a[i+63:i], b[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + Compare packed signed 64-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := MIN(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - Compare packed signed 64-bit integers in "a" and "b", and store packed minimum - values in "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := MIN(a[i+63:i], b[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + Compare packed signed 64-bit integers in "a" and "b", and store packed minimum values in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := MIN(a[i+63:i], b[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - Compare packed unsigned 32-bit integers in "a" and "b", and store packed - minimum values in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := MIN(a[i+31:i], b[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + Compare packed unsigned 32-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := MIN(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - - Compare packed unsigned 64-bit integers in "a" and "b", and store packed - minimum values in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := MIN(a[i+63:i], b[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + + Compare packed unsigned 64-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := MIN(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - - Compare packed unsigned 64-bit integers in "a" and "b", and store packed - minimum values in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := MIN(a[i+63:i], b[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + + Compare packed unsigned 64-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := MIN(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - Compare packed unsigned 64-bit integers in "a" and "b", and store packed - minimum values in "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := MIN(a[i+63:i], b[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Special Math Functions + + + + Compare packed unsigned 64-bit integers in "a" and "b", and store packed minimum values in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := MIN(a[i+63:i], b[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Special Math Functions
- - - - Move packed double-precision (64-bit) floating-point elements from "a" into - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Move + + + + Move packed double-precision (64-bit) floating-point elements from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Move
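The zeromasked move above is the degenerate case of the mask select: the computed value is just the source lane. A minimal sketch (illustrative name, plain arrays assumed):

// Scalar model of the zeromasked 64-bit FP move above: lane j is a[j]
// when mask bit j is set, else 0.0.
fn maskz_mov_pd_model(k: u8, a: [f64; 8]) -> [f64; 8] {
    core::array::from_fn(|j| if (k >> j) & 1 != 0 { a[j] } else { 0.0 })
}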
- - - - Move packed single-precision (32-bit) floating-point elements from "a" into - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Move + + + + Move packed single-precision (32-bit) floating-point elements from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Move
- - - - - Duplicate even-indexed double-precision (64-bit) floating-point elements from - "a", and store the results in "dst" using writemask "k" (elements are copied from "src" - when the corresponding mask bit is not set). - - tmp[63:0] := a[63:0] - tmp[127:64] := a[63:0] - tmp[191:128] := a[191:128] - tmp[255:192] := a[191:128] - tmp[319:256] := a[319:256] - tmp[383:320] := a[319:256] - tmp[447:384] := a[447:384] - tmp[511:448] := a[447:384] - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := tmp[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Move + + + + + Duplicate even-indexed double-precision (64-bit) floating-point elements from "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp[63:0] := a[63:0] +tmp[127:64] := a[63:0] +tmp[191:128] := a[191:128] +tmp[255:192] := a[191:128] +tmp[319:256] := a[319:256] +tmp[383:320] := a[319:256] +tmp[447:384] := a[447:384] +tmp[511:448] := a[447:384] +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Move
- - - - Duplicate even-indexed double-precision (64-bit) floating-point elements from - "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - tmp[63:0] := a[63:0] - tmp[127:64] := a[63:0] - tmp[191:128] := a[191:128] - tmp[255:192] := a[191:128] - tmp[319:256] := a[319:256] - tmp[383:320] := a[319:256] - tmp[447:384] := a[447:384] - tmp[511:448] := a[447:384] - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := tmp[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Move + + + + Duplicate even-indexed double-precision (64-bit) floating-point elements from "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp[63:0] := a[63:0] +tmp[127:64] := a[63:0] +tmp[191:128] := a[191:128] +tmp[255:192] := a[191:128] +tmp[319:256] := a[319:256] +tmp[383:320] := a[319:256] +tmp[447:384] := a[447:384] +tmp[511:448] := a[447:384] +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := tmp[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Move
- - - Duplicate even-indexed double-precision (64-bit) floating-point elements from - "a", and store the results in "dst". - - dst[63:0] := a[63:0] - dst[127:64] := a[63:0] - dst[191:128] := a[191:128] - dst[255:192] := a[191:128] - dst[319:256] := a[319:256] - dst[383:320] := a[319:256] - dst[447:384] := a[447:384] - dst[511:448] := a[447:384] - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Move + + + Duplicate even-indexed double-precision (64-bit) floating-point elements from "a", and store the results in "dst". + +dst[63:0] := a[63:0] +dst[127:64] := a[63:0] +dst[191:128] := a[191:128] +dst[255:192] := a[191:128] +dst[319:256] := a[319:256] +dst[383:320] := a[319:256] +dst[447:384] := a[447:384] +dst[511:448] := a[447:384] +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Move
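The duplication pattern above copies each even-indexed f64 lane over its odd neighbor; clearing the low bit of the lane index expresses that compactly. A scalar sketch (illustrative name):

// Scalar model of the even-index duplication above: lanes come out as
// a[0], a[0], a[2], a[2], ..., a[6], a[6].
fn movedup_pd_model(a: [f64; 8]) -> [f64; 8] {
    core::array::from_fn(|j| a[j & !1]) // j & !1 clears bit 0 of the lane index
}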
- - - - Move packed 32-bit integers from "a" into "dst" using zeromask "k" (elements - are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Move + + + + Move packed 32-bit integers from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Move
- - - - Move packed 64-bit integers from "a" into "dst" using zeromask "k" (elements - are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Move + + + + Move packed 64-bit integers from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Move
- - - - - - Move the lower double-precision (64-bit) floating-point element from "b" to the - lower element of "dst" using writemask "k" (the element is copied from "src" when mask - bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". - - IF k[0] - dst[63:0] := b[63:0] - ELSE - dst[63:0] := src[63:0] - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Move + + + + + + Move the lower double-precision (64-bit) floating-point element from "b" to the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := b[63:0] +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Move
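The masked scalar move above combines three sources: "b" for the selected lower lane, "src" as the mask-off fallback, and "a" for the untouched upper lane. A scalar sketch (illustrative name):

// Scalar model of the masked lower-lane move above.
fn mask_move_sd_model(src: [f64; 2], k: u8, a: [f64; 2], b: [f64; 2]) -> [f64; 2] {
    [if k & 1 != 0 { b[0] } else { src[0] }, a[1]]
}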
- - - - - Move the lower double-precision (64-bit) floating-point element from "b" to the - lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is - not set), and copy the upper element from "a" to the upper element of "dst". - - IF k[0] - dst[63:0] := b[63:0] - ELSE - dst[63:0] := 0 - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Move + + + + + Move the lower double-precision (64-bit) floating-point element from "b" to the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := b[63:0] +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Move
- - - - - Duplicate odd-indexed single-precision (32-bit) floating-point elements from - "a", and store the results in "dst" using writemask "k" (elements are copied from "src" - when the corresponding mask bit is not set). - - tmp[31:0] := a[63:32] - tmp[63:32] := a[63:32] - tmp[95:64] := a[127:96] - tmp[127:96] := a[127:96] - tmp[159:128] := a[191:160] - tmp[191:160] := a[191:160] - tmp[223:192] := a[255:224] - tmp[255:224] := a[255:224] - tmp[287:256] := a[319:288] - tmp[319:288] := a[319:288] - tmp[351:320] := a[383:352] - tmp[383:352] := a[383:352] - tmp[415:384] := a[447:416] - tmp[447:416] := a[447:416] - tmp[479:448] := a[511:480] - tmp[511:480] := a[511:480] - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := tmp[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Move + + + + + Duplicate odd-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp[31:0] := a[63:32] +tmp[63:32] := a[63:32] +tmp[95:64] := a[127:96] +tmp[127:96] := a[127:96] +tmp[159:128] := a[191:160] +tmp[191:160] := a[191:160] +tmp[223:192] := a[255:224] +tmp[255:224] := a[255:224] +tmp[287:256] := a[319:288] +tmp[319:288] := a[319:288] +tmp[351:320] := a[383:352] +tmp[383:352] := a[383:352] +tmp[415:384] := a[447:416] +tmp[447:416] := a[447:416] +tmp[479:448] := a[511:480] +tmp[511:480] := a[511:480] +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Move
- - - - Duplicate odd-indexed single-precision (32-bit) floating-point elements from - "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - tmp[31:0] := a[63:32] - tmp[63:32] := a[63:32] - tmp[95:64] := a[127:96] - tmp[127:96] := a[127:96] - tmp[159:128] := a[191:160] - tmp[191:160] := a[191:160] - tmp[223:192] := a[255:224] - tmp[255:224] := a[255:224] - tmp[287:256] := a[319:288] - tmp[319:288] := a[319:288] - tmp[351:320] := a[383:352] - tmp[383:352] := a[383:352] - tmp[415:384] := a[447:416] - tmp[447:416] := a[447:416] - tmp[479:448] := a[511:480] - tmp[511:480] := a[511:480] - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := tmp[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Move + + + + Duplicate odd-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp[31:0] := a[63:32] +tmp[63:32] := a[63:32] +tmp[95:64] := a[127:96] +tmp[127:96] := a[127:96] +tmp[159:128] := a[191:160] +tmp[191:160] := a[191:160] +tmp[223:192] := a[255:224] +tmp[255:224] := a[255:224] +tmp[287:256] := a[319:288] +tmp[319:288] := a[319:288] +tmp[351:320] := a[383:352] +tmp[383:352] := a[383:352] +tmp[415:384] := a[447:416] +tmp[447:416] := a[447:416] +tmp[479:448] := a[511:480] +tmp[511:480] := a[511:480] +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Move
- - - Duplicate odd-indexed single-precision (32-bit) floating-point elements from - "a", and store the results in "dst". - - dst[31:0] := a[63:32] - dst[63:32] := a[63:32] - dst[95:64] := a[127:96] - dst[127:96] := a[127:96] - dst[159:128] := a[191:160] - dst[191:160] := a[191:160] - dst[223:192] := a[255:224] - dst[255:224] := a[255:224] - dst[287:256] := a[319:288] - dst[319:288] := a[319:288] - dst[351:320] := a[383:352] - dst[383:352] := a[383:352] - dst[415:384] := a[447:416] - dst[447:416] := a[447:416] - dst[479:448] := a[511:480] - dst[511:480] := a[511:480] - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Move + + + Duplicate odd-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst". + +dst[31:0] := a[63:32] +dst[63:32] := a[63:32] +dst[95:64] := a[127:96] +dst[127:96] := a[127:96] +dst[159:128] := a[191:160] +dst[191:160] := a[191:160] +dst[223:192] := a[255:224] +dst[255:224] := a[255:224] +dst[287:256] := a[319:288] +dst[319:288] := a[319:288] +dst[351:320] := a[383:352] +dst[383:352] := a[383:352] +dst[415:384] := a[447:416] +dst[447:416] := a[447:416] +dst[479:448] := a[511:480] +dst[511:480] := a[511:480] +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Move
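The odd-index duplication above is the f32 counterpart of the movedup pattern, with the index bit set instead of cleared; the moveldup entries that follow clear it. A scalar sketch (illustrative name):

// Scalar model of the odd-index duplication above: lanes come out as
// a[1], a[1], a[3], a[3], ..., a[15], a[15].
fn movehdup_ps_model(a: [f32; 16]) -> [f32; 16] {
    core::array::from_fn(|j| a[j | 1]) // j | 1 sets bit 0 of the lane index
}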
- - - - - Duplicate even-indexed single-precision (32-bit) floating-point elements from - "a", and store the results in "dst" using writemask "k" (elements are copied from "src" - when the corresponding mask bit is not set). - - tmp[31:0] := a[31:0] - tmp[63:32] := a[31:0] - tmp[95:64] := a[95:64] - tmp[127:96] := a[95:64] - tmp[159:128] := a[159:128] - tmp[191:160] := a[159:128] - tmp[223:192] := a[223:192] - tmp[255:224] := a[223:192] - tmp[287:256] := a[287:256] - tmp[319:288] := a[287:256] - tmp[351:320] := a[351:320] - tmp[383:352] := a[351:320] - tmp[415:384] := a[415:384] - tmp[447:416] := a[415:384] - tmp[479:448] := a[479:448] - tmp[511:480] := a[479:448] - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := tmp[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Move + + + + + Duplicate even-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +tmp[31:0] := a[31:0] +tmp[63:32] := a[31:0] +tmp[95:64] := a[95:64] +tmp[127:96] := a[95:64] +tmp[159:128] := a[159:128] +tmp[191:160] := a[159:128] +tmp[223:192] := a[223:192] +tmp[255:224] := a[223:192] +tmp[287:256] := a[287:256] +tmp[319:288] := a[287:256] +tmp[351:320] := a[351:320] +tmp[383:352] := a[351:320] +tmp[415:384] := a[415:384] +tmp[447:416] := a[415:384] +tmp[479:448] := a[479:448] +tmp[511:480] := a[479:448] +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Move
- - - - Duplicate even-indexed single-precision (32-bit) floating-point elements from - "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - tmp[31:0] := a[31:0] - tmp[63:32] := a[31:0] - tmp[95:64] := a[95:64] - tmp[127:96] := a[95:64] - tmp[159:128] := a[159:128] - tmp[191:160] := a[159:128] - tmp[223:192] := a[223:192] - tmp[255:224] := a[223:192] - tmp[287:256] := a[287:256] - tmp[319:288] := a[287:256] - tmp[351:320] := a[351:320] - tmp[383:352] := a[351:320] - tmp[415:384] := a[415:384] - tmp[447:416] := a[415:384] - tmp[479:448] := a[479:448] - tmp[511:480] := a[479:448] - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := tmp[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Move + + + + Duplicate even-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +tmp[31:0] := a[31:0] +tmp[63:32] := a[31:0] +tmp[95:64] := a[95:64] +tmp[127:96] := a[95:64] +tmp[159:128] := a[159:128] +tmp[191:160] := a[159:128] +tmp[223:192] := a[223:192] +tmp[255:224] := a[223:192] +tmp[287:256] := a[287:256] +tmp[319:288] := a[287:256] +tmp[351:320] := a[351:320] +tmp[383:352] := a[351:320] +tmp[415:384] := a[415:384] +tmp[447:416] := a[415:384] +tmp[479:448] := a[479:448] +tmp[511:480] := a[479:448] +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := tmp[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Move
- - - Duplicate even-indexed single-precision (32-bit) floating-point elements from - "a", and store the results in "dst". - - dst[31:0] := a[31:0] - dst[63:32] := a[31:0] - dst[95:64] := a[95:64] - dst[127:96] := a[95:64] - dst[159:128] := a[159:128] - dst[191:160] := a[159:128] - dst[223:192] := a[223:192] - dst[255:224] := a[223:192] - dst[287:256] := a[287:256] - dst[319:288] := a[287:256] - dst[351:320] := a[351:320] - dst[383:352] := a[351:320] - dst[415:384] := a[415:384] - dst[447:416] := a[415:384] - dst[479:448] := a[479:448] - dst[511:480] := a[479:448] - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Move + + + Duplicate even-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst". + +dst[31:0] := a[31:0] +dst[63:32] := a[31:0] +dst[95:64] := a[95:64] +dst[127:96] := a[95:64] +dst[159:128] := a[159:128] +dst[191:160] := a[159:128] +dst[223:192] := a[223:192] +dst[255:224] := a[223:192] +dst[287:256] := a[287:256] +dst[319:288] := a[287:256] +dst[351:320] := a[351:320] +dst[383:352] := a[351:320] +dst[415:384] := a[415:384] +dst[447:416] := a[415:384] +dst[479:448] := a[479:448] +dst[511:480] := a[479:448] +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Move
- - - - - - Move the lower single-precision (32-bit) floating-point element from "b" to the - lower element of "dst" using writemask "k" (the element is copied from "src" when mask - bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements - of "dst". - - IF k[0] - dst[31:0] := b[31:0] - ELSE - dst[31:0] := src[31:0] - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Move + + + + + + Move the lower single-precision (32-bit) floating-point element from "b" to the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst[31:0] := b[31:0] +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Move
- - - - - Move the lower single-precision (32-bit) floating-point element from "b" to the - lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is - not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". - - IF k[0] - dst[31:0] := b[31:0] - ELSE - dst[31:0] := 0 - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Move + + + + + Move the lower single-precision (32-bit) floating-point element from "b" to the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst[31:0] := b[31:0] +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Move
- - - - - Compute the bitwise AND of packed 32-bit integers in "a" and "b", and store the - results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask - bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] AND b[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Logical + + + + + Compute the bitwise AND of packed 32-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] AND b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Logical
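The logical entries that follow (AND, ANDNOT, OR, over 32- and 64-bit lanes) all share this shape; only the per-lane expression changes. A zeromasked AND sketch, assuming plain arrays (illustrative name):

// Scalar model of the zeromasked 32-bit AND above. Swapping the lane
// expression for !a[j] & b[j] or a[j] | b[j] gives the andnot/or entries.
fn maskz_and_epi32_model(k: u16, a: [u32; 16], b: [u32; 16]) -> [u32; 16] {
    core::array::from_fn(|j| if (k >> j) & 1 != 0 { a[j] & b[j] } else { 0 })
}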
- - - - - Compute the bitwise NOT of packed 32-bit integers in "a" and then AND with "b", - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := (NOT a[i+31:i]) AND b[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Logical + + + + + Compute the bitwise NOT of packed 32-bit integers in "a" and then AND with "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := (NOT a[i+31:i]) AND b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Logical
- - - - - Compute the bitwise NOT of packed 64-bit integers in "a" and then AND with "b", - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := (NOT a[i+63:i]) AND b[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Logical + + + + + Compute the bitwise NOT of packed 64-bit integers in "a" and then AND with "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := (NOT a[i+63:i]) AND b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Logical
- - - - - Compute the bitwise AND of packed 64-bit integers in "a" and "b", and store the - results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask - bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] AND b[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Logical + + + + + Compute the bitwise AND of packed 64-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] AND b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Logical
- - - - - Compute the bitwise OR of packed 32-bit integers in "a" and "b", and store the - results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask - bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] OR b[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Logical + + + + + Compute the bitwise OR of packed 32-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] OR b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Logical
- - - - - Compute the bitwise OR of packed 64-bit integers in "a" and "b", and store the - results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask - bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] OR b[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Logical + + + + + Compute the bitwise OR of packed 64-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] OR b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Logical
- - - - - - - Bitwise ternary logic that provides the capability to implement any - three-operand binary function; the specific binary function is specified by value in - "imm8". For each bit in each packed 32-bit integer, the corresponding bit from "a", "b", - and "c" are used according to "imm8", and the result is written to the corresponding bit - in "dst" using writemask "k" at 32-bit granularity (32-bit elements are copied from "a" - when the corresponding mask bit is not set). - - DEFINE TernaryOP(imm8, a, b, c) { - CASE imm8[7:0] OF - 0: dst[0] := 0 // imm8[7:0] := 0 - 1: dst[0] := NOT (a OR b OR c) // imm8[7:0] := NOT (_MM_TERNLOG_A OR _MM_TERNLOG_B OR - _MM_TERNLOG_C) - // ... - 254: dst[0] := a OR b OR c // imm8[7:0] := _MM_TERNLOG_A OR _MM_TERNLOG_B OR - _MM_TERNLOG_C - 255: dst[0] := 1 // imm8[7:0] := 1 - ESAC - } - imm8[7:0] = LogicExp(_MM_TERNLOG_A, _MM_TERNLOG_B, _MM_TERNLOG_C) - FOR j := 0 to 15 - i := j*32 - IF k[j] - FOR h := 0 to 31 + + + + + + + Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 32-bit integer, the corresponding bit from "a", "b", and "c" are used according to "imm8", and the result is written to the corresponding bit in "dst" using writemask "k" at 32-bit granularity (32-bit elements are copied from "a" when the corresponding mask bit is not set). + +DEFINE TernaryOP(imm8, a, b, c) { + CASE imm8[7:0] OF + 0: dst[0] := 0 // imm8[7:0] := 0 + 1: dst[0] := NOT (a OR b OR c) // imm8[7:0] := NOT (_MM_TERNLOG_A OR _MM_TERNLOG_B OR _MM_TERNLOG_C) + // ... + 254: dst[0] := a OR b OR c // imm8[7:0] := _MM_TERNLOG_A OR _MM_TERNLOG_B OR _MM_TERNLOG_C + 255: dst[0] := 1 // imm8[7:0] := 1 + ESAC +} +imm8[7:0] = LogicExp(_MM_TERNLOG_A, _MM_TERNLOG_B, _MM_TERNLOG_C) +FOR j := 0 to 15 + i := j*32 + IF k[j] + FOR h := 0 to 31 dst[i+h] := TernaryOP(imm8[7:0], a[i+h], b[i+h], c[i+h]) - ENDFOR - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Logical + ENDFOR + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Logical
- - - - - - - Bitwise ternary logic that provides the capability to implement any - three-operand binary function; the specific binary function is specified by value in - "imm8". For each bit in each packed 32-bit integer, the corresponding bit from "a", "b", - and "c" are used according to "imm8", and the result is written to the corresponding bit - in "dst" using zeromask "k" at 32-bit granularity (32-bit elements are zeroed out when - the corresponding mask bit is not set). - - DEFINE TernaryOP(imm8, a, b, c) { - CASE imm8[7:0] OF - 0: dst[0] := 0 // imm8[7:0] := 0 - 1: dst[0] := NOT (a OR b OR c) // imm8[7:0] := NOT (_MM_TERNLOG_A OR _MM_TERNLOG_B OR - _MM_TERNLOG_C) - // ... - 254: dst[0] := a OR b OR c // imm8[7:0] := _MM_TERNLOG_A OR _MM_TERNLOG_B OR - _MM_TERNLOG_C - 255: dst[0] := 1 // imm8[7:0] := 1 - ESAC - } - imm8[7:0] = LogicExp(_MM_TERNLOG_A, _MM_TERNLOG_B, _MM_TERNLOG_C) - FOR j := 0 to 15 - i := j*32 - IF k[j] - FOR h := 0 to 31 + + + + + + + Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 32-bit integer, the corresponding bit from "a", "b", and "c" are used according to "imm8", and the result is written to the corresponding bit in "dst" using zeromask "k" at 32-bit granularity (32-bit elements are zeroed out when the corresponding mask bit is not set). + +DEFINE TernaryOP(imm8, a, b, c) { + CASE imm8[7:0] OF + 0: dst[0] := 0 // imm8[7:0] := 0 + 1: dst[0] := NOT (a OR b OR c) // imm8[7:0] := NOT (_MM_TERNLOG_A OR _MM_TERNLOG_B OR _MM_TERNLOG_C) + // ... + 254: dst[0] := a OR b OR c // imm8[7:0] := _MM_TERNLOG_A OR _MM_TERNLOG_B OR _MM_TERNLOG_C + 255: dst[0] := 1 // imm8[7:0] := 1 + ESAC +} +imm8[7:0] = LogicExp(_MM_TERNLOG_A, _MM_TERNLOG_B, _MM_TERNLOG_C) +FOR j := 0 to 15 + i := j*32 + IF k[j] + FOR h := 0 to 31 dst[i+h] := TernaryOP(imm8[7:0], a[i+h], b[i+h], c[i+h]) - ENDFOR - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Logical + ENDFOR + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Logical
- - - - - - Bitwise ternary logic that provides the capability to implement any - three-operand binary function; the specific binary function is specified by value in - "imm8". For each bit in each packed 32-bit integer, the corresponding bit from "a", "b", - and "c" are used according to "imm8", and the result is written to the corresponding bit - in "dst". - - DEFINE TernaryOP(imm8, a, b, c) { - CASE imm8[7:0] OF - 0: dst[0] := 0 // imm8[7:0] := 0 - 1: dst[0] := NOT (a OR b OR c) // imm8[7:0] := NOT (_MM_TERNLOG_A OR _MM_TERNLOG_B OR - _MM_TERNLOG_C) - // ... - 254: dst[0] := a OR b OR c // imm8[7:0] := _MM_TERNLOG_A OR _MM_TERNLOG_B OR - _MM_TERNLOG_C - 255: dst[0] := 1 // imm8[7:0] := 1 - ESAC - } - imm8[7:0] = LogicExp(_MM_TERNLOG_A, _MM_TERNLOG_B, _MM_TERNLOG_C) - FOR j := 0 to 15 - i := j*32 - FOR h := 0 to 31 - dst[i+h] := TernaryOP(imm8[7:0], a[i+h], b[i+h], c[i+h]) - ENDFOR - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Logical + + + + + + Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 32-bit integer, the corresponding bit from "a", "b", and "c" are used according to "imm8", and the result is written to the corresponding bit in "dst". + +DEFINE TernaryOP(imm8, a, b, c) { + CASE imm8[7:0] OF + 0: dst[0] := 0 // imm8[7:0] := 0 + 1: dst[0] := NOT (a OR b OR c) // imm8[7:0] := NOT (_MM_TERNLOG_A OR _MM_TERNLOG_B OR _MM_TERNLOG_C) + // ... + 254: dst[0] := a OR b OR c // imm8[7:0] := _MM_TERNLOG_A OR _MM_TERNLOG_B OR _MM_TERNLOG_C + 255: dst[0] := 1 // imm8[7:0] := 1 + ESAC +} +imm8[7:0] = LogicExp(_MM_TERNLOG_A, _MM_TERNLOG_B, _MM_TERNLOG_C) +FOR j := 0 to 15 + i := j*32 + FOR h := 0 to 31 + dst[i+h] := TernaryOP(imm8[7:0], a[i+h], b[i+h], c[i+h]) + ENDFOR +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Logical
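The TernaryOP table above is an 8-entry truth table packed into imm8: for each bit position, the triple (a, b, c) forms a 3-bit index, and the output bit is the indexed bit of imm8. That is why imm8 = 1 yields NOT (a OR b OR c) (only index 0 is set) and imm8 = 254 yields a OR b OR c (every index but 0 is set); the identity patterns are _MM_TERNLOG_A = 0xF0, _MM_TERNLOG_B = 0xCC, _MM_TERNLOG_C = 0xAA. A scalar sketch of the unmasked 32-bit form (illustrative name; a real intrinsic would take imm8 as a const generic):

// Scalar model of the unmasked 32-bit ternary logic above:
// dst bit = imm8[(a_bit << 2) | (b_bit << 1) | c_bit] for every bit.
fn ternarylogic_epi32_model(a: [u32; 16], b: [u32; 16], c: [u32; 16], imm8: u8) -> [u32; 16] {
    core::array::from_fn(|j| {
        let mut dst = 0u32;
        for h in 0..32 {
            let idx = ((a[j] >> h) & 1) << 2 | ((b[j] >> h) & 1) << 1 | ((c[j] >> h) & 1);
            dst |= ((imm8 as u32 >> idx) & 1) << h;
        }
        dst
    })
}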
- - - - - - - Bitwise ternary logic that provides the capability to implement any - three-operand binary function; the specific binary function is specified by value in - "imm8". For each bit in each packed 64-bit integer, the corresponding bit from "a", "b", - and "c" are used according to "imm8", and the result is written to the corresponding bit - in "dst" using writemask "k" at 64-bit granularity (64-bit elements are copied from "a" - when the corresponding mask bit is not set). - - DEFINE TernaryOP(imm8, a, b, c) { - CASE imm8[7:0] OF - 0: dst[0] := 0 // imm8[7:0] := 0 - 1: dst[0] := NOT (a OR b OR c) // imm8[7:0] := NOT (_MM_TERNLOG_A OR _MM_TERNLOG_B OR - _MM_TERNLOG_C) - // ... - 254: dst[0] := a OR b OR c // imm8[7:0] := _MM_TERNLOG_A OR _MM_TERNLOG_B OR - _MM_TERNLOG_C - 255: dst[0] := 1 // imm8[7:0] := 1 - ESAC - } - imm8[7:0] = LogicExp(_MM_TERNLOG_A, _MM_TERNLOG_B, _MM_TERNLOG_C) - FOR j := 0 to 7 - i := j*64 - IF k[j] - FOR h := 0 to 63 + + + + + + + Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 64-bit integer, the corresponding bit from "a", "b", and "c" are used according to "imm8", and the result is written to the corresponding bit in "dst" using writemask "k" at 64-bit granularity (64-bit elements are copied from "a" when the corresponding mask bit is not set). + +DEFINE TernaryOP(imm8, a, b, c) { + CASE imm8[7:0] OF + 0: dst[0] := 0 // imm8[7:0] := 0 + 1: dst[0] := NOT (a OR b OR c) // imm8[7:0] := NOT (_MM_TERNLOG_A OR _MM_TERNLOG_B OR _MM_TERNLOG_C) + // ... + 254: dst[0] := a OR b OR c // imm8[7:0] := _MM_TERNLOG_A OR _MM_TERNLOG_B OR _MM_TERNLOG_C + 255: dst[0] := 1 // imm8[7:0] := 1 + ESAC +} +imm8[7:0] = LogicExp(_MM_TERNLOG_A, _MM_TERNLOG_B, _MM_TERNLOG_C) +FOR j := 0 to 7 + i := j*64 + IF k[j] + FOR h := 0 to 63 dst[i+h] := TernaryOP(imm8[7:0], a[i+h], b[i+h], c[i+h]) - ENDFOR - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Logical + ENDFOR + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Logical
- - - - - - - Bitwise ternary logic that provides the capability to implement any - three-operand binary function; the specific binary function is specified by value in - "imm8". For each bit in each packed 64-bit integer, the corresponding bit from "a", "b", - and "c" are used according to "imm8", and the result is written to the corresponding bit - in "dst" using zeromask "k" at 64-bit granularity (64-bit elements are zeroed out when - the corresponding mask bit is not set). - - DEFINE TernaryOP(imm8, a, b, c) { - CASE imm8[7:0] OF - 0: dst[0] := 0 // imm8[7:0] := 0 - 1: dst[0] := NOT (a OR b OR c) // imm8[7:0] := NOT (_MM_TERNLOG_A OR _MM_TERNLOG_B OR - _MM_TERNLOG_C) - // ... - 254: dst[0] := a OR b OR c // imm8[7:0] := _MM_TERNLOG_A OR _MM_TERNLOG_B OR - _MM_TERNLOG_C - 255: dst[0] := 1 // imm8[7:0] := 1 - ESAC - } - imm8[7:0] = LogicExp(_MM_TERNLOG_A, _MM_TERNLOG_B, _MM_TERNLOG_C) - FOR j := 0 to 7 - i := j*64 - IF k[j] - FOR h := 0 to 63 + + + + + + + Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 64-bit integer, the corresponding bit from "a", "b", and "c" are used according to "imm8", and the result is written to the corresponding bit in "dst" using zeromask "k" at 64-bit granularity (64-bit elements are zeroed out when the corresponding mask bit is not set). + +DEFINE TernaryOP(imm8, a, b, c) { + CASE imm8[7:0] OF + 0: dst[0] := 0 // imm8[7:0] := 0 + 1: dst[0] := NOT (a OR b OR c) // imm8[7:0] := NOT (_MM_TERNLOG_A OR _MM_TERNLOG_B OR _MM_TERNLOG_C) + // ... + 254: dst[0] := a OR b OR c // imm8[7:0] := _MM_TERNLOG_A OR _MM_TERNLOG_B OR _MM_TERNLOG_C + 255: dst[0] := 1 // imm8[7:0] := 1 + ESAC +} +imm8[7:0] = LogicExp(_MM_TERNLOG_A, _MM_TERNLOG_B, _MM_TERNLOG_C) +FOR j := 0 to 7 + i := j*64 + IF k[j] + FOR h := 0 to 63 dst[i+h] := TernaryOP(imm8[7:0], a[i+h], b[i+h], c[i+h]) - ENDFOR - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Logical + ENDFOR + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Logical
- - - - - - Bitwise ternary logic that provides the capability to implement any - three-operand binary function; the specific binary function is specified by value in - "imm8". For each bit in each packed 64-bit integer, the corresponding bit from "a", "b", - and "c" are used according to "imm8", and the result is written to the corresponding bit - in "dst". - - DEFINE TernaryOP(imm8, a, b, c) { - CASE imm8[7:0] OF - 0: dst[0] := 0 // imm8[7:0] := 0 - 1: dst[0] := NOT (a OR b OR c) // imm8[7:0] := NOT (_MM_TERNLOG_A OR _MM_TERNLOG_B OR - _MM_TERNLOG_C) - // ... - 254: dst[0] := a OR b OR c // imm8[7:0] := _MM_TERNLOG_A OR _MM_TERNLOG_B OR - _MM_TERNLOG_C - 255: dst[0] := 1 // imm8[7:0] := 1 - ESAC - } - imm8[7:0] = LogicExp(_MM_TERNLOG_A, _MM_TERNLOG_B, _MM_TERNLOG_C) - FOR j := 0 to 7 - i := j*64 - FOR h := 0 to 63 - dst[i+h] := TernaryOP(imm8[7:0], a[i+h], b[i+h], c[i+h]) - ENDFOR - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Logical + + + + + + Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 64-bit integer, the corresponding bit from "a", "b", and "c" are used according to "imm8", and the result is written to the corresponding bit in "dst". + +DEFINE TernaryOP(imm8, a, b, c) { + CASE imm8[7:0] OF + 0: dst[0] := 0 // imm8[7:0] := 0 + 1: dst[0] := NOT (a OR b OR c) // imm8[7:0] := NOT (_MM_TERNLOG_A OR _MM_TERNLOG_B OR _MM_TERNLOG_C) + // ... + 254: dst[0] := a OR b OR c // imm8[7:0] := _MM_TERNLOG_A OR _MM_TERNLOG_B OR _MM_TERNLOG_C + 255: dst[0] := 1 // imm8[7:0] := 1 + ESAC +} +imm8[7:0] = LogicExp(_MM_TERNLOG_A, _MM_TERNLOG_B, _MM_TERNLOG_C) +FOR j := 0 to 7 + i := j*64 + FOR h := 0 to 63 + dst[i+h] := TernaryOP(imm8[7:0], a[i+h], b[i+h], c[i+h]) + ENDFOR +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Logical
-
-
-
-
- Compute the bitwise AND of packed 64-bit integers in "a" and "b", producing
- intermediate 64-bit values, and set the corresponding bit in result mask "k" (subject to
- writemask "k") if the intermediate value is non-zero.
-
- FOR j := 0 to 7
- i := j*64
- IF k1[j]
- k[j] := ((a[i+63:i] AND b[i+63:i]) != 0) ? 1 : 0
- ELSE
- k[j] := 0
- FI
- ENDFOR
- k[MAX:8] := 0
-
-
- AVX512F
-
+
+
+
+
+ Compute the bitwise AND of packed 64-bit integers in "a" and "b", producing intermediate 64-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k1") if the intermediate value is non-zero.
+
+FOR j := 0 to 7
+ i := j*64
+ IF k1[j]
+ k[j] := ((a[i+63:i] AND b[i+63:i]) != 0) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+
+
+ AVX512F
+
immintrin.h
- Logical + + + + + Compute the bitwise AND of packed 64-bit integers in "a" and "b", producing intermediate 64-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is non-zero. + +FOR j := 0 to 7 + i := j*64 + IF k1[j] + k[j] := ((a[i+63:i] AND b[i+63:i]) != 0) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + + AVX512F +
immintrin.h
+ Logical
- - - - Compute the bitwise AND of packed 64-bit integers in "a" and "b", producing - intermediate 64-bit values, and set the corresponding bit in result mask "k" if the - intermediate value is non-zero. - - FOR j := 0 to 7 - i := j*64 - k[j] := ((a[i+63:i] AND b[i+63:i]) != 0) ? 1 : 0 - ENDFOR - k[MAX:8] := 0 - - - AVX512F -
immintrin.h
- Logical + + + + Compute the bitwise AND of packed 64-bit integers in "a" and "b", producing intermediate 64-bit values, and set the corresponding bit in result mask "k" if the intermediate value is non-zero. + +FOR j := 0 to 7 + i := j*64 + k[j] := ((a[i+63:i] AND b[i+63:i]) != 0) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + + AVX512F +
immintrin.h
+ Logical
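In scalar terms the test-mask entries reduce each lane to a single mask bit, and the masked form (what Intel's guide calls _mm512_mask_test_epi64_mask) additionally clears result bits whose "k1" bit is clear. A sketch with illustrative names:

// Scalar model of the masked test reduction: a result bit is set only when
// the k1 bit for that lane is set and the lane-wise AND is non-zero.
fn mask_test_epi64(k1: u8, a: [u64; 8], b: [u64; 8]) -> u8 {
    let mut k = 0u8;
    for j in 0..8 {
        if (k1 >> j) & 1 == 1 && (a[j] & b[j]) != 0 {
            k |= 1 << j;
        }
    }
    k
}

fn main() {
    let mut a = [0u64; 8];
    let mut b = [0u64; 8];
    a[0] = 0b0011; b[0] = 0b0010; // AND != 0 -> bit set
    a[1] = 0b0100; b[1] = 0b0011; // AND == 0 -> bit clear
    assert_eq!(mask_test_epi64(0b0000_0011, a, b), 0b0000_0001);
}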
-
-
-
-
- Compute the bitwise NAND of packed 32-bit integers in "a" and "b", producing
- intermediate 32-bit values, and set the corresponding bit in result mask "k" (subject to
- writemask "k") if the intermediate value is zero.
-
- FOR j := 0 to 15
- i := j*32
- IF k1[j]
- k[j] := ((a[i+31:i] AND b[i+31:i]) == 0) ? 1 : 0
- ELSE
- k[j] := 0
- FI
- ENDFOR
- k[MAX:16] := 0
-
-
- AVX512F
-
+
+
+
+
+ Compute the bitwise NAND of packed 32-bit integers in "a" and "b", producing intermediate 32-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k1") if the intermediate value is zero.
+
+FOR j := 0 to 15
+ i := j*32
+ IF k1[j]
+ k[j] := ((a[i+31:i] AND b[i+31:i]) == 0) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+
+
+ AVX512F
+
immintrin.h
- Logical + + + + + Compute the bitwise NAND of packed 32-bit integers in "a" and "b", producing intermediate 32-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is zero. + +FOR j := 0 to 15 + i := j*32 + IF k1[j] + k[j] := ((a[i+31:i] AND b[i+31:i]) == 0) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + + AVX512F +
immintrin.h
+ Logical
- - - - Compute the bitwise NAND of packed 32-bit integers in "a" and "b", producing - intermediate 32-bit values, and set the corresponding bit in result mask "k" if the - intermediate value is zero. - - FOR j := 0 to 15 - i := j*32 - k[j] := ((a[i+31:i] AND b[i+31:i]) == 0) ? 1 : 0 - ENDFOR - k[MAX:16] := 0 - - - AVX512F -
immintrin.h
- Logical + + + + Compute the bitwise NAND of packed 32-bit integers in "a" and "b", producing intermediate 32-bit values, and set the corresponding bit in result mask "k" if the intermediate value is zero. + +FOR j := 0 to 15 + i := j*32 + k[j] := ((a[i+31:i] AND b[i+31:i]) == 0) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + + AVX512F +
immintrin.h
+ Logical
-
-
-
-
- Compute the bitwise NAND of packed 64-bit integers in "a" and "b", producing
- intermediate 64-bit values, and set the corresponding bit in result mask "k" (subject to
- writemask "k") if the intermediate value is zero.
-
- FOR j := 0 to 7
- i := j*64
- IF k1[j]
- k[j] := ((a[i+63:i] AND b[i+63:i]) == 0) ? 1 : 0
- ELSE
- k[j] := 0
- FI
- ENDFOR
- k[MAX:8] := 0
-
-
- AVX512F
-
+
+
+
+
+ Compute the bitwise NAND of packed 64-bit integers in "a" and "b", producing intermediate 64-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k1") if the intermediate value is zero.
+
+FOR j := 0 to 7
+ i := j*64
+ IF k1[j]
+ k[j] := ((a[i+63:i] AND b[i+63:i]) == 0) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+
+
+ AVX512F
+
immintrin.h
- Logical + + + + + Compute the bitwise NAND of packed 64-bit integers in "a" and "b", producing intermediate 64-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is zero. + +FOR j := 0 to 7 + i := j*64 + IF k1[j] + k[j] := ((a[i+63:i] AND b[i+63:i]) == 0) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + + AVX512F +
immintrin.h
+ Logical
- - - - Compute the bitwise NAND of packed 64-bit integers in "a" and "b", producing - intermediate 64-bit values, and set the corresponding bit in result mask "k" if the - intermediate value is zero. - - FOR j := 0 to 7 - i := j*64 - k[j] := ((a[i+63:i] AND b[i+63:i]) == 0) ? 1 : 0 - ENDFOR - k[MAX:8] := 0 - - - AVX512F -
immintrin.h
- Logical + + + + Compute the bitwise NAND of packed 64-bit integers in "a" and "b", producing intermediate 64-bit values, and set the corresponding bit in result mask "k" if the intermediate value is zero. + +FOR j := 0 to 7 + i := j*64 + k[j] := ((a[i+63:i] AND b[i+63:i]) == 0) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + + AVX512F +
immintrin.h
+ Logical
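The testn entries are the same reduction with inverted polarity: the mask bit is set when the lane-wise AND is zero. A scalar sketch over 32-bit lanes (illustrative names):

// Scalar model of the masked testn reduction: the result bit is set when
// (a AND b) is zero; lanes with a clear k1 bit always produce 0.
fn mask_testn_epi32(k1: u16, a: [u32; 16], b: [u32; 16]) -> u16 {
    let mut k = 0u16;
    for j in 0..16 {
        if (k1 >> j) & 1 == 1 && (a[j] & b[j]) == 0 {
            k |= 1 << j;
        }
    }
    k
}

fn main() {
    // Disjoint bit patterns in every lane, so all 16 result bits come back set.
    assert_eq!(mask_testn_epi32(0xFFFF, [0b01; 16], [0b10; 16]), 0xFFFF);
}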
- - - - - Compute the bitwise XOR of packed 32-bit integers in "a" and "b", and store the - results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask - bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] XOR b[i+31:i] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Logical + + + + + Compute the bitwise XOR of packed 32-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] XOR b[i+31:i] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Logical
- - - - - Compute the bitwise XOR of packed 64-bit integers in "a" and "b", and store the - results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask - bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] XOR b[i+63:i] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Logical + + + + + Compute the bitwise XOR of packed 64-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] XOR b[i+63:i] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Logical
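The zeromask convention used by these XOR entries (and throughout the file) is simple to model: a lane whose mask bit is clear is written as zero rather than copied from anywhere. A scalar sketch of the 64-bit form (illustrative names):

// Scalar model of a maskz (zeromask) lane-wise XOR.
fn maskz_xor_epi64(k: u8, a: [u64; 8], b: [u64; 8]) -> [u64; 8] {
    let mut dst = [0u64; 8];
    for j in 0..8 {
        if (k >> j) & 1 == 1 {
            dst[j] = a[j] ^ b[j];
        }
    }
    dst
}

fn main() {
    let r = maskz_xor_epi64(0b0000_0011, [3; 8], [1; 8]);
    assert_eq!(r, [2, 2, 0, 0, 0, 0, 0, 0]); // masked-off lanes are zeroed
}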
- - - Broadcast 8-bit integer "a" to all elements of "dst". - - FOR j := 0 to 63 - i := j*8 - dst[i+7:i] := a[7:0] - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Set + + + Broadcast 8-bit integer "a" to all elements of "dst". + +FOR j := 0 to 63 + i := j*8 + dst[i+7:i] := a[7:0] +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Set
- - - - - Broadcast 32-bit integer "a" to all elements of "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := a[31:0] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Set + + + + + Broadcast 32-bit integer "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[31:0] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Set
- - - - Broadcast 32-bit integer "a" to all elements of "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := a[31:0] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Set + + + + Broadcast 32-bit integer "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[31:0] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Set
- - - Broadcast 32-bit integer "a" to all elements of "dst". - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := a[31:0] - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Set + + + Broadcast 32-bit integer "a" to all elements of "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := a[31:0] +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Set
- - - - - Broadcast 64-bit integer "a" to all elements of "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := a[63:0] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Set + + + + + Broadcast 64-bit integer "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[63:0] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Set
- - - - Broadcast 64-bit integer "a" to all elements of "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := a[63:0] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Set + + + + Broadcast 64-bit integer "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[63:0] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Set
- - - Broadcast 64-bit integer "a" to all elements of "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := a[63:0] - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Set + + + Broadcast 64-bit integer "a" to all elements of "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := a[63:0] +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Set
-
-
- Broadcast the low packed 16-bit integer from "a" to all all elements of "dst".
-
- FOR j := 0 to 31
- i := j*16
- dst[i+15:i] := a[15:0]
- ENDFOR
- dst[MAX:512] := 0
-
-
- AVX512F
-
+
+
+ Broadcast the low packed 16-bit integer from "a" to all elements of "dst".
+
+FOR j := 0 to 31
+ i := j*16
+ dst[i+15:i] := a[15:0]
+ENDFOR
+dst[MAX:512] := 0
+
+
+ AVX512F
+
immintrin.h
- Set + + + Broadcast the low packed 16-bit integer from "a" to all all elements of "dst". + +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := a[15:0] +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Set
- - - Broadcast double-precision (64-bit) floating-point value "a" to all elements of - "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := a[63:0] - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Set + + + Broadcast double-precision (64-bit) floating-point value "a" to all elements of "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := a[63:0] +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Set
- - - Broadcast single-precision (32-bit) floating-point value "a" to all elements of - "dst". - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := a[31:0] - ENDFOR - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Set + + + Broadcast single-precision (32-bit) floating-point value "a" to all elements of "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := a[31:0] +ENDFOR +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Set
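All of the broadcast ("set1") entries reduce to lane replication; the writemask and zeromask variants differ only in what the untouched lanes receive. A scalar sketch over 32-bit lanes (illustrative names):

// Scalar model of the broadcast family: set1 replicates the scalar, and the
// writemask form keeps the "src" lane wherever the mask bit is clear.
fn set1_epi32(a: i32) -> [i32; 16] {
    [a; 16]
}

fn mask_set1_epi32(src: [i32; 16], k: u16, a: i32) -> [i32; 16] {
    let mut dst = src;
    for j in 0..16 {
        if (k >> j) & 1 == 1 {
            dst[j] = a;
        }
    }
    dst
}

fn main() {
    assert_eq!(set1_epi32(7), [7; 16]);
    let r = mask_set1_epi32([5; 16], 0b01, 7);
    assert_eq!((r[0], r[1]), (7, 5)); // only lane 0 is overwritten
}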
- - - - - - Set packed 32-bit integers in "dst" with the repeated 4 element sequence. - - dst[31:0] := a - dst[63:32] := b - dst[95:64] := c - dst[127:96] := d - dst[159:128] := a - dst[191:160] := b - dst[223:192] := c - dst[255:224] := d - dst[287:256] := a - dst[319:288] := b - dst[351:320] := c - dst[383:352] := d - dst[415:384] := a - dst[447:416] := b - dst[479:448] := c - dst[511:480] := d - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Set + + + + + + Set packed 32-bit integers in "dst" with the repeated 4 element sequence. + +dst[31:0] := a +dst[63:32] := b +dst[95:64] := c +dst[127:96] := d +dst[159:128] := a +dst[191:160] := b +dst[223:192] := c +dst[255:224] := d +dst[287:256] := a +dst[319:288] := b +dst[351:320] := c +dst[383:352] := d +dst[415:384] := a +dst[447:416] := b +dst[479:448] := c +dst[511:480] := d +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Set
- - - - - - Set packed 64-bit integers in "dst" with the repeated 4 element sequence. - - dst[63:0] := a - dst[127:64] := b - dst[191:128] := c - dst[255:192] := d - dst[319:256] := a - dst[383:320] := b - dst[447:384] := c - dst[511:448] := d - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Set + + + + + + Set packed 64-bit integers in "dst" with the repeated 4 element sequence. + +dst[63:0] := a +dst[127:64] := b +dst[191:128] := c +dst[255:192] := d +dst[319:256] := a +dst[383:320] := b +dst[447:384] := c +dst[511:448] := d +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Set
- - - - - - Set packed double-precision (64-bit) floating-point elements in "dst" with the - repeated 4 element sequence. - - dst[63:0] := a - dst[127:64] := b - dst[191:128] := c - dst[255:192] := d - dst[319:256] := a - dst[383:320] := b - dst[447:384] := c - dst[511:448] := d - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Set + + + + + + Set packed double-precision (64-bit) floating-point elements in "dst" with the repeated 4 element sequence. + +dst[63:0] := a +dst[127:64] := b +dst[191:128] := c +dst[255:192] := d +dst[319:256] := a +dst[383:320] := b +dst[447:384] := c +dst[511:448] := d +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Set
- - - - - - Set packed single-precision (32-bit) floating-point elements in "dst" with the - repeated 4 element sequence. - - dst[31:0] := a - dst[63:32] := b - dst[95:64] := c - dst[127:96] := d - dst[159:128] := a - dst[191:160] := b - dst[223:192] := c - dst[255:224] := d - dst[287:256] := a - dst[319:288] := b - dst[351:320] := c - dst[383:352] := d - dst[415:384] := a - dst[447:416] := b - dst[479:448] := c - dst[511:480] := d - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Set + + + + + + Set packed single-precision (32-bit) floating-point elements in "dst" with the repeated 4 element sequence. + +dst[31:0] := a +dst[63:32] := b +dst[95:64] := c +dst[127:96] := d +dst[159:128] := a +dst[191:160] := b +dst[223:192] := c +dst[255:224] := d +dst[287:256] := a +dst[319:288] := b +dst[351:320] := c +dst[383:352] := d +dst[415:384] := a +dst[447:416] := b +dst[479:448] := c +dst[511:480] := d +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Set
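The set4 entries tile one 4-element pattern across the register, so "a" reappears in the lowest lane of every 128-bit group. A scalar sketch (illustrative names):

// Scalar model of the set4 tiling: the (a, b, c, d) pattern repeats four
// times, with "a" in the lowest lane of each repetition.
fn set4_epi32(a: i32, b: i32, c: i32, d: i32) -> [i32; 16] {
    let mut dst = [0i32; 16];
    for group in dst.chunks_exact_mut(4) {
        group.copy_from_slice(&[a, b, c, d]);
    }
    dst
}

fn main() {
    let v = set4_epi32(1, 2, 3, 4);
    assert_eq!(&v[..4], &[1, 2, 3, 4]);
    assert_eq!(v[4], 1); // the pattern restarts every four lanes
}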
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Set packed 8-bit integers in "dst" with the supplied values. - - dst[7:0] := e0 - dst[15:8] := e1 - dst[23:16] := e2 - dst[31:24] := e3 - dst[39:32] := e4 - dst[47:40] := e5 - dst[55:48] := e6 - dst[63:56] := e7 - dst[71:64] := e8 - dst[79:72] := e9 - dst[87:80] := e10 - dst[95:88] := e11 - dst[103:96] := e12 - dst[111:104] := e13 - dst[119:112] := e14 - dst[127:120] := e15 - dst[135:128] := e16 - dst[143:136] := e17 - dst[151:144] := e18 - dst[159:152] := e19 - dst[167:160] := e20 - dst[175:168] := e21 - dst[183:176] := e22 - dst[191:184] := e23 - dst[199:192] := e24 - dst[207:200] := e25 - dst[215:208] := e26 - dst[223:216] := e27 - dst[231:224] := e28 - dst[239:232] := e29 - dst[247:240] := e30 - dst[255:248] := e31 - dst[263:256] := e32 - dst[271:264] := e33 - dst[279:272] := e34 - dst[287:280] := e35 - dst[295:288] := e36 - dst[303:296] := e37 - dst[311:304] := e38 - dst[319:312] := e39 - dst[327:320] := e40 - dst[335:328] := e41 - dst[343:336] := e42 - dst[351:344] := e43 - dst[359:352] := e44 - dst[367:360] := e45 - dst[375:368] := e46 - dst[383:376] := e47 - dst[391:384] := e48 - dst[399:392] := e49 - dst[407:400] := e50 - dst[415:408] := e51 - dst[423:416] := e52 - dst[431:424] := e53 - dst[439:432] := e54 - dst[447:440] := e55 - dst[455:448] := e56 - dst[463:456] := e57 - dst[471:464] := e58 - dst[479:472] := e59 - dst[487:480] := e60 - dst[495:488] := e61 - dst[503:496] := e62 - dst[511:504] := e63 - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Set + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Set packed 8-bit integers in "dst" with the supplied values. + +dst[7:0] := e0 +dst[15:8] := e1 +dst[23:16] := e2 +dst[31:24] := e3 +dst[39:32] := e4 +dst[47:40] := e5 +dst[55:48] := e6 +dst[63:56] := e7 +dst[71:64] := e8 +dst[79:72] := e9 +dst[87:80] := e10 +dst[95:88] := e11 +dst[103:96] := e12 +dst[111:104] := e13 +dst[119:112] := e14 +dst[127:120] := e15 +dst[135:128] := e16 +dst[143:136] := e17 +dst[151:144] := e18 +dst[159:152] := e19 +dst[167:160] := e20 +dst[175:168] := e21 +dst[183:176] := e22 +dst[191:184] := e23 +dst[199:192] := e24 +dst[207:200] := e25 +dst[215:208] := e26 +dst[223:216] := e27 +dst[231:224] := e28 +dst[239:232] := e29 +dst[247:240] := e30 +dst[255:248] := e31 +dst[263:256] := e32 +dst[271:264] := e33 +dst[279:272] := e34 +dst[287:280] := e35 +dst[295:288] := e36 +dst[303:296] := e37 +dst[311:304] := e38 +dst[319:312] := e39 +dst[327:320] := e40 +dst[335:328] := e41 +dst[343:336] := e42 +dst[351:344] := e43 +dst[359:352] := e44 +dst[367:360] := e45 +dst[375:368] := e46 +dst[383:376] := e47 +dst[391:384] := e48 +dst[399:392] := e49 +dst[407:400] := e50 +dst[415:408] := e51 +dst[423:416] := e52 +dst[431:424] := e53 +dst[439:432] := e54 +dst[447:440] := e55 +dst[455:448] := e56 +dst[463:456] := e57 +dst[471:464] := e58 +dst[479:472] := e59 +dst[487:480] := e60 +dst[495:488] := e61 +dst[503:496] := e62 +dst[511:504] := e63 +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Set
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Set packed 16-bit integers in "dst" with the supplied values. - - dst[15:0] := e0 - dst[31:16] := e1 - dst[47:32] := e2 - dst[63:48] := e3 - dst[79:64] := e4 - dst[95:80] := e5 - dst[111:96] := e6 - dst[127:112] := e7 - dst[143:128] := e8 - dst[159:144] := e9 - dst[175:160] := e10 - dst[191:176] := e11 - dst[207:192] := e12 - dst[223:208] := e13 - dst[239:224] := e14 - dst[255:240] := e15 - dst[271:256] := e16 - dst[287:272] := e17 - dst[303:288] := e18 - dst[319:304] := e19 - dst[335:320] := e20 - dst[351:336] := e21 - dst[367:352] := e22 - dst[383:368] := e23 - dst[399:384] := e24 - dst[415:400] := e25 - dst[431:416] := e26 - dst[447:432] := e27 - dst[463:448] := e28 - dst[479:464] := e29 - dst[495:480] := e30 - dst[511:496] := e31 - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Set + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Set packed 16-bit integers in "dst" with the supplied values. + +dst[15:0] := e0 +dst[31:16] := e1 +dst[47:32] := e2 +dst[63:48] := e3 +dst[79:64] := e4 +dst[95:80] := e5 +dst[111:96] := e6 +dst[127:112] := e7 +dst[143:128] := e8 +dst[159:144] := e9 +dst[175:160] := e10 +dst[191:176] := e11 +dst[207:192] := e12 +dst[223:208] := e13 +dst[239:224] := e14 +dst[255:240] := e15 +dst[271:256] := e16 +dst[287:272] := e17 +dst[303:288] := e18 +dst[319:304] := e19 +dst[335:320] := e20 +dst[351:336] := e21 +dst[367:352] := e22 +dst[383:368] := e23 +dst[399:384] := e24 +dst[415:400] := e25 +dst[431:416] := e26 +dst[447:432] := e27 +dst[463:448] := e28 +dst[479:464] := e29 +dst[495:480] := e30 +dst[511:496] := e31 +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Set
- - - - - - - - - - - - - - - - - - Set packed 32-bit integers in "dst" with the supplied values. - - dst[31:0] := e0 - dst[63:32] := e1 - dst[95:64] := e2 - dst[127:96] := e3 - dst[159:128] := e4 - dst[191:160] := e5 - dst[223:192] := e6 - dst[255:224] := e7 - dst[287:256] := e8 - dst[319:288] := e9 - dst[351:320] := e10 - dst[383:352] := e11 - dst[415:384] := e12 - dst[447:416] := e13 - dst[479:448] := e14 - dst[511:480] := e15 - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Set + + + + + + + + + + + + + + + + + + Set packed 32-bit integers in "dst" with the supplied values. + +dst[31:0] := e0 +dst[63:32] := e1 +dst[95:64] := e2 +dst[127:96] := e3 +dst[159:128] := e4 +dst[191:160] := e5 +dst[223:192] := e6 +dst[255:224] := e7 +dst[287:256] := e8 +dst[319:288] := e9 +dst[351:320] := e10 +dst[383:352] := e11 +dst[415:384] := e12 +dst[447:416] := e13 +dst[479:448] := e14 +dst[511:480] := e15 +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Set
- - - - - - - - - - Set packed 64-bit integers in "dst" with the supplied values. - - dst[63:0] := e0 - dst[127:64] := e1 - dst[191:128] := e2 - dst[255:192] := e3 - dst[319:256] := e4 - dst[383:320] := e5 - dst[447:384] := e6 - dst[511:448] := e7 - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Set + + + + + + + + + + Set packed 64-bit integers in "dst" with the supplied values. + +dst[63:0] := e0 +dst[127:64] := e1 +dst[191:128] := e2 +dst[255:192] := e3 +dst[319:256] := e4 +dst[383:320] := e5 +dst[447:384] := e6 +dst[511:448] := e7 +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Set
- - - - - - - - - - Set packed double-precision (64-bit) floating-point elements in "dst" with the - supplied values. - - dst[63:0] := e0 - dst[127:64] := e1 - dst[191:128] := e2 - dst[255:192] := e3 - dst[319:256] := e4 - dst[383:320] := e5 - dst[447:384] := e6 - dst[511:448] := e7 - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Set + + + + + + + + + + Set packed double-precision (64-bit) floating-point elements in "dst" with the supplied values. + +dst[63:0] := e0 +dst[127:64] := e1 +dst[191:128] := e2 +dst[255:192] := e3 +dst[319:256] := e4 +dst[383:320] := e5 +dst[447:384] := e6 +dst[511:448] := e7 +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Set
- - - - - - - - - - - - - - - - - - Set packed single-precision (32-bit) floating-point elements in "dst" with the - supplied values. - - dst[31:0] := e0 - dst[63:32] := e1 - dst[95:64] := e2 - dst[127:96] := e3 - dst[159:128] := e4 - dst[191:160] := e5 - dst[223:192] := e6 - dst[255:224] := e7 - dst[287:256] := e8 - dst[319:288] := e9 - dst[351:320] := e10 - dst[383:352] := e11 - dst[415:384] := e12 - dst[447:416] := e13 - dst[479:448] := e14 - dst[511:480] := e15 - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Set + + + + + + + + + + + + + + + + + + Set packed single-precision (32-bit) floating-point elements in "dst" with the supplied values. + +dst[31:0] := e0 +dst[63:32] := e1 +dst[95:64] := e2 +dst[127:96] := e3 +dst[159:128] := e4 +dst[191:160] := e5 +dst[223:192] := e6 +dst[255:224] := e7 +dst[287:256] := e8 +dst[319:288] := e9 +dst[351:320] := e10 +dst[383:352] := e11 +dst[415:384] := e12 +dst[447:416] := e13 +dst[479:448] := e14 +dst[511:480] := e15 +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Set
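One detail worth making explicit for the set entries: e0 always occupies dst[31:0] (the lowest-addressed lane on a little-endian store), even though the leftmost argument of the C prototype is the highest-numbered element. A byte-level scalar sketch (illustrative names):

// Scalar model of the set_epi32 lane layout: element j occupies bits
// [32*j+31:32*j], so e0 is the first four bytes of a little-endian store.
fn set_epi32_bytes(e: [i32; 16]) -> [u8; 64] {
    let mut dst = [0u8; 64];
    for (j, lane) in e.iter().enumerate() {
        dst[j * 4..j * 4 + 4].copy_from_slice(&lane.to_le_bytes());
    }
    dst
}

fn main() {
    let mut e = [0i32; 16];
    e[0] = 1; // e0
    e[15] = 2; // e15
    let bytes = set_epi32_bytes(e);
    assert_eq!(bytes[0], 1); // e0 lands at the lowest address
    assert_eq!(bytes[60], 2); // e15 occupies the highest lane
}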
- - - - - - Set packed 32-bit integers in "dst" with the repeated 4 element sequence in - reverse order. - - dst[31:0] := d - dst[63:32] := c - dst[95:64] := b - dst[127:96] := a - dst[159:128] := d - dst[191:160] := c - dst[223:192] := b - dst[255:224] := a - dst[287:256] := d - dst[319:288] := c - dst[351:320] := b - dst[383:352] := a - dst[415:384] := d - dst[447:416] := c - dst[479:448] := b - dst[511:480] := a - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Set + + + + + + Set packed 32-bit integers in "dst" with the repeated 4 element sequence in reverse order. + +dst[31:0] := d +dst[63:32] := c +dst[95:64] := b +dst[127:96] := a +dst[159:128] := d +dst[191:160] := c +dst[223:192] := b +dst[255:224] := a +dst[287:256] := d +dst[319:288] := c +dst[351:320] := b +dst[383:352] := a +dst[415:384] := d +dst[447:416] := c +dst[479:448] := b +dst[511:480] := a +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Set
- - - - - - Set packed 64-bit integers in "dst" with the repeated 4 element sequence in - reverse order. - - dst[63:0] := d - dst[127:64] := c - dst[191:128] := b - dst[255:192] := a - dst[319:256] := d - dst[383:320] := c - dst[447:384] := b - dst[511:448] := a - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Set + + + + + + Set packed 64-bit integers in "dst" with the repeated 4 element sequence in reverse order. + +dst[63:0] := d +dst[127:64] := c +dst[191:128] := b +dst[255:192] := a +dst[319:256] := d +dst[383:320] := c +dst[447:384] := b +dst[511:448] := a +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Set
- - - - - - Set packed double-precision (64-bit) floating-point elements in "dst" with the - repeated 4 element sequence in reverse order. - - dst[63:0] := d - dst[127:64] := c - dst[191:128] := b - dst[255:192] := a - dst[319:256] := d - dst[383:320] := c - dst[447:384] := b - dst[511:448] := a - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Set + + + + + + Set packed double-precision (64-bit) floating-point elements in "dst" with the repeated 4 element sequence in reverse order. + +dst[63:0] := d +dst[127:64] := c +dst[191:128] := b +dst[255:192] := a +dst[319:256] := d +dst[383:320] := c +dst[447:384] := b +dst[511:448] := a +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Set
- - - - - - Set packed single-precision (32-bit) floating-point elements in "dst" with the - repeated 4 element sequence in reverse order. - - dst[31:0] := d - dst[63:32] := c - dst[95:64] := b - dst[127:96] := a - dst[159:128] := d - dst[191:160] := c - dst[223:192] := b - dst[255:224] := a - dst[287:256] := d - dst[319:288] := c - dst[351:320] := b - dst[383:352] := a - dst[415:384] := d - dst[447:416] := c - dst[479:448] := b - dst[511:480] := a - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Set + + + + + + Set packed single-precision (32-bit) floating-point elements in "dst" with the repeated 4 element sequence in reverse order. + +dst[31:0] := d +dst[63:32] := c +dst[95:64] := b +dst[127:96] := a +dst[159:128] := d +dst[191:160] := c +dst[223:192] := b +dst[255:224] := a +dst[287:256] := d +dst[319:288] := c +dst[351:320] := b +dst[383:352] := a +dst[415:384] := d +dst[447:416] := c +dst[479:448] := b +dst[511:480] := a +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Set
- - - - - - - - - - - - - - - - - - Set packed 32-bit integers in "dst" with the supplied values in reverse order. - - dst[31:0] := e15 - dst[63:32] := e14 - dst[95:64] := e13 - dst[127:96] := e12 - dst[159:128] := e11 - dst[191:160] := e10 - dst[223:192] := e9 - dst[255:224] := e8 - dst[287:256] := e7 - dst[319:288] := e6 - dst[351:320] := e5 - dst[383:352] := e4 - dst[415:384] := e3 - dst[447:416] := e2 - dst[479:448] := e1 - dst[511:480] := e0 - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Set + + + + + + + + + + + + + + + + + + Set packed 32-bit integers in "dst" with the supplied values in reverse order. + +dst[31:0] := e15 +dst[63:32] := e14 +dst[95:64] := e13 +dst[127:96] := e12 +dst[159:128] := e11 +dst[191:160] := e10 +dst[223:192] := e9 +dst[255:224] := e8 +dst[287:256] := e7 +dst[319:288] := e6 +dst[351:320] := e5 +dst[383:352] := e4 +dst[415:384] := e3 +dst[447:416] := e2 +dst[479:448] := e1 +dst[511:480] := e0 +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Set
- - - - - - - - - - Set packed 64-bit integers in "dst" with the supplied values in reverse order. - - dst[63:0] := e7 - dst[127:64] := e6 - dst[191:128] := e5 - dst[255:192] := e4 - dst[319:256] := e3 - dst[383:320] := e2 - dst[447:384] := e1 - dst[511:448] := e0 - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Set + + + + + + + + + + Set packed 64-bit integers in "dst" with the supplied values in reverse order. + +dst[63:0] := e7 +dst[127:64] := e6 +dst[191:128] := e5 +dst[255:192] := e4 +dst[319:256] := e3 +dst[383:320] := e2 +dst[447:384] := e1 +dst[511:448] := e0 +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Set
- - - - - - - - - - Set packed double-precision (64-bit) floating-point elements in "dst" with the - supplied values in reverse order. - - dst[63:0] := e7 - dst[127:64] := e6 - dst[191:128] := e5 - dst[255:192] := e4 - dst[319:256] := e3 - dst[383:320] := e2 - dst[447:384] := e1 - dst[511:448] := e0 - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Set + + + + + + + + + + Set packed double-precision (64-bit) floating-point elements in "dst" with the supplied values in reverse order. + +dst[63:0] := e7 +dst[127:64] := e6 +dst[191:128] := e5 +dst[255:192] := e4 +dst[319:256] := e3 +dst[383:320] := e2 +dst[447:384] := e1 +dst[511:448] := e0 +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Set
- - - - - - - - - - - - - - - - - - Set packed single-precision (32-bit) floating-point elements in "dst" with the - supplied values in reverse order. - - dst[31:0] := e15 - dst[63:32] := e14 - dst[95:64] := e13 - dst[127:96] := e12 - dst[159:128] := e11 - dst[191:160] := e10 - dst[223:192] := e9 - dst[255:224] := e8 - dst[287:256] := e7 - dst[319:288] := e6 - dst[351:320] := e5 - dst[383:352] := e4 - dst[415:384] := e3 - dst[447:416] := e2 - dst[479:448] := e1 - dst[511:480] := e0 - dst[MAX:512] := 0 - - AVX512F -
immintrin.h
- Set + + + + + + + + + + + + + + + + + + Set packed single-precision (32-bit) floating-point elements in "dst" with the supplied values in reverse order. + +dst[31:0] := e15 +dst[63:32] := e14 +dst[95:64] := e13 +dst[127:96] := e12 +dst[159:128] := e11 +dst[191:160] := e10 +dst[223:192] := e9 +dst[255:224] := e8 +dst[287:256] := e7 +dst[319:288] := e6 +dst[351:320] := e5 +dst[383:352] := e4 +dst[415:384] := e3 +dst[447:416] := e2 +dst[479:448] := e1 +dst[511:480] := e0 +dst[MAX:512] := 0 + + AVX512F +
immintrin.h
+ Set
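The setr entries use the same lane layout with the argument list consumed in the opposite direction: the first C argument lands in the lowest lane. Modeling "set" and "setr" side by side makes the relationship explicit (illustrative names):

// Scalar model of the argument handling: args[0] is the leftmost C argument
// in both cases, but only setr keeps argument order as lane order.
fn set_from_args(args: [i32; 16]) -> [i32; 16] {
    let mut lanes = args;
    lanes.reverse(); // the leftmost argument becomes the highest lane
    lanes
}

fn setr_from_args(args: [i32; 16]) -> [i32; 16] {
    args // the leftmost argument is already the lowest lane
}

fn main() {
    let args: [i32; 16] = core::array::from_fn(|i| i as i32);
    assert_eq!(set_from_args(args)[15], 0);
    assert_eq!(setr_from_args(args)[0], 0);
}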
- - - Return vector of type __m512 with all elements set to zero. - - dst[MAX:0] := 0 - - - AVX512F -
immintrin.h
- Set + + + Return vector of type __m512 with all elements set to zero. + +dst[MAX:0] := 0 + + + AVX512F +
immintrin.h
+ Set
- - Return vector of type __m512i with all elements set to zero. - - dst[MAX:0] := 0 - - - AVX512F -
immintrin.h
- Set + + Return vector of type __m512i with all elements set to zero. + +dst[MAX:0] := 0 + + + AVX512F +
immintrin.h
+ Set
- - Return vector of type __m512d with all elements set to zero. - - dst[MAX:0] := 0 - - - AVX512F -
immintrin.h
- Set + + Return vector of type __m512d with all elements set to zero. + +dst[MAX:0] := 0 + + + AVX512F +
immintrin.h
+ Set
- - Return vector of type __m512 with all elements set to zero. - - dst[MAX:0] := 0 - - - AVX512F -
immintrin.h
- Set + + Return vector of type __m512 with all elements set to zero. + +dst[MAX:0] := 0 + + + AVX512F +
immintrin.h
+ Set
- - Return vector of type __m512i with all elements set to zero. - - dst[MAX:0] := 0 - - - AVX512F -
immintrin.h
- Set + + Return vector of type __m512i with all elements set to zero. + +dst[MAX:0] := 0 + + + AVX512F +
immintrin.h
+ Set
- - - - - - Rotate the bits in each packed 32-bit integer in "a" to the left by the number - of bits specified in "imm8", and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - DEFINE LEFT_ROTATE_DWORDS(src, count_src) { - count := count_src % 32 - RETURN (src << count) OR (src >> (32 - count)) - } - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], imm8[7:0]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + + + + + + Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE LEFT_ROTATE_DWORDS(src, count_src) { + count := count_src % 32 + RETURN (src << count) OR (src >> (32 - count)) +} +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Shift
- - - - - Rotate the bits in each packed 32-bit integer in "a" to the left by the number - of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements - are zeroed out when the corresponding mask bit is not set). - - DEFINE LEFT_ROTATE_DWORDS(src, count_src) { - count := count_src % 32 - RETURN (src << count) OR (src >> (32 - count)) - } - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], imm8[7:0]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + + + + + Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE LEFT_ROTATE_DWORDS(src, count_src) { + count := count_src % 32 + RETURN (src << count) OR (src >> (32 - count)) +} +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Shift
- - - - Rotate the bits in each packed 32-bit integer in "a" to the left by the number - of bits specified in "imm8", and store the results in "dst". - - DEFINE LEFT_ROTATE_DWORDS(src, count_src) { - count := count_src % 32 - RETURN (src << count) OR (src >> (32 - count)) - } - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], imm8[7:0]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + + + + Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst". + +DEFINE LEFT_ROTATE_DWORDS(src, count_src) { + count := count_src % 32 + RETURN (src << count) OR (src >> (32 - count)) +} +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], imm8[7:0]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Shift
- - - - - - Rotate the bits in each packed 64-bit integer in "a" to the left by the number - of bits specified in "imm8", and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - DEFINE LEFT_ROTATE_QWORDS(src, count_src) { - count := count_src % 64 - RETURN (src << count) OR (src >> (64 - count)) - } - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], imm8[7:0]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + + + + + + Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE LEFT_ROTATE_QWORDS(src, count_src) { + count := count_src % 64 + RETURN (src << count) OR (src >> (64 - count)) +} +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Shift
- - - - - Rotate the bits in each packed 64-bit integer in "a" to the left by the number - of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements - are zeroed out when the corresponding mask bit is not set). - - DEFINE LEFT_ROTATE_QWORDS(src, count_src) { - count := count_src % 64 - RETURN (src << count) OR (src >> (64 - count)) - } - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], imm8[7:0]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + + + + + Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE LEFT_ROTATE_QWORDS(src, count_src) { + count := count_src % 64 + RETURN (src << count) OR (src >> (64 - count)) +} +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Shift
- - - - Rotate the bits in each packed 64-bit integer in "a" to the left by the number - of bits specified in "imm8", and store the results in "dst". - - DEFINE LEFT_ROTATE_QWORDS(src, count_src) { - count := count_src % 64 - RETURN (src << count) OR (src >> (64 - count)) - } - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], imm8[7:0]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + + + + Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst". + +DEFINE LEFT_ROTATE_QWORDS(src, count_src) { + count := count_src % 64 + RETURN (src << count) OR (src >> (64 - count)) +} +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], imm8[7:0]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Shift
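LEFT_ROTATE_DWORDS/QWORDS above are ordinary bit rotates with the count reduced modulo the lane width, which is exactly what Rust's rotate_left does per lane. A scalar sketch of the 64-bit immediate form (illustrative names):

// Scalar model of rol_epi64: rotate every 64-bit lane left by imm8 % 64.
fn rol_epi64(a: [u64; 8], imm8: u8) -> [u64; 8] {
    let mut dst = [0u64; 8];
    for j in 0..8 {
        // u64::rotate_left already wraps the count, mirroring the % 64.
        dst[j] = a[j].rotate_left(u32::from(imm8) % 64);
    }
    dst
}

fn main() {
    assert_eq!(rol_epi64([1; 8], 1)[0], 2);
    assert_eq!(rol_epi64([1; 8], 65)[0], 2); // counts wrap modulo 64
}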
- - - - - - Rotate the bits in each packed 32-bit integer in "a" to the left by the number - of bits specified in the corresponding element of "b", and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - DEFINE LEFT_ROTATE_DWORDS(src, count_src) { - count := count_src % 32 - RETURN (src << count) OR (src >> (32 - count)) - } - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + + + + + + Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE LEFT_ROTATE_DWORDS(src, count_src) { + count := count_src % 32 + RETURN (src << count) OR (src >> (32 - count)) +} +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Shift
- - - - - Rotate the bits in each packed 32-bit integer in "a" to the left by the number - of bits specified in the corresponding element of "b", and store the results in "dst" - using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - DEFINE LEFT_ROTATE_DWORDS(src, count_src) { - count := count_src % 32 - RETURN (src << count) OR (src >> (32 - count)) - } - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + + + + + Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE LEFT_ROTATE_DWORDS(src, count_src) { + count := count_src % 32 + RETURN (src << count) OR (src >> (32 - count)) +} +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Shift
- - - - Rotate the bits in each packed 32-bit integer in "a" to the left by the number - of bits specified in the corresponding element of "b", and store the results in "dst". - - DEFINE LEFT_ROTATE_DWORDS(src, count_src) { - count := count_src % 32 - RETURN (src << count) OR (src >> (32 - count)) - } - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + + + + Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst". + +DEFINE LEFT_ROTATE_DWORDS(src, count_src) { + count := count_src % 32 + RETURN (src << count) OR (src >> (32 - count)) +} +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Shift
- - - - - - Rotate the bits in each packed 64-bit integer in "a" to the left by the number - of bits specified in the corresponding element of "b", and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - DEFINE LEFT_ROTATE_QWORDS(src, count_src) { - count := count_src % 64 - RETURN (src << count) OR (src >> (64 - count)) - } - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + + + + + + Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE LEFT_ROTATE_QWORDS(src, count_src) { + count := count_src % 64 + RETURN (src << count) OR (src >> (64 - count)) +} +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Shift
- - - - - Rotate the bits in each packed 64-bit integer in "a" to the left by the number - of bits specified in the corresponding element of "b", and store the results in "dst" - using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - DEFINE LEFT_ROTATE_QWORDS(src, count_src) { - count := count_src % 64 - RETURN (src << count) OR (src >> (64 - count)) - } - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + + + + + Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE LEFT_ROTATE_QWORDS(src, count_src) { + count := count_src % 64 + RETURN (src << count) OR (src >> (64 - count)) +} +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Shift
- - - - Rotate the bits in each packed 64-bit integer in "a" to the left by the number - of bits specified in the corresponding element of "b", and store the results in "dst". - - DEFINE LEFT_ROTATE_QWORDS(src, count_src) { - count := count_src % 64 - RETURN (src << count) OR (src >> (64 - count)) - } - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + + + + Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst". + +DEFINE LEFT_ROTATE_QWORDS(src, count_src) { + count := count_src % 64 + RETURN (src << count) OR (src >> (64 - count)) +} +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Shift
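The rolv forms draw a separate rotate count from each lane of "b"; combined with a writemask the scalar model looks like this (illustrative names):

// Scalar model of a write-masked variable rotate: masked-off lanes are
// copied from "src", per the writemask convention.
fn mask_rolv_epi32(src: [u32; 16], k: u16, a: [u32; 16], b: [u32; 16]) -> [u32; 16] {
    let mut dst = src;
    for j in 0..16 {
        if (k >> j) & 1 == 1 {
            dst[j] = a[j].rotate_left(b[j] % 32);
        }
    }
    dst
}

fn main() {
    let r = mask_rolv_epi32([9; 16], 0b01, [0x8000_0001; 16], [1; 16]);
    assert_eq!((r[0], r[1]), (3, 9)); // lane 1 keeps the src value
}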
-
-
-
-
-
- Rotate the bits in each packed 32-bit integer in "a" to the right by the number
- of bits specified in "imm8", and store the results in "dst" using writemask "k"
- (elements are copied from "src" when the corresponding mask bit is not set).
-
- DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
- count := count_src % 32
- RETURN (src >>count) OR (src << (32 - count))
- }
- FOR j := 0 to 15
- i := j*32
- IF k[j]
- dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], imm8[7:0])
- ELSE
- dst[i+31:i] := src[i+31:i]
- FI
- ENDFOR
- dst[MAX:512] := 0
-
-
- AVX512F
-
+
+
+
+
+
+ Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src >> count) OR (src << (32 - count))
+}
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], imm8[7:0])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+
+
+ AVX512F
+
immintrin.h
- Shift + + + + + + Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE RIGHT_ROTATE_DWORDS(src, count_src) { + count := count_src % 32 + RETURN (src >>count) OR (src << (32 - count)) +} +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Shift
-
-
-
-
- Rotate the bits in each packed 32-bit integer in "a" to the right by the number
- of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements
- are zeroed out when the corresponding mask bit is not set).
-
- DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
- count := count_src % 32
- RETURN (src >>count) OR (src << (32 - count))
- }
- FOR j := 0 to 15
- i := j*32
- IF k[j]
- dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], imm8[7:0])
- ELSE
- dst[i+31:i] := 0
- FI
- ENDFOR
- dst[MAX:512] := 0
-
-
- AVX512F
-
+
+
+
+
+ Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src >> count) OR (src << (32 - count))
+}
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], imm8[7:0])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+
+
+ AVX512F
+
immintrin.h
- Shift + + + + + Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE RIGHT_ROTATE_DWORDS(src, count_src) { + count := count_src % 32 + RETURN (src >>count) OR (src << (32 - count)) +} +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], imm8[7:0]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Shift
-
-
-
- Rotate the bits in each packed 32-bit integer in "a" to the right by the number
- of bits specified in "imm8", and store the results in "dst".
-
- DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
- count := count_src % 32
- RETURN (src >>count) OR (src << (32 - count))
- }
- FOR j := 0 to 15
- i := j*32
- dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], imm8[7:0])
- ENDFOR
- dst[MAX:512] := 0
-
-
- AVX512F
-
+
+
+
+ Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst".
+
+DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src >> count) OR (src << (32 - count))
+}
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], imm8[7:0])
+ENDFOR
+dst[MAX:512] := 0
+
+
+ AVX512F
+
immintrin.h
- Shift + + + + Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst". + +DEFINE RIGHT_ROTATE_DWORDS(src, count_src) { + count := count_src % 32 + RETURN (src >>count) OR (src << (32 - count)) +} +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], imm8[7:0]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Shift
- - - - - - Rotate the bits in each packed 64-bit integer in "a" to the right by the number - of bits specified in "imm8", and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - DEFINE RIGHT_ROTATE_QWORDS(src, count_src) { - count := count_src % 64 - RETURN (src >> count) OR (src << (64 - count)) - } - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], imm8[7:0]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + + + + + + Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE RIGHT_ROTATE_QWORDS(src, count_src) { + count := count_src % 64 + RETURN (src >> count) OR (src << (64 - count)) +} +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Shift
- - - - - Rotate the bits in each packed 64-bit integer in "a" to the right by the number - of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements - are zeroed out when the corresponding mask bit is not set). - - DEFINE RIGHT_ROTATE_QWORDS(src, count_src) { - count := count_src % 64 - RETURN (src >> count) OR (src << (64 - count)) - } - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], imm8[7:0]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + + + + + Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE RIGHT_ROTATE_QWORDS(src, count_src) { + count := count_src % 64 + RETURN (src >> count) OR (src << (64 - count)) +} +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], imm8[7:0]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Shift
- - - - Rotate the bits in each packed 64-bit integer in "a" to the right by the number - of bits specified in "imm8", and store the results in "dst". - - DEFINE RIGHT_ROTATE_QWORDS(src, count_src) { - count := count_src % 64 - RETURN (src >> count) OR (src << (64 - count)) - } - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], imm8[7:0]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + + + + Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst". + +DEFINE RIGHT_ROTATE_QWORDS(src, count_src) { + count := count_src % 64 + RETURN (src >> count) OR (src << (64 - count)) +} +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], imm8[7:0]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Shift
-
-
-
-
-
- Rotate the bits in each packed 32-bit integer in "a" to the right by the number
- of bits specified in the corresponding element of "b", and store the results in "dst"
- using writemask "k" (elements are copied from "src" when the corresponding mask bit is
- not set).
-
- DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
- count := count_src % 32
- RETURN (src >>count) OR (src << (32 - count))
- }
- FOR j := 0 to 15
- i := j*32
- IF k[j]
- dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], b[i+31:i])
- ELSE
- dst[i+31:i] := src[i+31:i]
- FI
- ENDFOR
- dst[MAX:512] := 0
-
-
- AVX512F
-
+
+
+
+
+
+ Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src >> count) OR (src << (32 - count))
+}
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+
+
+ AVX512F
+
immintrin.h
- Shift + + + + + + Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE RIGHT_ROTATE_DWORDS(src, count_src) { + count := count_src % 32 + RETURN (src >>count) OR (src << (32 - count)) +} +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Shift
-
-
-
- Rotate the bits in each packed 32-bit integer in "a" to the right by the number
- of bits specified in the corresponding element of "b", and store the results in "dst".
-
- DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
- count := count_src % 32
- RETURN (src >>count) OR (src << (32 - count))
- }
- FOR j := 0 to 15
- i := j*32
- dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], b[i+31:i])
- ENDFOR
- dst[MAX:512] := 0
-
-
- AVX512F
-
+
+
+
+ Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst".
+
+DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src >> count) OR (src << (32 - count))
+}
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], b[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+
+
+ AVX512F
+
immintrin.h
- Shift + + + + Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst". + +DEFINE RIGHT_ROTATE_DWORDS(src, count_src) { + count := count_src % 32 + RETURN (src >>count) OR (src << (32 - count)) +} +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], b[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Shift
- - - - - - Rotate the bits in each packed 64-bit integer in "a" to the right by the number - of bits specified in the corresponding element of "b", and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - DEFINE RIGHT_ROTATE_QWORDS(src, count_src) { - count := count_src % 64 - RETURN (src >> count) OR (src << (64 - count)) - } - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + + + + + + Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE RIGHT_ROTATE_QWORDS(src, count_src) { + count := count_src % 64 + RETURN (src >> count) OR (src << (64 - count)) +} +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Shift
- - - - - Rotate the bits in each packed 64-bit integer in "a" to the right by the number - of bits specified in the corresponding element of "b", and store the results in "dst" - using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - DEFINE RIGHT_ROTATE_QWORDS(src, count_src) { - count := count_src % 64 - RETURN (src >> count) OR (src << (64 - count)) - } - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + + + + + Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE RIGHT_ROTATE_QWORDS(src, count_src) { + count := count_src % 64 + RETURN (src >> count) OR (src << (64 - count)) +} +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Shift
- - - - Rotate the bits in each packed 64-bit integer in "a" to the right by the number - of bits specified in the corresponding element of "b", and store the results in "dst". - - DEFINE RIGHT_ROTATE_QWORDS(src, count_src) { - count := count_src % 64 - RETURN (src >> count) OR (src << (64 - count)) - } - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + + + + Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst". + +DEFINE RIGHT_ROTATE_QWORDS(src, count_src) { + count := count_src % 64 + RETURN (src >> count) OR (src << (64 - count)) +} +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], b[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Shift
- - - - - - Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - IF count[63:0] > 31 + + + + + + Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF count[63:0] > 31 dst[i+31:i] := 0 - ELSE + ELSE dst[i+31:i] := ZeroExtend32(a[i+31:i] << count[63:0]) - FI - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Shift
- - - - - Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - IF count[63:0] > 31 + + + + + Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF count[63:0] > 31 dst[i+31:i] := 0 - ELSE + ELSE dst[i+31:i] := ZeroExtend32(a[i+31:i] << count[63:0]) - FI - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Shift
- - - - - Shift packed 32-bit integers in "a" left by "imm8" while shifting in zeros, and - store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - IF imm8[7:0] > 31 + + + + + Shift packed 32-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF imm8[7:0] > 31 dst[i+31:i] := 0 - ELSE + ELSE dst[i+31:i] := ZeroExtend32(a[i+31:i] << imm8[7:0]) - FI - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Shift
- - - - Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, - and store the results in "dst". - - FOR j := 0 to 15 - i := j*32 - IF count[63:0] > 31 - dst[i+31:i] := 0 - ELSE - dst[i+31:i] := ZeroExtend32(a[i+31:i] << count[63:0]) - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + + + + Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + IF count[63:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend32(a[i+31:i] << count[63:0]) + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
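The sll_epi32 entries above differ only in what happens to masked-off lanes: the writemask form keeps the lane from "src", the zeromask form forces it to zero, and in every form the shift itself collapses to zero once the shared 64-bit count exceeds 31. A scalar sketch of one lane under those rules (name and signature are illustrative):

    // One 32-bit lane of a masked shift-left. `count` is the low 64 bits of
    // the count operand and is shared by all lanes.
    fn sll_epi32_lane(src: u32, a: u32, count: u64, mask_bit: bool, zeromask: bool) -> u32 {
        if !mask_bit {
            // Writemask keeps `src`; zeromask forces 0.
            return if zeromask { 0 } else { src };
        }
        // A count above 31 shifts out every bit, so the lane saturates to 0.
        if count > 31 { 0 } else { a << count as u32 }
    }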
+ Shift
- - - - - - Shift packed 64-bit integers in "a" left by "count" while shifting in zeros, - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - IF count[63:0] > 63 + + + + + + Shift packed 64-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF count[63:0] > 63 dst[i+63:i] := 0 - ELSE + ELSE dst[i+63:i] := ZeroExtend64(a[i+63:i] << count[63:0]) - FI - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Shift
- - - - - - Shift packed 64-bit integers in "a" left by "imm8" while shifting in zeros, and - store the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - IF imm8[7:0] > 63 + + + + + + Shift packed 64-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF imm8[7:0] > 63 dst[i+63:i] := 0 - ELSE + ELSE dst[i+63:i] := ZeroExtend64(a[i+63:i] << imm8[7:0]) - FI - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Shift
- - - - - Shift packed 64-bit integers in "a" left by "count" while shifting in zeros, - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - IF count[63:0] > 63 + + + + + Shift packed 64-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF count[63:0] > 63 dst[i+63:i] := 0 - ELSE + ELSE dst[i+63:i] := ZeroExtend64(a[i+63:i] << count[63:0]) - FI - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Shift
- - - - - Shift packed 64-bit integers in "a" left by "imm8" while shifting in zeros, and - store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - IF imm8[7:0] > 63 + + + + + Shift packed 64-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF imm8[7:0] > 63 dst[i+63:i] := 0 - ELSE + ELSE dst[i+63:i] := ZeroExtend64(a[i+63:i] << imm8[7:0]) - FI - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Shift
- - - - Shift packed 64-bit integers in "a" left by "count" while shifting in zeros, - and store the results in "dst". - - FOR j := 0 to 7 - i := j*64 - IF count[63:0] > 63 - dst[i+63:i] := 0 - ELSE - dst[i+63:i] := ZeroExtend64(a[i+63:i] << count[63:0]) - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + + + + Shift packed 64-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + IF count[63:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend64(a[i+63:i] << count[63:0]) + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Shift
- - - - Shift packed 64-bit integers in "a" left by "imm8" while shifting in zeros, and - store the results in "dst". - - FOR j := 0 to 7 - i := j*64 - IF imm8[7:0] > 63 - dst[i+63:i] := 0 - ELSE - dst[i+63:i] := ZeroExtend64(a[i+63:i] << imm8[7:0]) - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + + + + Shift packed 64-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + IF imm8[7:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend64(a[i+63:i] << imm8[7:0]) + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
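The "imm8" forms follow the same lane rule, but the count is an 8-bit immediate encoded in the instruction, so the range check becomes imm8[7:0] > 63. A scalar sketch using a const generic to mimic the immediate (illustrative only):

    // IMM8 plays the role of the encoded imm8 field; as in the pseudocode,
    // any immediate above 63 zeroes the 64-bit lane.
    fn slli_epi64_lane<const IMM8: u32>(a: u64) -> u64 {
        if IMM8 > 63 { 0 } else { a << IMM8 }
    }

    // Usage: slli_epi64_lane::<3>(x) shifts x left by 3 bits.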
+ Shift
- - - - - Shift packed 32-bit integers in "a" left by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in "dst" - using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - IF count[i+31:i] < 32 + + + + + Shift packed 32-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF count[i+31:i] < 32 dst[i+31:i] := ZeroExtend32(a[i+31:i] << count[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ELSE + ELSE dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Shift
- - - - - - Shift packed 64-bit integers in "a" left by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - IF count[i+63:i] < 64 + + + + + + Shift packed 64-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF count[i+63:i] < 64 dst[i+63:i] := ZeroExtend64(a[i+63:i] << count[i+63:i]) - ELSE + ELSE dst[i+63:i] := 0 - FI - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Shift
- - - - - Shift packed 64-bit integers in "a" left by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in "dst" - using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - IF count[i+63:i] < 64 + + + + + Shift packed 64-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF count[i+63:i] < 64 dst[i+63:i] := ZeroExtend64(a[i+63:i] << count[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ELSE + ELSE dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Shift
- - - - Shift packed 64-bit integers in "a" left by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in - "dst". - - FOR j := 0 to 7 - i := j*64 - IF count[i+63:i] < 64 - dst[i+63:i] := ZeroExtend64(a[i+63:i] << count[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + + + + Shift packed 64-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + IF count[i+63:i] < 64 + dst[i+63:i] := ZeroExtend64(a[i+63:i] << count[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
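Unlike the "sll"/"slli" entries, the "sllv" forms take a whole vector of counts, so each lane tests its own count against the lane width. A scalar sketch of the unmasked 64-bit variant (illustrative):

    // Per-lane variable shift: each lane carries its own count, and a count
    // of 64 or more zeroes that lane.
    fn sllv_epi64(a: [u64; 8], count: [u64; 8]) -> [u64; 8] {
        let mut dst = [0u64; 8];
        for j in 0..8 {
            dst[j] = if count[j] < 64 { a[j] << count[j] } else { 0 };
        }
        dst
    }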
+ Shift
- - - - - - Shift packed 32-bit integers in "a" right by "count" while shifting in sign - bits, and store the results in "dst" using writemask "k" (elements are copied from "src" - when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - IF count[63:0] > 31 + + + + + + Shift packed 32-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF count[63:0] > 31 dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0) - ELSE + ELSE dst[i+31:i] := SignExtend32(a[i+31:i] >> count[63:0]) - FI - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Shift
- - - - - Shift packed 32-bit integers in "a" right by "count" while shifting in sign - bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when - the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - IF count[63:0] > 31 + + + + + Shift packed 32-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF count[63:0] > 31 dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0) - ELSE + ELSE dst[i+31:i] := SignExtend32(a[i+31:i] >> count[63:0]) - FI - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Shift
- - - - - Shift packed 32-bit integers in "a" right by "imm8" while shifting in sign - bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when - the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - IF imm8[7:0] > 31 + + + + + Shift packed 32-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF imm8[7:0] > 31 dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0) - ELSE + ELSE dst[i+31:i] := SignExtend32(a[i+31:i] >> imm8[7:0]) - FI - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Shift
- - - - Shift packed 32-bit integers in "a" right by "count" while shifting in sign - bits, and store the results in "dst". - - FOR j := 0 to 15 - i := j*32 - IF count[63:0] > 31 - dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0) - ELSE - dst[i+31:i] := SignExtend32(a[i+31:i] >> count[63:0]) - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + + + + Shift packed 32-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + IF count[63:0] > 31 + dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0) + ELSE + dst[i+31:i] := SignExtend32(a[i+31:i] >> count[63:0]) + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
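For the arithmetic right shifts the out-of-range case does not collapse to zero: the lane fills with copies of the sign bit, which is what `(a[i+31] ? 0xFFFFFFFF : 0x0)` expresses. In Rust the same result falls out of shifting a signed integer by a clamped count (scalar sketch, illustrative):

    // One 32-bit lane of an arithmetic right shift. Clamping the count to 31
    // replicates the sign bit across the lane, i.e. the 0xFFFFFFFF-or-0 case.
    fn sra_epi32_lane(a: i32, count: u64) -> i32 {
        a >> count.min(31) as u32 // arithmetic shift, because `a` is signed
    }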
+ Shift
- - - - - - Shift packed 64-bit integers in "a" right by "count" while shifting in sign - bits, and store the results in "dst" using writemask "k" (elements are copied from "src" - when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - IF count[63:0] > 63 + + + + + + Shift packed 64-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF count[63:0] > 63 dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0x0) - ELSE + ELSE dst[i+63:i] := SignExtend64(a[i+63:i] >> count[63:0]) - FI - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Shift
- - - - - - Shift packed 64-bit integers in "a" right by "imm8" while shifting in sign - bits, and store the results in "dst" using writemask "k" (elements are copied from "src" - when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - IF imm8[7:0] > 63 + + + + + + Shift packed 64-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF imm8[7:0] > 63 dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0x0) - ELSE + ELSE dst[i+63:i] := SignExtend64(a[i+63:i] >> imm8[7:0]) - FI - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Shift
- - - - - Shift packed 64-bit integers in "a" right by "count" while shifting in sign - bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when - the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - IF count[63:0] > 63 + + + + + Shift packed 64-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF count[63:0] > 63 dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0x0) - ELSE + ELSE dst[i+63:i] := SignExtend64(a[i+63:i] >> count[63:0]) - FI - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Shift
- - - - - Shift packed 64-bit integers in "a" right by "imm8" while shifting in sign - bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when - the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - IF imm8[7:0] > 63 + + + + + Shift packed 64-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF imm8[7:0] > 63 dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0x0) - ELSE + ELSE dst[i+63:i] := SignExtend64(a[i+63:i] >> imm8[7:0]) - FI - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Shift
- - - - Shift packed 64-bit integers in "a" right by "count" while shifting in sign - bits, and store the results in "dst". - - FOR j := 0 to 7 - i := j*64 - IF count[63:0] > 63 - dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0x0) - ELSE - dst[i+63:i] := SignExtend64(a[i+63:i] >> count[63:0]) - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + + + + Shift packed 64-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + IF count[63:0] > 63 + dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0x0) + ELSE + dst[i+63:i] := SignExtend64(a[i+63:i] >> count[63:0]) + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Shift
- - - - Shift packed 64-bit integers in "a" right by "imm8" while shifting in sign - bits, and store the results in "dst". - - FOR j := 0 to 7 - i := j*64 - IF imm8[7:0] > 63 - dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0x0) - ELSE - dst[i+63:i] := SignExtend64(a[i+63:i] >> imm8[7:0]) - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + + + + Shift packed 64-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + IF imm8[7:0] > 63 + dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0x0) + ELSE + dst[i+63:i] := SignExtend64(a[i+63:i] >> imm8[7:0]) + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
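Note that 64-bit arithmetic right shifts (vpsraq) are new with AVX-512; SSE2 and AVX2 only provide the 16- and 32-bit forms. The lane rule is the one above widened to 64 bits (scalar sketch, illustrative):

    // 64-bit analogue of the previous sketch: an immediate above 63 clamps
    // to 63, filling the lane with the sign bit.
    fn srai_epi64_lane<const IMM8: u32>(a: i64) -> i64 {
        a >> IMM8.min(63)
    }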
+ Shift
- - - - - Shift packed 32-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in sign bits, and store the results in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - IF count[i+31:i] < 32 + + + + + Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF count[i+31:i] < 32 dst[i+31:i] := SignExtend32(a[i+31:i] >> count[i+31:i]) - ELSE + ELSE dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0) - FI - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Shift
- - - - - - Shift packed 64-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in sign bits, and store the results in - "dst" using writemask "k" (elements are copied from "src" when the corresponding mask - bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - IF count[i+63:i] < 64 + + + + + + Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF count[i+63:i] < 64 dst[i+63:i] := SignExtend64(a[i+63:i] >> count[i+63:i]) - ELSE + ELSE dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0) - FI - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Shift
- - - - - Shift packed 64-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in sign bits, and store the results in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - IF count[i+63:i] < 64 + + + + + Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF count[i+63:i] < 64 dst[i+63:i] := SignExtend64(a[i+63:i] >> count[i+63:i]) - ELSE + ELSE dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0) - FI - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Shift
- - - - Shift packed 64-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in sign bits, and store the results in - "dst". - - FOR j := 0 to 7 - i := j*64 - IF count[i+63:i] < 64 - dst[i+63:i] := SignExtend64(a[i+63:i] >> count[i+63:i]) - ELSE - dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0) - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + + + + Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + IF count[i+63:i] < 64 + dst[i+63:i] := SignExtend64(a[i+63:i] >> count[i+63:i]) + ELSE + dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0) + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
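The "srav" entries combine the two rules above: a per-lane count, and sign-fill once that count reaches the lane width. A scalar sketch of the writemask 64-bit variant (illustrative):

    // Per-lane arithmetic right shift with a writemask: masked-off lanes
    // keep their value from `src`.
    fn mask_srav_epi64(src: [i64; 8], k: u8, a: [i64; 8], count: [u64; 8]) -> [i64; 8] {
        let mut dst = src;
        for j in 0..8 {
            if (k >> j) & 1 == 1 {
                dst[j] = a[j] >> count[j].min(63);
            }
        }
        dst
    }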
+ Shift
- - - - - - Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - IF count[63:0] > 31 + + + + + + Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF count[63:0] > 31 dst[i+31:i] := 0 - ELSE + ELSE dst[i+31:i] := ZeroExtend32(a[i+31:i] >> count[63:0]) - FI - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Shift
- - - - - Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - IF count[63:0] > 31 + + + + + Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF count[63:0] > 31 dst[i+31:i] := 0 - ELSE + ELSE dst[i+31:i] := ZeroExtend32(a[i+31:i] >> count[63:0]) - FI - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Shift
- - - - - Shift packed 32-bit integers in "a" right by "imm8" while shifting in zeros, - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - IF imm8[7:0] > 31 + + + + + Shift packed 32-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF imm8[7:0] > 31 dst[i+31:i] := 0 - ELSE + ELSE dst[i+31:i] := ZeroExtend32(a[i+31:i] >> imm8[7:0]) - FI - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Shift
- - - - Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, - and store the results in "dst". - - FOR j := 0 to 15 - i := j*32 - IF count[63:0] > 31 - dst[i+31:i] := 0 - ELSE - dst[i+31:i] := ZeroExtend32(a[i+31:i] >> count[63:0]) - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + + + + Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + IF count[63:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend32(a[i+31:i] >> count[63:0]) + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
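The logical right shifts zero-fill instead of sign-filling, which in Rust corresponds to shifting the lane as an unsigned integer; only the out-of-range branch differs from the "sra" sketches above. One 32-bit lane (illustrative):

    // Logical (zero-filling) right shift: counts above 31 shift out every
    // bit, so the lane saturates to 0 rather than to the sign bit.
    fn srl_epi32_lane(a: u32, count: u64) -> u32 {
        if count > 31 { 0 } else { a >> count as u32 }
    }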
+ Shift
- - - - - - Shift packed 64-bit integers in "a" right by "count" while shifting in zeros, - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - IF count[63:0] > 63 + + + + + + Shift packed 64-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF count[63:0] > 63 dst[i+63:i] := 0 - ELSE + ELSE dst[i+63:i] := ZeroExtend64(a[i+63:i] >> count[63:0]) - FI - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Shift
- - - - - - Shift packed 64-bit integers in "a" right by "imm8" while shifting in zeros, - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - IF imm8[7:0] > 63 + + + + + + Shift packed 64-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF imm8[7:0] > 63 dst[i+63:i] := 0 - ELSE + ELSE dst[i+63:i] := ZeroExtend64(a[i+63:i] >> imm8[7:0]) - FI - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Shift
- - - - - Shift packed 64-bit integers in "a" right by "count" while shifting in zeros, - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - IF count[63:0] > 63 + + + + + Shift packed 64-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF count[63:0] > 63 dst[i+63:i] := 0 - ELSE + ELSE dst[i+63:i] := ZeroExtend64(a[i+63:i] >> count[63:0]) - FI - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Shift
- - - - - Shift packed 64-bit integers in "a" right by "imm8" while shifting in zeros, - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - IF imm8[7:0] > 63 + + + + + Shift packed 64-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF imm8[7:0] > 63 dst[i+63:i] := 0 - ELSE + ELSE dst[i+63:i] := ZeroExtend64(a[i+63:i] >> imm8[7:0]) - FI - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Shift
- - - - Shift packed 64-bit integers in "a" right by "count" while shifting in zeros, - and store the results in "dst". - - FOR j := 0 to 7 - i := j*64 - IF count[63:0] > 63 - dst[i+63:i] := 0 - ELSE - dst[i+63:i] := ZeroExtend64(a[i+63:i] >> count[63:0]) - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + + + + Shift packed 64-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + IF count[63:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend64(a[i+63:i] >> count[63:0]) + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Shift
- - - - Shift packed 64-bit integers in "a" right by "imm8" while shifting in zeros, - and store the results in "dst". - - FOR j := 0 to 7 - i := j*64 - IF imm8[7:0] > 63 - dst[i+63:i] := 0 - ELSE - dst[i+63:i] := ZeroExtend64(a[i+63:i] >> imm8[7:0]) - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + + + + Shift packed 64-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + IF imm8[7:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend64(a[i+63:i] >> imm8[7:0]) + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Shift
- - - - - Shift packed 32-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in "dst" - using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - IF count[i+31:i] < 32 + + + + + Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + IF count[i+31:i] < 32 dst[i+31:i] := ZeroExtend32(a[i+31:i] >> count[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ELSE + ELSE dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + FI + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Shift
- - - - - - Shift packed 64-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - IF count[i+63:i] < 64 + + + + + + Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF count[i+63:i] < 64 dst[i+63:i] := ZeroExtend64(a[i+63:i] >> count[i+63:i]) - ELSE + ELSE dst[i+63:i] := 0 - FI - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Shift
- - - - - Shift packed 64-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in "dst" - using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - IF count[i+63:i] < 64 + + + + + Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + IF count[i+63:i] < 64 dst[i+63:i] := ZeroExtend64(a[i+63:i] >> count[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ELSE + ELSE dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + FI + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512F +
immintrin.h
+ Shift
- - - - Shift packed 64-bit integers in "a" right by the amount specified by the - corresponding element in "count" while shifting in zeros, and store the results in - "dst". - - FOR j := 0 to 7 - i := j*64 - IF count[i+63:i] < 64 - dst[i+63:i] := ZeroExtend64(a[i+63:i] >> count[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Shift + + + + Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + IF count[i+63:i] < 64 + dst[i+63:i] := ZeroExtend64(a[i+63:i] >> count[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Shift
- - - - - Compute the approximate reciprocal of packed double-precision (64-bit) - floating-point elements in "a", and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). The maximum - relative error for this approximation is less than 2^-14. - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := (1.0 / a[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := (1.0 / a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - Compute the approximate reciprocal of packed double-precision (64-bit) - floating-point elements in "a", and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). The maximum - relative error for this approximation is less than 2^-14. - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := (1.0 / a[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := (1.0 / a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - Compute the approximate reciprocal of packed double-precision (64-bit) - floating-point elements in "a", and store the results in "dst". The maximum relative - error for this approximation is less than 2^-14. - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := (1.0 / a[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := (1.0 / a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
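The rcp14 family trades accuracy for speed: the result is only guaranteed to lie within a relative error of 2^-14 of the true reciprocal, so it is not interchangeable with a division. A scalar sketch of what that bound means, with 1.0/x standing in for the hardware's table-based approximation (illustrative):

    // The approximation contract: |approx - 1/x| / |1/x| < 2^-14.
    fn rcp14_error_ok(x: f64, approx: f64) -> bool {
        let exact = 1.0 / x;
        ((approx - exact) / exact).abs() < 2.0_f64.powi(-14)
    }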
+ Elementary Math Functions
- - - - - Compute the approximate reciprocal of packed single-precision (32-bit) - floating-point elements in "a", and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). The maximum - relative error for this approximation is less than 2^-14. - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := (1.0 / a[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := (1.0 / a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - Compute the approximate reciprocal of packed single-precision (32-bit) - floating-point elements in "a", and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). The maximum - relative error for this approximation is less than 2^-14. - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := (1.0 / a[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := (1.0 / a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - Compute the approximate reciprocal of packed single-precision (32-bit) - floating-point elements in "a", and store the results in "dst". The maximum relative - error for this approximation is less than 2^-14. - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := (1.0 / a[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := (1.0 / a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - - Compute the approximate reciprocal of the lower double-precision (64-bit) - floating-point element in "b", store the result in the lower element of "dst" using - writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy - the upper element from "a" to the upper element of "dst". The maximum relative error for - this approximation is less than 2^-14. - - IF k[0] - dst[63:0] := (1.0 / b[63:0]) - ELSE - dst[63:0] := src[63:0] - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + + Compute the approximate reciprocal of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". The maximum relative error for this approximation is less than 2^-14. + +IF k[0] + dst[63:0] := (1.0 / b[63:0]) +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the approximate reciprocal of the lower double-precision (64-bit) - floating-point element in "b", store the result in the lower element of "dst" using - zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper - element from "a" to the upper element of "dst". The maximum relative error for this - approximation is less than 2^-14. - - IF k[0] - dst[63:0] := (1.0 / b[63:0]) - ELSE - dst[63:0] := 0 - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + Compute the approximate reciprocal of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". The maximum relative error for this approximation is less than 2^-14. + +IF k[0] + dst[63:0] := (1.0 / b[63:0]) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - Compute the approximate reciprocal of the lower double-precision (64-bit) - floating-point element in "b", store the result in the lower element of "dst", and copy - the upper element from "a" to the upper element of "dst". The maximum relative error for - this approximation is less than 2^-14. - - dst[63:0] := (1.0 / b[63:0]) - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + Compute the approximate reciprocal of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". The maximum relative error for this approximation is less than 2^-14. + +dst[63:0] := (1.0 / b[63:0]) +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
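The "_sd"/"_ss" variants compute the approximation only in the lowest element; everything above it passes through from "a". Modeling the 128-bit double-precision register as a two-element array (illustrative; again 1.0/b[0] stands in for the hardware approximation):

    // Lower-element form: element 0 is the approximate reciprocal of b[0],
    // element 1 is copied unchanged from a.
    fn rcp14_sd(a: [f64; 2], b: [f64; 2]) -> [f64; 2] {
        [1.0 / b[0], a[1]]
    }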
+ Elementary Math Functions
- - - - - - Compute the approximate reciprocal of the lower single-precision (32-bit) - floating-point element in "b", store the result in the lower element of "dst" using - writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy - the upper 3 packed elements from "a" to the upper elements of "dst". The maximum - relative error for this approximation is less than 2^-14. - - IF k[0] - dst[31:0] := (1.0 / b[31:0]) - ELSE - dst[31:0] := src[31:0] - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + + Compute the approximate reciprocal of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 2^-14. + +IF k[0] + dst[31:0] := (1.0 / b[31:0]) +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the approximate reciprocal of the lower single-precision (32-bit) - floating-point element in "b", store the result in the lower element of "dst" using - zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper - 3 packed elements from "a" to the upper elements of "dst". The maximum relative error - for this approximation is less than 2^-14. - - IF k[0] - dst[31:0] := (1.0 / b[31:0]) - ELSE - dst[31:0] := 0 - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + Compute the approximate reciprocal of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 2^-14. + +IF k[0] + dst[31:0] := (1.0 / b[31:0]) +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - Compute the approximate reciprocal of the lower single-precision (32-bit) - floating-point element in "b", store the result in the lower element of "dst", and copy - the upper 3 packed elements from "a" to the upper elements of "dst". The maximum - relative error for this approximation is less than 2^-14. - - dst[31:0] := (1.0 / b[31:0]) - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + Compute the approximate reciprocal of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 2^-14. + +dst[31:0] := (1.0 / b[31:0]) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the approximate reciprocal square root of packed double-precision - (64-bit) floating-point elements in "a", and store the results in "dst" using writemask - "k" (elements are copied from "src" when the corresponding mask bit is not set). The - maximum relative error for this approximation is less than 2^-14. - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := (1.0 / SQRT(a[i+63:i])) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + Compute the approximate reciprocal square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := (1.0 / SQRT(a[i+63:i])) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - Compute the approximate reciprocal square root of packed double-precision - (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask - "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum - relative error for this approximation is less than 2^-14. - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := (1.0 / SQRT(a[i+63:i])) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + Compute the approximate reciprocal square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := (1.0 / SQRT(a[i+63:i])) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - Compute the approximate reciprocal square root of packed double-precision - (64-bit) floating-point elements in "a", and store the results in "dst". The maximum - relative error for this approximation is less than 2^-14. - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := (1.0 / SQRT(a[i+63:i])) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + Compute the approximate reciprocal square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := (1.0 / SQRT(a[i+63:i])) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
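When 2^-14 is not enough precision, an rsqrt14 result is conventionally refined with a Newton-Raphson step, which roughly doubles the number of correct bits; this is a standard follow-up, not part of the intrinsics themselves (scalar sketch):

    // One Newton-Raphson step for y ~= 1/sqrt(x):
    //   y' = y * (1.5 - 0.5 * x * y * y)
    fn refine_rsqrt(x: f64, y: f64) -> f64 {
        y * (1.5 - 0.5 * x * y * y)
    }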
+ Elementary Math Functions
- - - - - Compute the approximate reciprocal square root of packed single-precision - (32-bit) floating-point elements in "a", and store the results in "dst" using writemask - "k" (elements are copied from "src" when the corresponding mask bit is not set). The - maximum relative error for this approximation is less than 2^-14. - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := (1.0 / SQRT(a[i+31:i])) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := (1.0 / SQRT(a[i+31:i])) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - Compute the approximate reciprocal square root of packed single-precision - (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask - "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum - relative error for this approximation is less than 2^-14. - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := (1.0 / SQRT(a[i+31:i])) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := (1.0 / SQRT(a[i+31:i])) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - Compute the approximate reciprocal square root of packed single-precision - (32-bit) floating-point elements in "a", and store the results in "dst". The maximum - relative error for this approximation is less than 2^-14. - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := (1.0 / SQRT(a[i+31:i])) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-14. + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := (1.0 / SQRT(a[i+31:i])) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - - Compute the approximate reciprocal square root of the lower double-precision - (64-bit) floating-point element in "b", store the result in the lower element of "dst" - using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and - copy the upper element from "a" to the upper element of "dst". The maximum relative - error for this approximation is less than 2^-14. - - IF k[0] - dst[63:0] := (1.0 / SQRT(b[63:0])) - ELSE - dst[63:0] := src[63:0] - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + + Compute the approximate reciprocal square root of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". The maximum relative error for this approximation is less than 2^-14. + +IF k[0] + dst[63:0] := (1.0 / SQRT(b[63:0])) +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the approximate reciprocal square root of the lower double-precision - (64-bit) floating-point element in "b", store the result in the lower element of "dst" - using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the - upper element from "a" to the upper element of "dst". The maximum relative error for - this approximation is less than 2^-14. - - IF k[0] - dst[63:0] := (1.0 / SQRT(b[63:0])) - ELSE - dst[63:0] := 0 - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + Compute the approximate reciprocal square root of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". The maximum relative error for this approximation is less than 2^-14. + +IF k[0] + dst[63:0] := (1.0 / SQRT(b[63:0])) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - Compute the approximate reciprocal square root of the lower double-precision - (64-bit) floating-point element in "b", store the result in the lower element of "dst", - and copy the upper element from "a" to the upper element of "dst". The maximum relative - error for this approximation is less than 2^-14. - - dst[63:0] := (1.0 / SQRT(b[63:0])) - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + Compute the approximate reciprocal square root of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". The maximum relative error for this approximation is less than 2^-14. + +dst[63:0] := (1.0 / SQRT(b[63:0])) +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - - Compute the approximate reciprocal square root of the lower single-precision - (32-bit) floating-point element in "b", store the result in the lower element of "dst" - using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and - copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum - relative error for this approximation is less than 2^-14. - - IF k[0] - dst[31:0] := (1.0 / SQRT(b[31:0])) - ELSE - dst[31:0] := src[31:0] - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + + Compute the approximate reciprocal square root of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 2^-14. + +IF k[0] + dst[31:0] := (1.0 / SQRT(b[31:0])) +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the approximate reciprocal square root of the lower single-precision - (32-bit) floating-point element in "b", store the result in the lower element of "dst" - using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the - upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative - error for this approximation is less than 2^-14. - - IF k[0] - dst[31:0] := (1.0 / SQRT(b[31:0])) - ELSE - dst[31:0] := 0 - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + Compute the approximate reciprocal square root of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 2^-14. + +IF k[0] + dst[31:0] := (1.0 / SQRT(b[31:0])) +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - Compute the approximate reciprocal square root of the lower single-precision - (32-bit) floating-point element in "b", store the result in the lower element of "dst", - and copy the upper 3 packed elements from "a" to the upper elements of "dst". The - maximum relative error for this approximation is less than 2^-14. - - dst[31:0] := (1.0 / SQRT(b[31:0])) - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + Compute the approximate reciprocal square root of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 2^-14. + +dst[31:0] := (1.0 / SQRT(b[31:0])) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the square root of packed double-precision (64-bit) floating-point - elements in "a", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := SQRT(a[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + Compute the square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := SQRT(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - - Compute the square root of packed double-precision (64-bit) floating-point - elements in "a", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - [round_note] - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := SQRT(a[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + + Compute the square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := SQRT(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - Compute the square root of packed double-precision (64-bit) floating-point - elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := SQRT(a[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + Compute the square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := SQRT(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the square root of packed double-precision (64-bit) floating-point - elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - [round_note]. - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := SQRT(a[i+63:i]) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + Compute the square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note]. + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := SQRT(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - Compute the square root of packed double-precision (64-bit) floating-point - elements in "a", and store the results in "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := SQRT(a[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + Compute the square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := SQRT(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - Compute the square root of packed double-precision (64-bit) floating-point - elements in "a", and store the results in "dst". - [round_note]. - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := SQRT(a[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + Compute the square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + [round_note]. + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := SQRT(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
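Ignoring the rounding-control variants, the packed-sqrt entries above all reduce to one per-lane select. A sketch in Rust, with the vector flattened to an array (a model of the pseudocode, not the intrinsic itself):

// Writemask model of the masked _mm512-style sqrt pseudocode ([f64; 8] stands
// in for __m512d). The zeromask form is this with `src` all zeros; the
// unmasked form is this with every bit of `k` set.
fn mask_sqrt_pd_model(src: [f64; 8], k: u8, a: [f64; 8]) -> [f64; 8] {
    let mut dst = [0.0f64; 8];
    for j in 0..8 {
        dst[j] = if (k >> j) & 1 != 0 { a[j].sqrt() } else { src[j] };
    }
    dst
}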
- - - - - Compute the square root of packed single-precision (32-bit) floating-point - elements in "a", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := SQRT(a[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + Compute the square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := SQRT(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - - Compute the square root of packed single-precision (32-bit) floating-point - elements in "a", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - [round_note] - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := SQRT(a[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + + Compute the square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := SQRT(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - Compute the square root of packed single-precision (32-bit) floating-point - elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := SQRT(a[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + Compute the square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := SQRT(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the square root of packed single-precision (32-bit) floating-point - elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - [round_note] - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := SQRT(a[i+31:i]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + Compute the square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := SQRT(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - Compute the square root of packed single-precision (32-bit) floating-point - elements in "a", and store the results in "dst". - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := SQRT(a[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + Compute the square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := SQRT(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - Compute the square root of packed single-precision (32-bit) floating-point - elements in "a", and store the results in "dst". - [round_note]. - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := SQRT(a[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + Compute the square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + [round_note]. + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := SQRT(a[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
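The [round_note] variants take an explicit rounding control instead of using MXCSR. A hedged sketch of how such an entry is typically reached from Rust's core::arch (nightly, AVX512F enabled; the const-generic spelling follows current stdarch and may differ in other bindings):

#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::*;

// Round-to-nearest combined with suppress-all-exceptions (SAE), the usual
// pairing for the _round_ intrinsics described above.
#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f")]
unsafe fn sqrt_ps_nearest(a: __m512) -> __m512 {
    _mm512_sqrt_round_ps::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a)
}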
- - - - - - - Compute the square root of the lower double-precision (64-bit) floating-point - element in "b", store the result in the lower element of "dst" using writemask "k" (the - element is copied from "src" when mask bit 0 is not set), and copy the upper element - from "a" to the upper element of "dst". - [round_note] - - IF k[0] - dst[63:0] := SQRT(b[63:0]) - ELSE - dst[63:0] := src[63:0] - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + + + Compute the square root of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + [round_note] + +IF k[0] + dst[63:0] := SQRT(b[63:0]) +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - - Compute the square root of the lower double-precision (64-bit) floating-point - element in "b", store the result in the lower element of "dst" using writemask "k" (the - element is copied from "src" when mask bit 0 is not set), and copy the upper element - from "a" to the upper element of "dst". - - IF k[0] - dst[63:0] := SQRT(b[63:0]) - ELSE - dst[63:0] := src[63:0] - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + + Compute the square root of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := SQRT(b[63:0]) +ELSE + dst[63:0] := src[63:0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - - Compute the square root of the lower double-precision (64-bit) floating-point - element in "b", store the result in the lower element of "dst" using zeromask "k" (the - element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" - to the upper element of "dst". - [round_note] - - IF k[0] - dst[63:0] := SQRT(b[63:0]) - ELSE - dst[63:0] := 0 - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + + Compute the square root of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + [round_note] + +IF k[0] + dst[63:0] := SQRT(b[63:0]) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the square root of the lower double-precision (64-bit) floating-point - element in "b", store the result in the lower element of "dst" using zeromask "k" (the - element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" - to the upper element of "dst". - - IF k[0] - dst[63:0] := SQRT(b[63:0]) - ELSE - dst[63:0] := 0 - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + Compute the square root of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst[63:0] := SQRT(b[63:0]) +ELSE + dst[63:0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the square root of the lower double-precision (64-bit) floating-point - element in "b", store the result in the lower element of "dst", and copy the upper - element from "a" to the upper element of "dst". - [round_note] - - dst[63:0] := SQRT(b[63:0]) - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + Compute the square root of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + [round_note] + +dst[63:0] := SQRT(b[63:0]) +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - - - Compute the square root of the lower single-precision (32-bit) floating-point - element in "b", store the result in the lower element of "dst" using writemask "k" (the - element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed - elements from "a" to the upper elements of "dst". - [round_note] - - IF k[0] - dst[31:0] := SQRT(b[31:0]) - ELSE - dst[31:0] := src[31:0] - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + + + Compute the square root of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + +IF k[0] + dst[31:0] := SQRT(b[31:0]) +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - - Compute the square root of the lower single-precision (32-bit) floating-point - element in "b", store the result in the lower element of "dst" using writemask "k" (the - element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed - elements from "a" to the upper elements of "dst". - - IF k[0] - dst[31:0] := SQRT(b[31:0]) - ELSE - dst[31:0] := src[31:0] - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + + Compute the square root of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst[31:0] := SQRT(b[31:0]) +ELSE + dst[31:0] := src[31:0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - - Compute the square root of the lower single-precision (32-bit) floating-point - element in "b", store the result in the lower element of "dst" using zeromask "k" (the - element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements - from "a" to the upper elements of "dst". - [round_note] - - IF k[0] - dst[31:0] := SQRT(b[31:0]) - ELSE - dst[31:0] := 0 - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + + Compute the square root of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + +IF k[0] + dst[31:0] := SQRT(b[31:0]) +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the square root of the lower single-precision (32-bit) floating-point - element in "b", store the result in the lower element of "dst" using zeromask "k" (the - element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements - from "a" to the upper elements of "dst". - - IF k[0] - dst[31:0] := SQRT(b[31:0]) - ELSE - dst[31:0] := 0 - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + Compute the square root of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst[31:0] := SQRT(b[31:0]) +ELSE + dst[31:0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the square root of the lower single-precision (32-bit) floating-point - element in "b", store the result in the lower element of "dst", and copy the upper 3 - packed elements from "a" to the upper elements of "dst". - [round_note] - - dst[31:0] := SQRT(b[31:0]) - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512F -
immintrin.h
- Elementary Math Functions + + + + + Compute the square root of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + +dst[31:0] := SQRT(b[31:0]) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512F +
immintrin.h
+ Elementary Math Functions
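The scalar sd/ss entries above share one pattern: only the low lane is computed, and the upper lanes pass through from "a". A minimal model of the sd flavour (illustrative, not the stdarch API; the ss form is identical over four f32 lanes):

// Writemask model of the masked scalar-sqrt pseudocode: lane 0 is sqrt(b[0])
// or the fallback (src[0], or 0.0 in the maskz form); lane 1 comes from `a`.
fn mask_sqrt_sd_model(src: [f64; 2], k: u8, a: [f64; 2], b: [f64; 2]) -> [f64; 2] {
    [if k & 1 != 0 { b[0].sqrt() } else { src[0] }, a[1]]
}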
- - - Cast vector of type __m128d to type __m512d; the upper 384 bits of the result - are undefined. - This intrinsic is only used for compilation and does not generate any instructions, thus - it has zero latency. - AVX512F -
immintrin.h
- Cast + + + Cast vector of type __m128d to type __m512d; the upper 384 bits of the result are undefined. + This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512F +
immintrin.h
+ Cast
- - - Cast vector of type __m256d to type __m512d; the upper 256 bits of the result - are undefined. - This intrinsic is only used for compilation and does not generate any instructions, thus - it has zero latency. - AVX512F -
immintrin.h
- Cast + + + Cast vector of type __m256d to type __m512d; the upper 256 bits of the result are undefined. + This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512F +
immintrin.h
+ Cast
- - - Cast vector of type __m512d to type __m128d. - This intrinsic is only used for compilation and does not generate any instructions, thus - it has zero latency. - AVX512F -
immintrin.h
- Cast + + + Cast vector of type __m512d to type __m128d. + This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512F +
immintrin.h
+ Cast
- - - Cast vector of type __m512 to type __m128. - This intrinsic is only used for compilation and does not generate any instructions, thus - it has zero latency. - AVX512F -
immintrin.h
- Cast + + + Cast vector of type __m512 to type __m128. + This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512F +
immintrin.h
+ Cast
- - - Cast vector of type __m512d to type __m256d. - This intrinsic is only used for compilation and does not generate any instructions, thus - it has zero latency. - AVX512F -
immintrin.h
- Cast + + + Cast vector of type __m512d to type __m256d. + This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512F +
immintrin.h
+ Cast
- - - Cast vector of type __m128 to type __m512; the upper 384 bits of the result are - undefined. - This intrinsic is only used for compilation and does not generate any instructions, thus - it has zero latency. - AVX512F -
immintrin.h
- Cast + + + Cast vector of type __m128 to type __m512; the upper 384 bits of the result are undefined. + This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512F +
immintrin.h
+ Cast
- - - Cast vector of type __m256 to type __m512; the upper 256 bits of the result are - undefined. - This intrinsic is only used for compilation and does not generate any instructions, thus - it has zero latency. - AVX512F -
immintrin.h
- Cast + + + Cast vector of type __m256 to type __m512; the upper 256 bits of the result are undefined. + This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512F +
immintrin.h
+ Cast
- - - Cast vector of type __m512 to type __m256. - This intrinsic is only used for compilation and does not generate any instructions, thus - it has zero latency. - AVX512F -
immintrin.h
- Cast + + + Cast vector of type __m512 to type __m256. + This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512F +
immintrin.h
+ Cast
- - - Cast vector of type __m128i to type __m512i; the upper 384 bits of the result - are undefined. - This intrinsic is only used for compilation and does not generate any instructions, thus - it has zero latency. - AVX512F -
immintrin.h
- Cast + + + Cast vector of type __m128i to type __m512i; the upper 384 bits of the result are undefined. + This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512F +
immintrin.h
+ Cast
- - - Cast vector of type __m256i to type __m512i; the upper 256 bits of the result - are undefined. - This intrinsic is only used for compilation and does not generate any instructions, thus - it has zero latency. - AVX512F -
immintrin.h
- Cast + + + Cast vector of type __m256i to type __m512i; the upper 256 bits of the result are undefined. + This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512F +
immintrin.h
+ Cast
- - - Cast vector of type __m512i to type __m128i. - This intrinsic is only used for compilation and does not generate any instructions, thus - it has zero latency. - AVX512F -
immintrin.h
- Cast + + + Cast vector of type __m512i to type __m128i. + This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512F +
immintrin.h
+ Cast
- - - Cast vector of type __m512i to type __m256i. - This intrinsic is only used for compilation and does not generate any instructions, thus - it has zero latency. - AVX512F -
immintrin.h
- Cast + + + Cast vector of type __m512i to type __m256i. + This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512F +
immintrin.h
+ Cast
- - - Cast vector of type __m128d to type __m512d; the upper 384 bits of the result - are zeroed. This intrinsic is only used for compilation and does not generate any - instructions, thus it has zero latency. - AVX512F -
immintrin.h
- Cast + + + Cast vector of type __m128d to type __m512d; the upper 384 bits of the result are zeroed. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512F +
immintrin.h
+ Cast
- - - Cast vector of type __m128 to type __m512; the upper 384 bits of the result are - zeroed. This intrinsic is only used for compilation and does not generate any - instructions, thus it has zero latency. - AVX512F -
immintrin.h
- Cast + + + Cast vector of type __m128 to type __m512; the upper 384 bits of the result are zeroed. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512F +
immintrin.h
+ Cast
- - - Cast vector of type __m128i to type __m512i; the upper 384 bits of the result - are zeroed. This intrinsic is only used for compilation and does not generate any - instructions, thus it has zero latency. - AVX512F -
immintrin.h
- Cast + + + Cast vector of type __m128i to type __m512i; the upper 384 bits of the result are zeroed. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512F +
immintrin.h
+ Cast
- - - Cast vector of type __m256d to type __m512d; the upper 256 bits of the result - are zeroed. This intrinsic is only used for compilation and does not generate any - instructions, thus it has zero latency. - AVX512F -
immintrin.h
- Cast + + + Cast vector of type __m256d to type __m512d; the upper 256 bits of the result are zeroed. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512F +
immintrin.h
+ Cast
- - - Cast vector of type __m256 to type __m512; the upper 256 bits of the result are - zeroed. This intrinsic is only used for compilation and does not generate any - instructions, thus it has zero latency. - AVX512F -
immintrin.h
- Cast + + + Cast vector of type __m256 to type __m512; the upper 256 bits of the result are zeroed. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512F +
immintrin.h
+ Cast
- - - Cast vector of type __m256i to type __m512i; the upper 256 bits of the result - are zeroed. This intrinsic is only used for compilation and does not generate any - instructions, thus it has zero latency. - AVX512F -
immintrin.h
- Cast + + + Cast vector of type __m256i to type __m512i; the upper 256 bits of the result are zeroed. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512F +
immintrin.h
+ Cast
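The practical difference between the two cast families above: the plain casts are free but leave the upper bits undefined, while the zext casts guarantee zeros and may cost a register move. A hedged sketch using the names as they currently appear in nightly stdarch:

#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::*;

#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f")]
unsafe fn widen_pd(a: __m128d) -> (__m512d, __m512d) {
    let upper_undefined = _mm512_castpd128_pd512(a); // no instruction; bits 511:128 undefined
    let upper_zeroed = _mm512_zextpd128_pd512(a); // bits 511:128 guaranteed zero
    (upper_undefined, upper_zeroed)
}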
- - - Return vector of type __m512 with undefined elements. - AVX512F -
immintrin.h
- General Support + + + Return vector of type __m512 with undefined elements. + AVX512F +
immintrin.h
+ General Support
- - Return vector of type __m512i with undefined elements. - AVX512F -
immintrin.h
- General Support + + Return vector of type __m512i with undefined elements. + AVX512F +
immintrin.h
+ General Support
- - Return vector of type __m512d with undefined elements. - AVX512F -
immintrin.h
- General Support + + Return vector of type __m512d with undefined elements. + AVX512F +
immintrin.h
+ General Support
- - Return vector of type __m512 with undefined elements. - AVX512F -
immintrin.h
- General Support + + Return vector of type __m512 with undefined elements. + AVX512F +
immintrin.h
+ General Support
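The undefined-vector helpers only materialize a register whose contents must not be inspected; every lane should be overwritten before use. A hedged example (stdarch spelling assumed): passing one as the ignored "src" of a fully-set writemask.

#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::*;

#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f")]
unsafe fn sqrt_all_lanes(a: __m512) -> __m512 {
    // With k = 0xffff every lane is written, so the undefined `src` is never read.
    _mm512_mask_sqrt_ps(_mm512_undefined_ps(), 0xffff, a)
}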
- - - - Add packed double-precision (64-bit) floating-point elements in "a" and "b", - and store the results in "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := a[i+63:i] + b[i+63:i] - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + Add packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := a[i+63:i] + b[i+63:i] +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - Add packed double-precision (64-bit) floating-point elements in "a" and "b", - and store the results in "dst". - [round_note] - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := a[i+63:i] + b[i+63:i] - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + Add packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst". + [round_note] + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := a[i+63:i] + b[i+63:i] +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Add packed double-precision (64-bit) floating-point elements in "a" and "b", - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] + b[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Add packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - - Add packed double-precision (64-bit) floating-point elements in "a" and "b", - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - [round_note] - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] + b[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + + Add packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] + b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
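The add entries follow the same lane-select scheme as sqrt above, now with two sources. A compact Rust model of the masked pseudocode ([f64; 8] again standing in for __m512d; the ps forms are the same loop over sixteen f32 lanes):

// Writemask model of the masked add_pd pseudocode; the unmasked entry is this
// with every mask bit set.
fn mask_add_pd_model(src: [f64; 8], k: u8, a: [f64; 8], b: [f64; 8]) -> [f64; 8] {
    core::array::from_fn(|j| if (k >> j) & 1 != 0 { a[j] + b[j] } else { src[j] })
}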
- - - - Add packed single-precision (32-bit) floating-point elements in "a" and "b", - and store the results in "dst". - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := a[i+31:i] + b[i+31:i] - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + Add packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := a[i+31:i] + b[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - Add packed single-precision (32-bit) floating-point elements in "a" and "b", - and store the results in "dst". - [round_note] - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := a[i+31:i] + b[i+31:i] - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + Add packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst". + [round_note] + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := a[i+31:i] + b[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Add packed single-precision (32-bit) floating-point elements in "a" and "b", - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] + b[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Add packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - - Add packed single-precision (32-bit) floating-point elements in "a" and "b", - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). - [round_note] - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] + b[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + + Add packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", add the intermediate result to packed elements in "c", and store the results in - "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", add the intermediate result to packed elements in "c", and store the results in - "dst". - [round_note] - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst". + [round_note] + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", add the intermediate result to packed elements in "c", and store the results in - "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit - is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] - ELSE - dst[i+63:i] := c[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := c[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", add the intermediate result to packed elements in "c", and store the results in - "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit - is not set). - [round_note] - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] - ELSE - dst[i+63:i] := c[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := c[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", add the intermediate result to packed elements in "c", and store the results in - "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit - is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] - ELSE - dst[i+63:i] := a[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", add the intermediate result to packed elements in "c", and store the results in - "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit - is not set). - [round_note] - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] - ELSE - dst[i+63:i] := a[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
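The FMA entries come in two masked flavours that differ only in the fallback lane: the "mask" form keeps "a", the "mask3" form keeps "c". A model of both (the boolean flag is purely illustrative):

// Model of the masked fmadd pseudocode. `mul_add` is a fused multiply-add
// with a single rounding, matching (a*b) + c in the entries above.
fn mask_fmadd_pd_model(a: [f64; 8], b: [f64; 8], c: [f64; 8], k: u8, keep_c: bool) -> [f64; 8] {
    core::array::from_fn(|j| {
        if (k >> j) & 1 != 0 {
            a[j].mul_add(b[j], c[j])
        } else if keep_c {
            c[j] // mask3 variant: fallback is "c"
        } else {
            a[j] // mask variant: fallback is "a"
        }
    })
}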
- - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", add the intermediate result to packed elements in "c", and store the results in - "dst". - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", add the intermediate result to packed elements in "c", and store the results in - "dst". - [round_note] - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst". + [round_note] + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", add the intermediate result to packed elements in "c", and store the results in - "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit - is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] - ELSE - dst[i+31:i] := c[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := c[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", add the intermediate result to packed elements in "c", and store the results in - "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit - is not set). - [round_note] - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] - ELSE - dst[i+31:i] := c[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := c[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", add the intermediate result to packed elements in "c", and store the results in - "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit - is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] - ELSE - dst[i+31:i] := a[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", add the intermediate result to packed elements in "c", and store the results in - "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit - is not set). - [round_note] - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] - ELSE - dst[i+31:i] := a[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", subtract packed elements in "c" from the intermediate result, and store the results - in "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", subtract packed elements in "c" from the intermediate result, and store the results - in "dst". - [round_note] - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst". + [round_note] + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", subtract packed elements in "c" from the intermediate result, and store the results - in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask - bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] - ELSE - dst[i+63:i] := c[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := c[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", subtract packed elements in "c" from the intermediate result, and store the results - in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask - bit is not set). [round_note] - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] - ELSE - dst[i+63:i] := c[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). [round_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := c[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", subtract packed elements in "c" from the intermediate result, and store the results - in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask - bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] - ELSE - dst[i+63:i] := a[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", subtract packed elements in "c" from the intermediate result, and store the results - in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask - bit is not set). [round_note] - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] - ELSE - dst[i+63:i] := a[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). [round_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
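Per lane, fmsub is fmadd with the addend negated; in terms of Rust's fused mul_add:

// (a*b) - c with a single rounding, as in the fmsub pseudocode above.
fn fmsub_lane(a: f64, b: f64, c: f64) -> f64 {
    a.mul_add(b, -c)
}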
- - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", subtract packed elements in "c" from the intermediate result, and store the results - in "dst". - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", subtract packed elements in "c" from the intermediate result, and store the results - in "dst". - [round_note] - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst". + [round_note] + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", subtract packed elements in "c" from the intermediate result, and store the results - in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask - bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] - ELSE - dst[i+31:i] := c[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := c[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", subtract packed elements in "c" from the intermediate result, and store the results - in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask - bit is not set). [round_note] - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] - ELSE - dst[i+31:i] := c[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). [round_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := c[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", subtract packed elements in "c" from the intermediate result, and store the results - in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask - bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] - ELSE - dst[i+31:i] := a[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", subtract packed elements in "c" from the intermediate result, and store the results - in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask - bit is not set). [round_note] - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] - ELSE - dst[i+31:i] := a[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). [round_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", add the negated intermediate result to packed elements in "c", and store the - results in "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i] - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i] +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", add the negated intermediate result to packed elements in "c", and store the - results in "dst". - [round_note] - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i] - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst". + [round_note] + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i] +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", add the negated intermediate result to packed elements in "c", and store the - results in "dst" using writemask "k" (elements are copied from "c" when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i] - ELSE - dst[i+63:i] := c[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := c[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", add the negated intermediate result to packed elements in "c", and store the - results in "dst" using writemask "k" (elements are copied from "c" when the - corresponding mask bit is not set). [round_note] - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i] - ELSE - dst[i+63:i] := c[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). [round_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := c[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", add the negated intermediate result to packed elements in "c", and store the - results in "dst" using writemask "k" (elements are copied from "a" when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i] - ELSE - dst[i+63:i] := a[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", add the negated intermediate result to packed elements in "c", and store the - results in "dst" using writemask "k" (elements are copied from "a" when the - corresponding mask bit is not set). [round_note] - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i] - ELSE - dst[i+63:i] := a[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). [round_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", add the negated intermediate result to packed elements in "c", and store the - results in "dst". - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i] - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", add the negated intermediate result to packed elements in "c", and store the - results in "dst". - [round_note] - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i] - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst". + [round_note] + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", add the negated intermediate result to packed elements in "c", and store the - results in "dst" using writemask "k" (elements are copied from "c" when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i] - ELSE - dst[i+31:i] := c[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := c[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", add the negated intermediate result to packed elements in "c", and store the - results in "dst" using writemask "k" (elements are copied from "c" when the - corresponding mask bit is not set). [round_note] - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i] - ELSE - dst[i+31:i] := c[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). [round_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := c[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", add the negated intermediate result to packed elements in "c", and store the - results in "dst" using writemask "k" (elements are copied from "a" when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i] - ELSE - dst[i+31:i] := a[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", add the negated intermediate result to packed elements in "c", and store the - results in "dst" using writemask "k" (elements are copied from "a" when the - corresponding mask bit is not set). [round_note] - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i] - ELSE - dst[i+31:i] := a[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). [round_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
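[Editor's note: the masked FNMADD entries above differ only in where inactive lanes come from: "a" for the mask form, "c" for the mask3 form. A minimal Rust sketch of that difference, assuming a toolchain where core::arch exposes these AVX512F intrinsics; the wrapper names are illustrative, not part of the patch:]

#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::*;

// Lanes whose k-bit is clear keep the value from `a`.
#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f")]
unsafe fn fnmadd_merge_a(a: __m512d, k: __mmask8, b: __m512d, c: __m512d) -> __m512d {
    _mm512_mask_fnmadd_pd(a, k, b, c)
}

// Lanes whose k-bit is clear keep the value from `c` instead.
#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f")]
unsafe fn fnmadd_merge_c(a: __m512d, b: __m512d, c: __m512d, k: __mmask8) -> __m512d {
    _mm512_mask3_fnmadd_pd(a, b, c, k)
}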
- - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", subtract packed elements in "c" from the negated intermediate result, and store the - results in "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i] - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i] +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", subtract packed elements in "c" from the negated intermediate result, and store the - results in "dst". - [round_note] - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i] - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst". + [round_note] + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i] +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", subtract packed elements in "c" from the negated intermediate result, and store the - results in "dst" using writemask "k" (elements are copied from "c" when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i] - ELSE - dst[i+63:i] := c[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := c[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", subtract packed elements in "c" from the negated intermediate result, and store the - results in "dst" using writemask "k" (elements are copied from "c" when the - corresponding mask bit is not set). [round_note] - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i] - ELSE - dst[i+63:i] := c[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). [round_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := c[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", subtract packed elements in "c" from the negated intermediate result, and store the - results in "dst" using writemask "k" (elements are copied from "a" when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i] - ELSE - dst[i+63:i] := a[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", subtract packed elements in "c" from the negated intermediate result, and store the - results in "dst" using writemask "k" (elements are copied from "a" when the - corresponding mask bit is not set). - [round_note] - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i] - ELSE - dst[i+63:i] := a[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", subtract packed elements in "c" from the negated intermediate result, and store the - results in "dst". - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i] - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", subtract packed elements in "c" from the negated intermediate result, and store the - results in "dst". - [round_note] - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i] - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst". + [round_note] + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", subtract packed elements in "c" from the negated intermediate result, and store the - results in "dst" using writemask "k" (elements are copied from "c" when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i] - ELSE - dst[i+31:i] := c[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := c[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", subtract packed elements in "c" from the negated intermediate result, and store the - results in "dst" using writemask "k" (elements are copied from "c" when the - corresponding mask bit is not set). [round_note] - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i] - ELSE - dst[i+31:i] := c[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). [round_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := c[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", subtract packed elements in "c" from the negated intermediate result, and store the - results in "dst" using writemask "k" (elements are copied from "a" when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i] - ELSE - dst[i+31:i] := a[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", subtract packed elements in "c" from the negated intermediate result, and store the - results in "dst" using writemask "k" (elements are copied from "a" when the - corresponding mask bit is not set). - [round_note] - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i] - ELSE - dst[i+31:i] := a[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512F +
immintrin.h
+ Arithmetic
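[Editor's note: as a scalar cross-check of the fused forms in this stretch (a sketch of one lane, not text from the XML): f64::mul_add is computed with a single rounding, like the hardware FMA, and negation is exact, so the four variants reduce to sign placement:]

fn fmadd_lane(a: f64, b: f64, c: f64) -> f64 { a.mul_add(b, c) }      //  (a*b)+c
fn fmsub_lane(a: f64, b: f64, c: f64) -> f64 { a.mul_add(b, -c) }     //  (a*b)-c
fn fnmadd_lane(a: f64, b: f64, c: f64) -> f64 { (-a).mul_add(b, c) }  // -(a*b)+c
fn fnmsub_lane(a: f64, b: f64, c: f64) -> f64 { (-a).mul_add(b, -c) } // -(a*b)-c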
- - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", and store the results in "dst" using writemask "k" (elements are copied from "src" - when the corresponding mask bit is not set). RM. - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] * b[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] * b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", and store the results in "dst" using writemask "k" (elements are copied from "src" - when the corresponding mask bit is not set). - [round_note] - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] * b[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] * b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", and store the results in "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := a[i+63:i] * b[i+63:i] - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := a[i+63:i] * b[i+63:i] +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", and store the results in "dst". - [round_note] - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := a[i+63:i] * b[i+63:i] - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst". + [round_note] + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := a[i+63:i] * b[i+63:i] +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", and store the results in "dst" using writemask "k" (elements are copied from "src" - when the corresponding mask bit is not set). RM. - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] * b[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] * b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", and store the results in "dst" using writemask "k" (elements are copied from "src" - when the corresponding mask bit is not set). - [round_note] - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] * b[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] * b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", and store the results in "dst". - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := a[i+31:i] * b[i+31:i] - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := a[i+31:i] * b[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", and store the results in "dst". - [round_note] - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := a[i+31:i] * b[i+31:i] - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst". + [round_note] + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := a[i+31:i] * b[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
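[Editor's note: a short usage sketch for the merge-masked multiply above, assuming core::arch exposes _mm512_mask_mul_ps; the mask constant and wrapper name are illustrative:]

#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::*;

// Lanes with a set k-bit receive a*b; the others are copied from `src`,
// which is exactly the ELSE branch of the pseudocode.
#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f")]
unsafe fn mul_low_half(src: __m512, a: __m512, b: __m512) -> __m512 {
    let k: __mmask16 = 0b0000_0000_1111_1111; // multiply only the low 8 lanes
    _mm512_mask_mul_ps(src, k, a, b)
}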
- - - - Add packed 32-bit integers in "a" and "b", and store the results in "dst". - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := a[i+31:i] + b[i+31:i] - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + Add packed 32-bit integers in "a" and "b", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := a[i+31:i] + b[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Add packed 32-bit integers in "a" and "b", and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] + b[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Add packed 32-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] + b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Multiply the packed 32-bit integers in "a" and "b", producing intermediate - 64-bit integers, and store the low 32 bits of the intermediate integers in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - tmp[63:0] := a[i+31:i] * b[i+31:i] - dst[i+31:i] := tmp[31:0] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Multiply the packed 32-bit integers in "a" and "b", producing intermediate 64-bit integers, and store the low 32 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + tmp[63:0] := a[i+31:i] * b[i+31:i] + dst[i+31:i] := tmp[31:0] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - Multiply the packed 32-bit integers in "a" and "b", producing intermediate - 64-bit integers, and store the low 32 bits of the intermediate integers in "dst". - - FOR j := 0 to 15 - i := j*32 - tmp[63:0] := a[i+31:i] * b[i+31:i] - dst[i+31:i] := tmp[31:0] - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + Multiply the packed 32-bit integers in "a" and "b", producing intermediate 64-bit integers, and store the low 32 bits of the intermediate integers in "dst". + +FOR j := 0 to 15 + i := j*32 + tmp[63:0] := a[i+31:i] * b[i+31:i] + dst[i+31:i] := tmp[31:0] +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
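[Editor's note: the "low 32 bits of the 64-bit intermediate" wording above is exactly a wrapping 32-bit multiply; one lane in scalar Rust, as a sketch:]

fn mullo_lane(a: i32, b: i32) -> i32 {
    let wide = (a as i64) * (b as i64); // 64-bit intermediate, cannot overflow
    wide as i32                         // low 32 bits, same as a.wrapping_mul(b)
}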
- - - - - - Subtract packed 32-bit integers in "b" from packed 32-bit integers in "a", and - store the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] - b[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Subtract packed 32-bit integers in "b" from packed 32-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] - b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - Subtract packed 32-bit integers in "b" from packed 32-bit integers in "a", and - store the results in "dst". - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := a[i+31:i] - b[i+31:i] - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + Subtract packed 32-bit integers in "b" from packed 32-bit integers in "a", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := a[i+31:i] - b[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Subtract packed double-precision (64-bit) floating-point elements in "b" from - packed double-precision (64-bit) floating-point elements in "a", and store the results - in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask - bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] - b[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Subtract packed double-precision (64-bit) floating-point elements in "b" from packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] - b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - - Subtract packed double-precision (64-bit) floating-point elements in "b" from - packed double-precision (64-bit) floating-point elements in "a", and store the results - in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask - bit is not set). - [round_note] - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := a[i+63:i] - b[i+63:i] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + + Subtract packed double-precision (64-bit) floating-point elements in "b" from packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := a[i+63:i] - b[i+63:i] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - Subtract packed double-precision (64-bit) floating-point elements in "b" from - packed double-precision (64-bit) floating-point elements in "a", and store the results - in "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := a[i+63:i] - b[i+63:i] - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + Subtract packed double-precision (64-bit) floating-point elements in "b" from packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := a[i+63:i] - b[i+63:i] +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - Subtract packed double-precision (64-bit) floating-point elements in "b" from - packed double-precision (64-bit) floating-point elements in "a", and store the results - in "dst". - [round_note] - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := a[i+63:i] - b[i+63:i] - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + Subtract packed double-precision (64-bit) floating-point elements in "b" from packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + [round_note] + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := a[i+63:i] - b[i+63:i] +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Subtract packed single-precision (32-bit) floating-point elements in "b" from - packed single-precision (32-bit) floating-point elements in "a", and store the results - in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask - bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] - b[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + Subtract packed single-precision (32-bit) floating-point elements in "b" from packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] - b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - - Subtract packed single-precision (32-bit) floating-point elements in "b" from - packed single-precision (32-bit) floating-point elements in "a", and store the results - in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask - bit is not set). - [round_note] - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] - b[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + + + Subtract packed single-precision (32-bit) floating-point elements in "b" from packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] - b[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - Subtract packed single-precision (32-bit) floating-point elements in "b" from - packed single-precision (32-bit) floating-point elements in "a", and store the results - in "dst". - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := a[i+31:i] - b[i+31:i] - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + Subtract packed single-precision (32-bit) floating-point elements in "b" from packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := a[i+31:i] - b[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - Subtract packed single-precision (32-bit) floating-point elements in "b" from - packed single-precision (32-bit) floating-point elements in "a", and store the results - in "dst". - [round_note] - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := a[i+31:i] - b[i+31:i] - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + Subtract packed single-precision (32-bit) floating-point elements in "b" from packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + [round_note] + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := a[i+31:i] - b[i+31:i] +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - Reduce the packed 32-bit integers in "a" by addition using mask "k". Returns - the sum of all active elements in "a". - - DEFINE REDUCE_ADD(src, len) { - IF len == 2 - RETURN src[31:0] + src[63:32] - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*32 - src[i+31:i] := src[i+31:i] + src[i+32*len+31:i+32*len] - ENDFOR - RETURN REDUCE_ADD(src[32*len-1:0], len) - } - tmp := a - FOR j := 0 to 15 - i := j*32 - IF k[j] - tmp[i+31:i] := a[i+31:i] - ELSE - tmp[i+31:i] := 0 - FI - ENDFOR - dst[31:0] := REDUCE_ADD(tmp, 16) - - AVX512F -
immintrin.h
- Arithmetic + + + + Reduce the packed 32-bit integers in "a" by addition using mask "k". Returns the sum of all active elements in "a". + +DEFINE REDUCE_ADD(src, len) { + IF len == 2 + RETURN src[31:0] + src[63:32] + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*32 + src[i+31:i] := src[i+31:i] + src[i+32*len+31:i+32*len] + ENDFOR + RETURN REDUCE_ADD(src[32*len-1:0], len) +} +tmp := a +FOR j := 0 to 15 + i := j*32 + IF k[j] + tmp[i+31:i] := a[i+31:i] + ELSE + tmp[i+31:i] := 0 + FI +ENDFOR +dst[31:0] := REDUCE_ADD(tmp, 16) + + AVX512F +
immintrin.h
+ Arithmetic
- - - - Reduce the packed 64-bit integers in "a" by addition using mask "k". Returns - the sum of all active elements in "a". - - DEFINE REDUCE_ADD(src, len) { - IF len == 2 - RETURN src[63:0] + src[127:64] - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*64 - src[i+63:i] := src[i+63:i] + src[i+64*len+63:i+64*len] - ENDFOR - RETURN REDUCE_ADD(src[64*len-1:0], len) - } - tmp := a - FOR j := 0 to 8 - i := j*64 - IF k[j] - tmp[i+63:i] := a[i+63:i] - ELSE - tmp[i+63:i] := 0 - FI - ENDFOR - dst[63:0] := REDUCE_ADD(tmp, 8) - - AVX512F -
immintrin.h
- Arithmetic + + + + Reduce the packed 64-bit integers in "a" by addition using mask "k". Returns the sum of all active elements in "a". + +DEFINE REDUCE_ADD(src, len) { + IF len == 2 + RETURN src[63:0] + src[127:64] + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*64 + src[i+63:i] := src[i+63:i] + src[i+64*len+63:i+64*len] + ENDFOR + RETURN REDUCE_ADD(src[64*len-1:0], len) +} +tmp := a +FOR j := 0 to 7 + i := j*64 + IF k[j] + tmp[i+63:i] := a[i+63:i] + ELSE + tmp[i+63:i] := 0 + FI +ENDFOR +dst[63:0] := REDUCE_ADD(tmp, 8) + + AVX512F +
immintrin.h
+ Arithmetic
- - - - Reduce the packed double-precision (64-bit) floating-point elements in "a" by - addition using mask "k". Returns the sum of all active elements in "a". - - DEFINE REDUCE_ADD(src, len) { - IF len == 2 - RETURN src[63:0] + src[127:64] - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*64 - src[i+63:i] := src[i+63:i] + src[i+64*len+63:i+64*len] - ENDFOR - RETURN REDUCE_ADD(src[64*len-1:0], len) - } - tmp := a - FOR j := 0 to 8 - i := j*64 - IF k[j] - tmp[i+63:i] := a[i+63:i] - ELSE - tmp[i+63:i] := 0 - FI - ENDFOR - dst[63:0] := REDUCE_ADD(tmp, 8) - - AVX512F -
immintrin.h
- Arithmetic + + + + Reduce the packed double-precision (64-bit) floating-point elements in "a" by addition using mask "k". Returns the sum of all active elements in "a". + +DEFINE REDUCE_ADD(src, len) { + IF len == 2 + RETURN src[63:0] + src[127:64] + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*64 + src[i+63:i] := src[i+63:i] + src[i+64*len+63:i+64*len] + ENDFOR + RETURN REDUCE_ADD(src[64*len-1:0], len) +} +tmp := a +FOR j := 0 to 7 + i := j*64 + IF k[j] + tmp[i+63:i] := a[i+63:i] + ELSE + tmp[i+63:i] := 0 + FI +ENDFOR +dst[63:0] := REDUCE_ADD(tmp, 8) + + AVX512F +
immintrin.h
+ Arithmetic
- - - - Reduce the packed single-precision (32-bit) floating-point elements in "a" by - addition using mask "k". Returns the sum of all active elements in "a". - - DEFINE REDUCE_ADD(src, len) { - IF len == 2 - RETURN src[31:0] + src[63:32] - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*32 - src[i+31:i] := src[i+31:i] + src[i+32*len+31:i+32*len] - ENDFOR - RETURN REDUCE_ADD(src[32*len-1:0], len) - } - tmp := a - FOR j := 0 to 16 - i := j*32 - IF k[j] - tmp[i+31:i] := a[i+31:i] - ELSE - tmp[i+31:i] := 0 - FI - ENDFOR - dst[31:0] := REDUCE_ADD(tmp, 16) - - AVX512F -
immintrin.h
- Arithmetic + + + + Reduce the packed single-precision (32-bit) floating-point elements in "a" by addition using mask "k". Returns the sum of all active elements in "a". + +DEFINE REDUCE_ADD(src, len) { + IF len == 2 + RETURN src[31:0] + src[63:32] + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*32 + src[i+31:i] := src[i+31:i] + src[i+32*len+31:i+32*len] + ENDFOR + RETURN REDUCE_ADD(src[32*len-1:0], len) +} +tmp := a +FOR j := 0 to 15 + i := j*32 + IF k[j] + tmp[i+31:i] := a[i+31:i] + ELSE + tmp[i+31:i] := 0 + FI +ENDFOR +dst[31:0] := REDUCE_ADD(tmp, 16) + + AVX512F +
immintrin.h
+ Arithmetic
- - - - Reduce the packed 32-bit integers in "a" by multiplication using mask "k". - Returns the product of all active elements in "a". - - DEFINE REDUCE_MUL(src, len) { - IF len == 2 - RETURN src[31:0] * src[63:32] - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*32 - src[i+31:i] := src[i+31:i] * src[i+32*len+31:i+32*len] - ENDFOR - RETURN REDUCE_MUL(src[32*len-1:0], len) - } - tmp := a - FOR j := 0 to 16 - i := j*32 - IF k[j] - tmp[i+31:i] := a[i+31:i] - ELSE - tmp[i+31:i] := 1 - FI - ENDFOR - dst[31:0] := REDUCE_MUL(tmp, 16) - - AVX512F -
immintrin.h
- Arithmetic + + + + Reduce the packed 32-bit integers in "a" by multiplication using mask "k". Returns the product of all active elements in "a". + +DEFINE REDUCE_MUL(src, len) { + IF len == 2 + RETURN src[31:0] * src[63:32] + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*32 + src[i+31:i] := src[i+31:i] * src[i+32*len+31:i+32*len] + ENDFOR + RETURN REDUCE_MUL(src[32*len-1:0], len) +} +tmp := a +FOR j := 0 to 15 + i := j*32 + IF k[j] + tmp[i+31:i] := a[i+31:i] + ELSE + tmp[i+31:i] := 1 + FI +ENDFOR +dst[31:0] := REDUCE_MUL(tmp, 16) + + AVX512F +
immintrin.h
+ Arithmetic
- - - - Reduce the packed 64-bit integers in "a" by multiplication using mask "k". - Returns the product of all active elements in "a". - - DEFINE REDUCE_MUL(src, len) { - IF len == 2 - RETURN src[63:0] * src[127:64] - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*64 - src[i+63:i] := src[i+63:i] * src[i+64*len+63:i+64*len] - ENDFOR - RETURN REDUCE_MUL(src[64*len-1:0], len) - } - tmp := a - FOR j := 0 to 8 - i := j*64 - IF k[j] - tmp[i+63:i] := a[i+63:i] - ELSE - tmp[i+63:i] := 1 - FI - ENDFOR - dst[63:0] := REDUCE_MUL(tmp, 8) - - AVX512F -
immintrin.h
- Arithmetic + + + + Reduce the packed 64-bit integers in "a" by multiplication using mask "k". Returns the product of all active elements in "a". + +DEFINE REDUCE_MUL(src, len) { + IF len == 2 + RETURN src[63:0] * src[127:64] + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*64 + src[i+63:i] := src[i+63:i] * src[i+64*len+63:i+64*len] + ENDFOR + RETURN REDUCE_MUL(src[64*len-1:0], len) +} +tmp := a +FOR j := 0 to 7 + i := j*64 + IF k[j] + tmp[i+63:i] := a[i+63:i] + ELSE + tmp[i+63:i] := 1 + FI +ENDFOR +dst[63:0] := REDUCE_MUL(tmp, 8) + + AVX512F +
immintrin.h
+ Arithmetic
- - - - Reduce the packed double-precision (64-bit) floating-point elements in "a" by - multiplication using mask "k". Returns the product of all active elements in "a". - - DEFINE REDUCE_MUL(src, len) { - IF len == 2 - RETURN src[63:0] * src[127:64] - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*64 - src[i+63:i] := src[i+63:i] * src[i+64*len+63:i+64*len] - ENDFOR - RETURN REDUCE_MUL(src[64*len-1:0], len) - } - tmp := a - FOR j := 0 to 8 - i := j*64 - IF k[j] - tmp[i+63:i] := a[i+63:i] - ELSE - tmp[i+63:i] := 1.0 - FI - ENDFOR - dst[63:0] := REDUCE_MUL(tmp, 8) - - AVX512F -
immintrin.h
- Arithmetic + + + + Reduce the packed double-precision (64-bit) floating-point elements in "a" by multiplication using mask "k". Returns the product of all active elements in "a". + +DEFINE REDUCE_MUL(src, len) { + IF len == 2 + RETURN src[63:0] * src[127:64] + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*64 + src[i+63:i] := src[i+63:i] * src[i+64*len+63:i+64*len] + ENDFOR + RETURN REDUCE_MUL(src[64*len-1:0], len) +} +tmp := a +FOR j := 0 to 7 + i := j*64 + IF k[j] + tmp[i+63:i] := a[i+63:i] + ELSE + tmp[i+63:i] := 1.0 + FI +ENDFOR +dst[63:0] := REDUCE_MUL(tmp, 8) + + AVX512F +
immintrin.h
+ Arithmetic
- - - - Reduce the packed single-precision (32-bit) floating-point elements in "a" by - multiplication using mask "k". Returns the product of all active elements in "a". - - DEFINE REDUCE_MUL(src, len) { - IF len == 2 - RETURN src[31:0] * src[63:32] - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*32 - src[i+31:i] := src[i+31:i] * src[i+32*len+31:i+32*len] - ENDFOR - RETURN REDUCE_MUL(src[32*len-1:0], len) - } - tmp := a - FOR j := 0 to 16 - i := j*32 - IF k[j] - tmp[i+31:i] := a[i+31:i] - ELSE - tmp[i+31:i] := FP32(1.0) - FI - ENDFOR - dst[31:0] := REDUCE_MUL(tmp, 16) - - AVX512F -
immintrin.h
- Arithmetic + + + + Reduce the packed single-precision (32-bit) floating-point elements in "a" by multiplication using mask "k". Returns the product of all active elements in "a". + +DEFINE REDUCE_MUL(src, len) { + IF len == 2 + RETURN src[31:0] * src[63:32] + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*32 + src[i+31:i] := src[i+31:i] * src[i+32*len+31:i+32*len] + ENDFOR + RETURN REDUCE_MUL(src[32*len-1:0], len) +} +tmp := a +FOR j := 0 to 15 + i := j*32 + IF k[j] + tmp[i+31:i] := a[i+31:i] + ELSE + tmp[i+31:i] := FP32(1.0) + FI +ENDFOR +dst[31:0] := REDUCE_MUL(tmp, 16) + + AVX512F +
immintrin.h
+ Arithmetic
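[Editor's note: all eight masked reductions above share one recipe: replace inactive lanes with the identity element (0 for addition, 1 or 1.0 for multiplication), then reduce. A scalar sketch of the epi32 add case; for the floating-point variants the pairwise REDUCE_ADD tree fixes the association order, which a plain left-to-right fold does not reproduce bit-for-bit:]

fn mask_reduce_add_epi32(k: u16, a: [i32; 16]) -> i32 {
    (0..16)
        .map(|j| if (k >> j) & 1 == 1 { a[j] } else { 0 }) // identity for inactive lanes
        .fold(0i32, |acc, x| acc.wrapping_add(x))
}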
- - - Reduce the packed 32-bit integers in "a" by addition. Returns the sum of all - elements in "a". - - DEFINE REDUCE_ADD(src, len) { - IF len == 2 - RETURN src[31:0] + src[63:32] - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*32 - src[i+31:i] := src[i+31:i] + src[i+32*len+31:i+32*len] - ENDFOR - RETURN REDUCE_ADD(src[32*len-1:0], len) - } - dst[31:0] := REDUCE_ADD(a, 16) - - AVX512F -
immintrin.h
- Arithmetic + + + Reduce the packed 32-bit integers in "a" by addition. Returns the sum of all elements in "a". + +DEFINE REDUCE_ADD(src, len) { + IF len == 2 + RETURN src[31:0] + src[63:32] + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*32 + src[i+31:i] := src[i+31:i] + src[i+32*len+31:i+32*len] + ENDFOR + RETURN REDUCE_ADD(src[32*len-1:0], len) +} +dst[31:0] := REDUCE_ADD(a, 16) + + AVX512F +
immintrin.h
+ Arithmetic
- - - Reduce the packed 64-bit integers in "a" by addition. Returns the sum of all - elements in "a". - - DEFINE REDUCE_ADD(src, len) { - IF len == 2 - RETURN src[63:0] + src[127:64] - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*64 - src[i+63:i] := src[i+63:i] + src[i+64*len+63:i+64*len] - ENDFOR - RETURN REDUCE_ADD(src[64*len-1:0], len) - } - dst[63:0] := REDUCE_ADD(a, 8) - - AVX512F -
immintrin.h
- Arithmetic + + + Reduce the packed 64-bit integers in "a" by addition. Returns the sum of all elements in "a". + +DEFINE REDUCE_ADD(src, len) { + IF len == 2 + RETURN src[63:0] + src[127:64] + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*64 + src[i+63:i] := src[i+63:i] + src[i+64*len+63:i+64*len] + ENDFOR + RETURN REDUCE_ADD(src[64*len-1:0], len) +} +dst[63:0] := REDUCE_ADD(a, 8) + + AVX512F +
immintrin.h
+ Arithmetic
- - - Reduce the packed double-precision (64-bit) floating-point elements in "a" by - addition. Returns the sum of all elements in "a". - - DEFINE REDUCE_ADD(src, len) { - IF len == 2 - RETURN src[63:0] + src[127:64] - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*64 - src[i+63:i] := src[i+63:i] + src[i+64*len+63:i+64*len] - ENDFOR - RETURN REDUCE_ADD(src[64*len-1:0], len) - } - dst[63:0] := REDUCE_ADD(a, 8) - - AVX512F -
immintrin.h
- Arithmetic + + + Reduce the packed double-precision (64-bit) floating-point elements in "a" by addition. Returns the sum of all elements in "a". + +DEFINE REDUCE_ADD(src, len) { + IF len == 2 + RETURN src[63:0] + src[127:64] + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*64 + src[i+63:i] := src[i+63:i] + src[i+64*len+63:i+64*len] + ENDFOR + RETURN REDUCE_ADD(src[64*len-1:0], len) +} +dst[63:0] := REDUCE_ADD(a, 8) + + AVX512F +
immintrin.h
+ Arithmetic
- - - Reduce the packed single-precision (32-bit) floating-point elements in "a" by - addition. Returns the sum of all elements in "a". - - DEFINE REDUCE_ADD(src, len) { - IF len == 2 - RETURN src[31:0] + src[63:32] - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*32 - src[i+31:i] := src[i+31:i] + src[i+32*len+31:i+32*len] - ENDFOR - RETURN REDUCE_ADD(src[32*len-1:0], len) - } - dst[31:0] := REDUCE_ADD(a, 16) - - AVX512F -
immintrin.h
- Arithmetic + + + Reduce the packed single-precision (32-bit) floating-point elements in "a" by addition. Returns the sum of all elements in "a". + +DEFINE REDUCE_ADD(src, len) { + IF len == 2 + RETURN src[31:0] + src[63:32] + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*32 + src[i+31:i] := src[i+31:i] + src[i+32*len+31:i+32*len] + ENDFOR + RETURN REDUCE_ADD(src[32*len-1:0], len) +} +dst[31:0] := REDUCE_ADD(a, 16) + + AVX512F +
immintrin.h
+ Arithmetic
- - - Reduce the packed 32-bit integers in "a" by multiplication. Returns the product - of all elements in "a". - - DEFINE REDUCE_MUL(src, len) { - IF len == 2 - RETURN src[31:0] * src[63:32] - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*32 - src[i+31:i] := src[i+31:i] * src[i+32*len+31:i+32*len] - ENDFOR - RETURN REDUCE_MUL(src[32*len-1:0], len) - } - dst[31:0] := REDUCE_MUL(a, 16) - - AVX512F -
immintrin.h
- Arithmetic + + + Reduce the packed 32-bit integers in "a" by multiplication. Returns the product of all elements in "a". + +DEFINE REDUCE_MUL(src, len) { + IF len == 2 + RETURN src[31:0] * src[63:32] + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*32 + src[i+31:i] := src[i+31:i] * src[i+32*len+31:i+32*len] + ENDFOR + RETURN REDUCE_MUL(src[32*len-1:0], len) +} +dst[31:0] := REDUCE_MUL(a, 16) + + AVX512F +
immintrin.h
+ Arithmetic
- - - Reduce the packed 64-bit integers in "a" by multiplication. Returns the product - of all elements in "a". - - DEFINE REDUCE_MUL(src, len) { - IF len == 2 - RETURN src[63:0] * src[127:64] - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*64 - src[i+63:i] := src[i+63:i] * src[i+64*len+63:i+64*len] - ENDFOR - RETURN REDUCE_MUL(src[64*len-1:0], len) - } - dst[63:0] := REDUCE_MUL(a, 8) - - AVX512F -
immintrin.h
- Arithmetic + + + Reduce the packed 64-bit integers in "a" by multiplication. Returns the product of all elements in "a". + +DEFINE REDUCE_MUL(src, len) { + IF len == 2 + RETURN src[63:0] * src[127:64] + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*64 + src[i+63:i] := src[i+63:i] * src[i+64*len+63:i+64*len] + ENDFOR + RETURN REDUCE_MUL(src[64*len-1:0], len) +} +dst[63:0] := REDUCE_MUL(a, 8) + + AVX512F +
immintrin.h
+ Arithmetic
- - - Reduce the packed double-precision (64-bit) floating-point elements in "a" by - multiplication. Returns the product of all elements in "a". - - DEFINE REDUCE_MUL(src, len) { - IF len == 2 - RETURN src[63:0] * src[127:64] - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*64 - src[i+63:i] := src[i+63:i] * src[i+64*len+63:i+64*len] - ENDFOR - RETURN REDUCE_MUL(src[64*len-1:0], len) - } - dst[63:0] := REDUCE_MUL(a, 8) - - AVX512F -
immintrin.h
- Arithmetic + + + Reduce the packed double-precision (64-bit) floating-point elements in "a" by multiplication. Returns the product of all elements in "a". + +DEFINE REDUCE_MUL(src, len) { + IF len == 2 + RETURN src[63:0] * src[127:64] + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*64 + src[i+63:i] := src[i+63:i] * src[i+64*len+63:i+64*len] + ENDFOR + RETURN REDUCE_MUL(src[64*len-1:0], len) +} +dst[63:0] := REDUCE_MUL(a, 8) + + AVX512F +
immintrin.h
+ Arithmetic
- - - Reduce the packed single-precision (32-bit) floating-point elements in "a" by - multiplication. Returns the product of all elements in "a". - - DEFINE REDUCE_MUL(src, len) { - IF len == 2 - RETURN src[31:0] * src[63:32] - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*32 - src[i+31:i] := src[i+31:i] * src[i+32*len+31:i+32*len] - ENDFOR - RETURN REDUCE_MUL(src[32*len-1:0], len) - } - dst[31:0] := REDUCE_MUL(a, 16) - - AVX512F -
immintrin.h
- Arithmetic + + + Reduce the packed single-precision (32-bit) floating-point elements in "a" by multiplication. Returns the product of all elements in "a". + +DEFINE REDUCE_MUL(src, len) { + IF len == 2 + RETURN src[31:0] * src[63:32] + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*32 + src[i+31:i] := src[i+31:i] * src[i+32*len+31:i+32*len] + ENDFOR + RETURN REDUCE_MUL(src[32*len-1:0], len) +} +dst[31:0] := REDUCE_MUL(a, 16) + + AVX512F +
immintrin.h
+ Arithmetic
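[Editor's note: the REDUCE_ADD/REDUCE_MUL helpers above fold the upper half of the vector into the lower half until two elements remain. A direct Rust transcription of that recursion, as a sketch; like the pseudocode, it assumes a power-of-two length of at least 2:]

// Pairwise tree reduction, mirroring DEFINE REDUCE_ADD above.
fn reduce_add(v: &[f32]) -> f32 {
    if v.len() == 2 {
        return v[0] + v[1];
    }
    let half = v.len() / 2;     // len := len / 2
    let mut lo = v[..half].to_vec();
    for j in 0..half {
        lo[j] += v[j + half];   // fold the upper half into the lower half
    }
    reduce_add(&lo)             // recurse on the low half
}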
- - - Finds the absolute value of each packed single-precision (32-bit) - floating-point element in "v2", storing the results in "dst". - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := ABS(v2[i+31:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + Finds the absolute value of each packed single-precision (32-bit) floating-point element in "v2", storing the results in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := ABS(v2[i+31:i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - Finds the absolute value of each packed single-precision (32-bit) - floating-point element in "v2", storing the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := ABS(v2[i+31:i]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + Finds the absolute value of each packed single-precision (32-bit) floating-point element in "v2", storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ABS(v2[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - Finds the absolute value of each packed double-precision (64-bit) - floating-point element in "v2", storing the results in "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := ABS(v2[i+63:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + Finds the absolute value of each packed double-precision (64-bit) floating-point element in "v2", storing the results in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := ABS(v2[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
- - - - - Finds the absolute value of each packed double-precision (64-bit) - floating-point element in "v2", storing the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := ABS(v2[i+63:i]) - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Arithmetic + + + + + Finds the absolute value of each packed double-precision (64-bit) floating-point element in "v2", storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ABS(v2[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Arithmetic
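[Editor's note: the floating-point ABS entries above amount to clearing the sign bit, which is how the vector instruction is typically implemented (a bitwise AND with a constant). One lane as a scalar sketch:]

// |x| for every input, including -0.0 and NaN payloads: clear bit 31.
fn abs_ps_lane(x: f32) -> f32 {
    f32::from_bits(x.to_bits() & 0x7fff_ffff)
}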
- - - - - Concatenate "a" and "b" into a 128-byte immediate result, shift the result - right by "imm8" 32-bit elements, and store the low 64 bytes (16 elements) in "dst". - - temp[1023:512] := a[511:0] - temp[511:0] := b[511:0] - temp[1023:0] := temp[1023:0] >> (32*imm8[3:0]) - dst[511:0] := temp[511:0] - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + Concatenate "a" and "b" into a 128-byte immediate result, shift the result right by "imm8" 32-bit elements, and store the low 64 bytes (16 elements) in "dst". + +temp[1023:512] := a[511:0] +temp[511:0] := b[511:0] +temp[1023:0] := temp[1023:0] >> (32*imm8[3:0]) +dst[511:0] := temp[511:0] +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
- - - - - - - Concatenate "a" and "b" into a 128-byte immediate result, shift the result - right by "imm8" 32-bit elements, and store the low 64 bytes (16 elements) in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - temp[1023:512] := a[511:0] - temp[511:0] := b[511:0] - temp[1023:0] := temp[1023:0] >> (32*imm8[3:0]) - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := temp[i+31:i] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512F -
immintrin.h
- Miscellaneous + + + + + + + Concatenate "a" and "b" into a 128-byte immediate result, shift the result right by "imm8" 32-bit elements, and store the low 64 bytes (16 elements) in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +temp[1023:512] := a[511:0] +temp[511:0] := b[511:0] +temp[1023:0] := temp[1023:0] >> (32*imm8[3:0]) +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := temp[i+31:i] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512F +
immintrin.h
+ Miscellaneous
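[Editor's note: the VALIGND pseudocode above concatenates "b" (low) and "a" (high) into 32 dwords and takes the 16-element window starting at imm8[3:0]. A scalar sketch of the unmasked form:]

// Element j of the result is element (j + shift) of [b, a] concatenated.
fn alignr_epi32(a: [u32; 16], b: [u32; 16], imm8: u32) -> [u32; 16] {
    let shift = (imm8 & 0xF) as usize; // only imm8[3:0] participates
    let mut tmp = [0u32; 32];
    tmp[..16].copy_from_slice(&b);     // temp[511:0]    := b
    tmp[16..].copy_from_slice(&a);     // temp[1023:512] := a
    let mut dst = [0u32; 16];
    dst.copy_from_slice(&tmp[shift..shift + 16]); // low 64 bytes after the shift
    dst
}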
+ Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element.
+FOR j := 0 to 7
+    i := j*64
+    dst[i+63:i] := ConvertExpFP64(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
+ immintrin.h
+ Miscellaneous
+ Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element.
+ [sae_note]
+FOR j := 0 to 7
+    i := j*64
+    dst[i+63:i] := ConvertExpFP64(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
+ immintrin.h
+ Miscellaneous
+ Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element.
+FOR j := 0 to 7
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := ConvertExpFP64(a[i+63:i])
+    ELSE
+        dst[i+63:i] := src[i+63:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
+ immintrin.h
+ Miscellaneous
+ Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element.
+ [sae_note]
+FOR j := 0 to 7
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := ConvertExpFP64(a[i+63:i])
+    ELSE
+        dst[i+63:i] := src[i+63:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
+ immintrin.h
+ Miscellaneous
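For intuition, ConvertExpFP64 on a normal, non-zero input reduces to floor(log2(|x|)) returned as a double. A one-line Rust sketch; special inputs (zero, NaN, infinities, denormals) have their own rules that this sketch ignores:

fn convert_exp_fp64(x: f64) -> f64 {
    x.abs().log2().floor()
}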
+ Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element.
+FOR j := 0 to 15
+    i := j*32
+    dst[i+31:i] := ConvertExpFP32(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
+ immintrin.h
+ Miscellaneous
+ Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element.
+ [sae_note]
+FOR j := 0 to 15
+    i := j*32
+    dst[i+31:i] := ConvertExpFP32(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
+ immintrin.h
+ Miscellaneous
+ Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element.
+FOR j := 0 to 15
+    i := j*32
+    IF k[j]
+        dst[i+31:i] := ConvertExpFP32(a[i+31:i])
+    ELSE
+        dst[i+31:i] := src[i+31:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
+ immintrin.h
+ Miscellaneous
+ Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element.
+ [sae_note]
+FOR j := 0 to 15
+    i := j*32
+    IF k[j]
+        dst[i+31:i] := ConvertExpFP32(a[i+31:i])
+    ELSE
+        dst[i+31:i] := src[i+31:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
+ immintrin.h
+ Miscellaneous
+ Normalize the mantissas of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note]
+FOR j := 0 to 7
+    i := j*64
+    dst[i+63:i] := GetNormalizedMantissa(a[i+63:i], sc, interv)
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
+ immintrin.h
+ Miscellaneous
+ Normalize the mantissas of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note][sae_note]
+FOR j := 0 to 7
+    i := j*64
+    dst[i+63:i] := GetNormalizedMantissa(a[i+63:i], sc, interv)
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
+ immintrin.h
+ Miscellaneous
+ Normalize the mantissas of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note]
+FOR j := 0 to 7
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := GetNormalizedMantissa(a[i+63:i], sc, interv)
+    ELSE
+        dst[i+63:i] := src[i+63:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
+ immintrin.h
+ Miscellaneous
+ Normalize the mantissas of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note][sae_note]
+FOR j := 0 to 7
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := GetNormalizedMantissa(a[i+63:i], sc, interv)
+    ELSE
+        dst[i+63:i] := src[i+63:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
+ immintrin.h
+ Miscellaneous
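GetNormalizedMantissa scales |x| so its significand falls in the interval chosen by "interv", with the sign chosen by "sc". A Rust sketch of one common case only (interval [1, 2), positive sign); the other interval and sign-control encodings are not modeled here:

fn get_normalized_mantissa_1_2(x: f64) -> f64 {
    let k = x.abs().log2().floor(); // exponent, as in getexp
    x.abs() / k.exp2()              // |significand| scaled into [1, 2)
}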
+ Normalize the mantissas of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note]
+FOR j := 0 to 15
+    i := j*32
+    dst[i+31:i] := GetNormalizedMantissa(a[i+31:i], sc, interv)
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
+ immintrin.h
+ Miscellaneous
+ Normalize the mantissas of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note][sae_note]
+FOR j := 0 to 15
+    i := j*32
+    dst[i+31:i] := GetNormalizedMantissa(a[i+31:i], sc, interv)
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
+ immintrin.h
+ Miscellaneous
+ Normalize the mantissas of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note]
+FOR j := 0 to 15
+    i := j*32
+    IF k[j]
+        dst[i+31:i] := GetNormalizedMantissa(a[i+31:i], sc, interv)
+    ELSE
+        dst[i+31:i] := src[i+31:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
+ immintrin.h
+ Miscellaneous
+ Normalize the mantissas of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note][sae_note]
+FOR j := 0 to 15
+    i := j*32
+    IF k[j]
+        dst[i+31:i] := GetNormalizedMantissa(a[i+31:i], sc, interv)
+    ELSE
+        dst[i+31:i] := src[i+31:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
+ immintrin.h
+ Miscellaneous
+ Blend packed double-precision (64-bit) floating-point elements from "a" and "b" using control mask "k", and store the results in "dst".
+FOR j := 0 to 7
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := b[i+63:i]
+    ELSE
+        dst[i+63:i] := a[i+63:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
+ immintrin.h
+ Swizzle
+ Blend packed single-precision (32-bit) floating-point elements from "a" and "b" using control mask "k", and store the results in "dst".
+FOR j := 0 to 15
+    i := j*32
+    IF k[j]
+        dst[i+31:i] := b[i+31:i]
+    ELSE
+        dst[i+31:i] := a[i+31:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
+ immintrin.h
+ Swizzle
+ Blend packed 32-bit integers from "a" and "b" using control mask "k", and store the results in "dst".
+FOR j := 0 to 15
+    i := j*32
+    IF k[j]
+        dst[i+31:i] := b[i+31:i]
+    ELSE
+        dst[i+31:i] := a[i+31:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
+ immintrin.h
+ Swizzle
+ Blend packed 64-bit integers from "a" and "b" using control mask "k", and store the results in "dst".
+FOR j := 0 to 7
+    i := j*64
+    IF k[j]
+        dst[i+63:i] := b[i+63:i]
+    ELSE
+        dst[i+63:i] := a[i+63:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
+ immintrin.h
+ Swizzle
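All four blend entries share one semantic: bit j of the control mask selects b over a. A scalar Rust model for the 8-lane double-precision case (the helper name is illustrative):

fn mask_blend_pd(k: u8, a: [f64; 8], b: [f64; 8]) -> [f64; 8] {
    let mut dst = [0.0f64; 8];
    for j in 0..8 {
        // k[j] set -> take b, clear -> take a
        dst[j] = if (k >> j) & 1 == 1 { b[j] } else { a[j] };
    }
    dst
}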
+ Shuffle 32-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). Note that this intrinsic shuffles across 128-bit lanes, unlike past intrinsics that use the "permutevar" name. This intrinsic is identical to "_mm512_mask_permutexvar_epi32", and it is recommended that you use that intrinsic name.
+FOR j := 0 to 15
+    i := j*32
+    id := idx[i+3:i]*32
+    IF k[j]
+        dst[i+31:i] := a[id+31:id]
+    ELSE
+        dst[i+31:i] := src[i+31:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
+ immintrin.h
+ Swizzle
+ Shuffle 32-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst". Note that this intrinsic shuffles across 128-bit lanes, unlike past intrinsics that use the "permutevar" name. This intrinsic is identical to "_mm512_permutexvar_epi32", and it is recommended that you use that intrinsic name.
+FOR j := 0 to 15
+    i := j*32
+    id := idx[i+3:i]*32
+    dst[i+31:i] := a[id+31:id]
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
+ immintrin.h
+ Swizzle
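The full-width permute reads a 4-bit element index from each lane of "idx" (only idx[i+3:i] is used). A scalar sketch of the unmasked form, under the same plain-array convention as the sketches above:

fn permutexvar_epi32(idx: [u32; 16], a: [u32; 16]) -> [u32; 16] {
    let mut dst = [0u32; 16];
    for j in 0..16 {
        dst[j] = a[(idx[j] & 0b1111) as usize]; // index wraps at 16 elements
    }
    dst
}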
+ Shuffle 32-bit integers in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+DEFINE SELECT4(src, control) {
+    CASE(control[1:0]) OF
+    0: tmp[31:0] := src[31:0]
+    1: tmp[31:0] := src[63:32]
+    2: tmp[31:0] := src[95:64]
+    3: tmp[31:0] := src[127:96]
+    ESAC
+    RETURN tmp[31:0]
+}
+tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0])
+tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2])
+tmp_dst[95:64] := SELECT4(a[127:0], imm8[5:4])
+tmp_dst[127:96] := SELECT4(a[127:0], imm8[7:6])
+tmp_dst[159:128] := SELECT4(a[255:128], imm8[1:0])
+tmp_dst[191:160] := SELECT4(a[255:128], imm8[3:2])
+tmp_dst[223:192] := SELECT4(a[255:128], imm8[5:4])
+tmp_dst[255:224] := SELECT4(a[255:128], imm8[7:6])
+tmp_dst[287:256] := SELECT4(a[383:256], imm8[1:0])
+tmp_dst[319:288] := SELECT4(a[383:256], imm8[3:2])
+tmp_dst[351:320] := SELECT4(a[383:256], imm8[5:4])
+tmp_dst[383:352] := SELECT4(a[383:256], imm8[7:6])
+tmp_dst[415:384] := SELECT4(a[511:384], imm8[1:0])
+tmp_dst[447:416] := SELECT4(a[511:384], imm8[3:2])
+tmp_dst[479:448] := SELECT4(a[511:384], imm8[5:4])
+tmp_dst[511:480] := SELECT4(a[511:384], imm8[7:6])
+FOR j := 0 to 15
+    i := j*32
+    IF k[j]
+        dst[i+31:i] := tmp_dst[i+31:i]
+    ELSE
+        dst[i+31:i] := src[i+31:i]
+    FI
+ENDFOR
+dst[MAX:512] := 0
+ AVX512F
+ immintrin.h
+ Swizzle
+ Shuffle 32-bit integers in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst".
+DEFINE SELECT4(src, control) {
+    CASE(control[1:0]) OF
+    0: tmp[31:0] := src[31:0]
+    1: tmp[31:0] := src[63:32]
+    2: tmp[31:0] := src[95:64]
+    3: tmp[31:0] := src[127:96]
+    ESAC
+    RETURN tmp[31:0]
+}
+dst[31:0] := SELECT4(a[127:0], imm8[1:0])
+dst[63:32] := SELECT4(a[127:0], imm8[3:2])
+dst[95:64] := SELECT4(a[127:0], imm8[5:4])
+dst[127:96] := SELECT4(a[127:0], imm8[7:6])
+dst[159:128] := SELECT4(a[255:128], imm8[1:0])
+dst[191:160] := SELECT4(a[255:128], imm8[3:2])
+dst[223:192] := SELECT4(a[255:128], imm8[5:4])
+dst[255:224] := SELECT4(a[255:128], imm8[7:6])
+dst[287:256] := SELECT4(a[383:256], imm8[1:0])
+dst[319:288] := SELECT4(a[383:256], imm8[3:2])
+dst[351:320] := SELECT4(a[383:256], imm8[5:4])
+dst[383:352] := SELECT4(a[383:256], imm8[7:6])
+dst[415:384] := SELECT4(a[511:384], imm8[1:0])
+dst[447:416] := SELECT4(a[511:384], imm8[3:2])
+dst[479:448] := SELECT4(a[511:384], imm8[5:4])
+dst[511:480] := SELECT4(a[511:384], imm8[7:6])
+dst[MAX:512] := 0
+ AVX512F
+ immintrin.h
+ Swizzle
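SELECT4 is applied independently to each 128-bit lane, with the same imm8 reused for all four lanes. A scalar sketch of the unmasked shuffle (illustrative helper, not the intrinsic):

fn shuffle_epi32(a: [u32; 16], imm8: u8) -> [u32; 16] {
    let mut dst = [0u32; 16];
    for lane in 0..4 {
        for j in 0..4 {
            // two imm8 bits per destination element, as in SELECT4
            let sel = ((imm8 >> (2 * j)) & 0b11) as usize;
            dst[lane * 4 + j] = a[lane * 4 + sel];
        }
    }
    dst
}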
+ Compare packed double-precision (64-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".
+CASE (imm8[4:0]) OF
+0: OP := _CMP_EQ_OQ
+1: OP := _CMP_LT_OS
+2: OP := _CMP_LE_OS
+3: OP := _CMP_UNORD_Q
+4: OP := _CMP_NEQ_UQ
+5: OP := _CMP_NLT_US
+6: OP := _CMP_NLE_US
+7: OP := _CMP_ORD_Q
+8: OP := _CMP_EQ_UQ
+9: OP := _CMP_NGE_US
+10: OP := _CMP_NGT_US
+11: OP := _CMP_FALSE_OQ
+12: OP := _CMP_NEQ_OQ
+13: OP := _CMP_GE_OS
+14: OP := _CMP_GT_OS
+15: OP := _CMP_TRUE_UQ
+16: OP := _CMP_EQ_OS
+17: OP := _CMP_LT_OQ
+18: OP := _CMP_LE_OQ
+19: OP := _CMP_UNORD_S
+20: OP := _CMP_NEQ_US
+21: OP := _CMP_NLT_UQ
+22: OP := _CMP_NLE_UQ
+23: OP := _CMP_ORD_S
+24: OP := _CMP_EQ_US
+25: OP := _CMP_NGE_UQ
+26: OP := _CMP_NGT_UQ
+27: OP := _CMP_FALSE_OS
+28: OP := _CMP_NEQ_OS
+29: OP := _CMP_GE_OQ
+30: OP := _CMP_GT_OQ
+31: OP := _CMP_TRUE_US
+ESAC
+FOR j := 0 to 7
+    i := j*64
+    k[j] := (a[i+63:i] OP b[i+63:i]) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ AVX512F
+ immintrin.h
+ Compare
+ Compare packed double-precision (64-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". [sae_note]
+CASE (imm8[4:0]) OF
+0: OP := _CMP_EQ_OQ
+1: OP := _CMP_LT_OS
+2: OP := _CMP_LE_OS
+3: OP := _CMP_UNORD_Q
+4: OP := _CMP_NEQ_UQ
+5: OP := _CMP_NLT_US
+6: OP := _CMP_NLE_US
+7: OP := _CMP_ORD_Q
+8: OP := _CMP_EQ_UQ
+9: OP := _CMP_NGE_US
+10: OP := _CMP_NGT_US
+11: OP := _CMP_FALSE_OQ
+12: OP := _CMP_NEQ_OQ
+13: OP := _CMP_GE_OS
+14: OP := _CMP_GT_OS
+15: OP := _CMP_TRUE_UQ
+16: OP := _CMP_EQ_OS
+17: OP := _CMP_LT_OQ
+18: OP := _CMP_LE_OQ
+19: OP := _CMP_UNORD_S
+20: OP := _CMP_NEQ_US
+21: OP := _CMP_NLT_UQ
+22: OP := _CMP_NLE_UQ
+23: OP := _CMP_ORD_S
+24: OP := _CMP_EQ_US
+25: OP := _CMP_NGE_UQ
+26: OP := _CMP_NGT_UQ
+27: OP := _CMP_FALSE_OS
+28: OP := _CMP_NEQ_OS
+29: OP := _CMP_GE_OQ
+30: OP := _CMP_GT_OQ
+31: OP := _CMP_TRUE_US
+ESAC
+FOR j := 0 to 7
+    i := j*64
+    k[j] := (a[i+63:i] OP b[i+63:i]) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ AVX512F
+ immintrin.h
+ Compare
+ Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for equality, and store the results in mask vector "k".
+FOR j := 0 to 7
+    i := j*64
+    k[j] := (a[i+63:i] == b[i+63:i]) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ AVX512F
+ immintrin.h
+ Compare
+ Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for less-than-or-equal, and store the results in mask vector "k".
+FOR j := 0 to 7
+    i := j*64
+    k[j] := (a[i+63:i] <= b[i+63:i]) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ AVX512F
+ immintrin.h
+ Compare
+ Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for less-than, and store the results in mask vector "k".
+FOR j := 0 to 7
+    i := j*64
+    k[j] := (a[i+63:i] < b[i+63:i]) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ AVX512F
+ immintrin.h
+ Compare
+ Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for not-equal, and store the results in mask vector "k".
+FOR j := 0 to 7
+    i := j*64
+    k[j] := (a[i+63:i] != b[i+63:i]) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ AVX512F
+ immintrin.h
+ Compare
+ Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for not-less-than-or-equal, and store the results in mask vector "k".
+FOR j := 0 to 7
+    i := j*64
+    k[j] := (!(a[i+63:i] <= b[i+63:i])) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ AVX512F
+ immintrin.h
+ Compare
+ Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for not-less-than, and store the results in mask vector "k".
+FOR j := 0 to 7
+    i := j*64
+    k[j] := (!(a[i+63:i] < b[i+63:i])) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ AVX512F
+ immintrin.h
+ Compare
+ Compare packed double-precision (64-bit) floating-point elements in "a" and "b" to see if neither is NaN, and store the results in mask vector "k".
+FOR j := 0 to 7
+    i := j*64
+    k[j] := (a[i+63:i] != NaN AND b[i+63:i] != NaN) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ AVX512F
+ immintrin.h
+ Compare
+ Compare packed double-precision (64-bit) floating-point elements in "a" and "b" to see if either is NaN, and store the results in mask vector "k".
+FOR j := 0 to 7
+    i := j*64
+    k[j] := (a[i+63:i] == NaN OR b[i+63:i] == NaN) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ AVX512F
+ immintrin.h
+ Compare
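Each fixed-predicate compare above yields one mask bit per lane. A generic scalar sketch in Rust, with a closure standing in for OP (names are illustrative); note that for NaN inputs !(a <= b) is not the same as a > b, which is why the not-less-than-* predicates are spelled with an explicit negation:

fn cmp_pd_mask(a: [f64; 8], b: [f64; 8], op: impl Fn(f64, f64) -> bool) -> u8 {
    let mut k = 0u8;
    for j in 0..8 {
        if op(a[j], b[j]) {
            k |= 1 << j; // k[j] := 1
        }
    }
    k
}

For example, cmp_pd_mask(a, b, |x, y| !(x <= y)) models the not-less-than-or-equal entry, and cmp_pd_mask(a, b, |x, y| x == x && y == y) models the "neither is NaN" entry.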
+ Compare packed double-precision (64-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).
+CASE (imm8[4:0]) OF
+0: OP := _CMP_EQ_OQ
+1: OP := _CMP_LT_OS
+2: OP := _CMP_LE_OS
+3: OP := _CMP_UNORD_Q
+4: OP := _CMP_NEQ_UQ
+5: OP := _CMP_NLT_US
+6: OP := _CMP_NLE_US
+7: OP := _CMP_ORD_Q
+8: OP := _CMP_EQ_UQ
+9: OP := _CMP_NGE_US
+10: OP := _CMP_NGT_US
+11: OP := _CMP_FALSE_OQ
+12: OP := _CMP_NEQ_OQ
+13: OP := _CMP_GE_OS
+14: OP := _CMP_GT_OS
+15: OP := _CMP_TRUE_UQ
+16: OP := _CMP_EQ_OS
+17: OP := _CMP_LT_OQ
+18: OP := _CMP_LE_OQ
+19: OP := _CMP_UNORD_S
+20: OP := _CMP_NEQ_US
+21: OP := _CMP_NLT_UQ
+22: OP := _CMP_NLE_UQ
+23: OP := _CMP_ORD_S
+24: OP := _CMP_EQ_US
+25: OP := _CMP_NGE_UQ
+26: OP := _CMP_NGT_UQ
+27: OP := _CMP_FALSE_OS
+28: OP := _CMP_NEQ_OS
+29: OP := _CMP_GE_OQ
+30: OP := _CMP_GT_OQ
+31: OP := _CMP_TRUE_US
+ESAC
+FOR j := 0 to 7
+    i := j*64
+    IF k1[j]
+        k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0
+    ELSE
+        k[j] := 0
+    FI
+ENDFOR
+k[MAX:8] := 0
+ AVX512F
+ immintrin.h
+ Compare
+ Compare packed double-precision (64-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). [sae_note]
+CASE (imm8[4:0]) OF
+0: OP := _CMP_EQ_OQ
+1: OP := _CMP_LT_OS
+2: OP := _CMP_LE_OS
+3: OP := _CMP_UNORD_Q
+4: OP := _CMP_NEQ_UQ
+5: OP := _CMP_NLT_US
+6: OP := _CMP_NLE_US
+7: OP := _CMP_ORD_Q
+8: OP := _CMP_EQ_UQ
+9: OP := _CMP_NGE_US
+10: OP := _CMP_NGT_US
+11: OP := _CMP_FALSE_OQ
+12: OP := _CMP_NEQ_OQ
+13: OP := _CMP_GE_OS
+14: OP := _CMP_GT_OS
+15: OP := _CMP_TRUE_UQ
+16: OP := _CMP_EQ_OS
+17: OP := _CMP_LT_OQ
+18: OP := _CMP_LE_OQ
+19: OP := _CMP_UNORD_S
+20: OP := _CMP_NEQ_US
+21: OP := _CMP_NLT_UQ
+22: OP := _CMP_NLE_UQ
+23: OP := _CMP_ORD_S
+24: OP := _CMP_EQ_US
+25: OP := _CMP_NGE_UQ
+26: OP := _CMP_NGT_UQ
+27: OP := _CMP_FALSE_OS
+28: OP := _CMP_NEQ_OS
+29: OP := _CMP_GE_OQ
+30: OP := _CMP_GT_OQ
+31: OP := _CMP_TRUE_US
+ESAC
+FOR j := 0 to 7
+    i := j*64
+    IF k1[j]
+        k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0
+    ELSE
+        k[j] := 0
+    FI
+ENDFOR
+k[MAX:8] := 0
+ AVX512F
+ immintrin.h
+ Compare
+ Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 to 7
+    i := j*64
+    IF k1[j]
+        k[j] := (a[i+63:i] == b[i+63:i]) ? 1 : 0
+    ELSE
+        k[j] := 0
+    FI
+ENDFOR
+k[MAX:8] := 0
+ AVX512F
+ immintrin.h
+ Compare
+ Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 to 7
+    i := j*64
+    IF k1[j]
+        k[j] := (a[i+63:i] <= b[i+63:i]) ? 1 : 0
+    ELSE
+        k[j] := 0
+    FI
+ENDFOR
+k[MAX:8] := 0
+ AVX512F
+ immintrin.h
+ Compare
+ Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 to 7
+    i := j*64
+    IF k1[j]
+        k[j] := (a[i+63:i] < b[i+63:i]) ? 1 : 0
+    ELSE
+        k[j] := 0
+    FI
+ENDFOR
+k[MAX:8] := 0
+ AVX512F
+ immintrin.h
+ Compare
+ Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 to 7
+    i := j*64
+    IF k1[j]
+        k[j] := (a[i+63:i] != b[i+63:i]) ? 1 : 0
+    ELSE
+        k[j] := 0
+    FI
+ENDFOR
+k[MAX:8] := 0
+ AVX512F
+ immintrin.h
+ Compare
+ Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for not-less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 to 7
+    i := j*64
+    IF k1[j]
+        k[j] := (!(a[i+63:i] <= b[i+63:i])) ? 1 : 0
+    ELSE
+        k[j] := 0
+    FI
+ENDFOR
+k[MAX:8] := 0
+ AVX512F
+ immintrin.h
+ Compare
+ Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for not-less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 to 7
+    i := j*64
+    IF k1[j]
+        k[j] := (!(a[i+63:i] < b[i+63:i])) ? 1 : 0
+    ELSE
+        k[j] := 0
+    FI
+ENDFOR
+k[MAX:8] := 0
+ AVX512F
+ immintrin.h
+ Compare
+ Compare packed double-precision (64-bit) floating-point elements in "a" and "b" to see if neither is NaN, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 to 7
+    i := j*64
+    IF k1[j]
+        k[j] := (a[i+63:i] != NaN AND b[i+63:i] != NaN) ? 1 : 0
+    ELSE
+        k[j] := 0
+    FI
+ENDFOR
+k[MAX:8] := 0
+ AVX512F
+ immintrin.h
+ Compare
+ Compare packed double-precision (64-bit) floating-point elements in "a" and "b" to see if either is NaN, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 to 7
+    i := j*64
+    IF k1[j]
+        k[j] := (a[i+63:i] == NaN OR b[i+63:i] == NaN) ? 1 : 0
+    ELSE
+        k[j] := 0
+    FI
+ENDFOR
+k[MAX:8] := 0
+ AVX512F
+ immintrin.h
+ Compare
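The zeromasked compares differ from the plain ones only in that lanes with a clear k1 bit force a 0 result bit; equivalently, the comparison mask is ANDed with k1. A sketch under the same assumptions as the previous one:

fn mask_cmp_pd_mask(k1: u8, a: [f64; 8], b: [f64; 8], op: impl Fn(f64, f64) -> bool) -> u8 {
    let mut k = 0u8;
    for j in 0..8 {
        // only lanes selected by k1 can produce a set bit
        if (k1 >> j) & 1 == 1 && op(a[j], b[j]) {
            k |= 1 << j;
        }
    }
    k
}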
+ Compare packed single-precision (32-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".
+CASE (imm8[4:0]) OF
+0: OP := _CMP_EQ_OQ
+1: OP := _CMP_LT_OS
+2: OP := _CMP_LE_OS
+3: OP := _CMP_UNORD_Q
+4: OP := _CMP_NEQ_UQ
+5: OP := _CMP_NLT_US
+6: OP := _CMP_NLE_US
+7: OP := _CMP_ORD_Q
+8: OP := _CMP_EQ_UQ
+9: OP := _CMP_NGE_US
+10: OP := _CMP_NGT_US
+11: OP := _CMP_FALSE_OQ
+12: OP := _CMP_NEQ_OQ
+13: OP := _CMP_GE_OS
+14: OP := _CMP_GT_OS
+15: OP := _CMP_TRUE_UQ
+16: OP := _CMP_EQ_OS
+17: OP := _CMP_LT_OQ
+18: OP := _CMP_LE_OQ
+19: OP := _CMP_UNORD_S
+20: OP := _CMP_NEQ_US
+21: OP := _CMP_NLT_UQ
+22: OP := _CMP_NLE_UQ
+23: OP := _CMP_ORD_S
+24: OP := _CMP_EQ_US
+25: OP := _CMP_NGE_UQ
+26: OP := _CMP_NGT_UQ
+27: OP := _CMP_FALSE_OS
+28: OP := _CMP_NEQ_OS
+29: OP := _CMP_GE_OQ
+30: OP := _CMP_GT_OQ
+31: OP := _CMP_TRUE_US
+ESAC
+FOR j := 0 to 15
+    i := j*32
+    k[j] := (a[i+31:i] OP b[i+31:i]) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ AVX512F
+ immintrin.h
+ Compare
+ Compare packed single-precision (32-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". [sae_note]
+CASE (imm8[4:0]) OF
+0: OP := _CMP_EQ_OQ
+1: OP := _CMP_LT_OS
+2: OP := _CMP_LE_OS
+3: OP := _CMP_UNORD_Q
+4: OP := _CMP_NEQ_UQ
+5: OP := _CMP_NLT_US
+6: OP := _CMP_NLE_US
+7: OP := _CMP_ORD_Q
+8: OP := _CMP_EQ_UQ
+9: OP := _CMP_NGE_US
+10: OP := _CMP_NGT_US
+11: OP := _CMP_FALSE_OQ
+12: OP := _CMP_NEQ_OQ
+13: OP := _CMP_GE_OS
+14: OP := _CMP_GT_OS
+15: OP := _CMP_TRUE_UQ
+16: OP := _CMP_EQ_OS
+17: OP := _CMP_LT_OQ
+18: OP := _CMP_LE_OQ
+19: OP := _CMP_UNORD_S
+20: OP := _CMP_NEQ_US
+21: OP := _CMP_NLT_UQ
+22: OP := _CMP_NLE_UQ
+23: OP := _CMP_ORD_S
+24: OP := _CMP_EQ_US
+25: OP := _CMP_NGE_UQ
+26: OP := _CMP_NGT_UQ
+27: OP := _CMP_FALSE_OS
+28: OP := _CMP_NEQ_OS
+29: OP := _CMP_GE_OQ
+30: OP := _CMP_GT_OQ
+31: OP := _CMP_TRUE_US
+ESAC
+FOR j := 0 to 15
+    i := j*32
+    k[j] := (a[i+31:i] OP b[i+31:i]) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ AVX512F
+ immintrin.h
+ Compare
+ Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for equality, and store the results in mask vector "k".
+FOR j := 0 to 15
+    i := j*32
+    k[j] := (a[i+31:i] == b[i+31:i]) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ AVX512F
+ immintrin.h
+ Compare
+ Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for less-than-or-equal, and store the results in mask vector "k".
+FOR j := 0 to 15
+    i := j*32
+    k[j] := (a[i+31:i] <= b[i+31:i]) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ AVX512F
+ immintrin.h
+ Compare
+ Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for less-than, and store the results in mask vector "k".
+FOR j := 0 to 15
+    i := j*32
+    k[j] := (a[i+31:i] < b[i+31:i]) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ AVX512F
+ immintrin.h
+ Compare
+ Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for not-equal, and store the results in mask vector "k".
+FOR j := 0 to 15
+    i := j*32
+    k[j] := (a[i+31:i] != b[i+31:i]) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ AVX512F
+ immintrin.h
+ Compare
+ Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for not-less-than-or-equal, and store the results in mask vector "k".
+FOR j := 0 to 15
+    i := j*32
+    k[j] := (!(a[i+31:i] <= b[i+31:i])) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ AVX512F
+ immintrin.h
+ Compare
+ Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for not-less-than, and store the results in mask vector "k".
+FOR j := 0 to 15
+    i := j*32
+    k[j] := (!(a[i+31:i] < b[i+31:i])) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ AVX512F
+ immintrin.h
+ Compare
+ Compare packed single-precision (32-bit) floating-point elements in "a" and "b" to see if neither is NaN, and store the results in mask vector "k".
+FOR j := 0 to 15
+    i := j*32
+    k[j] := ((a[i+31:i] != NaN) AND (b[i+31:i] != NaN)) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ AVX512F
+ immintrin.h
+ Compare
+ Compare packed single-precision (32-bit) floating-point elements in "a" and "b" to see if either is NaN, and store the results in mask vector "k".
+FOR j := 0 to 15
+    i := j*32
+    k[j] := ((a[i+31:i] == NaN) OR (b[i+31:i] == NaN)) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ AVX512F
+ immintrin.h
+ Compare
Compare packed single-precision (32-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

CASE (imm8[4:0]) OF
0: OP := _CMP_EQ_OQ
1: OP := _CMP_LT_OS
2: OP := _CMP_LE_OS
3: OP := _CMP_UNORD_Q
4: OP := _CMP_NEQ_UQ
5: OP := _CMP_NLT_US
6: OP := _CMP_NLE_US
7: OP := _CMP_ORD_Q
8: OP := _CMP_EQ_UQ
9: OP := _CMP_NGE_US
10: OP := _CMP_NGT_US
11: OP := _CMP_FALSE_OQ
12: OP := _CMP_NEQ_OQ
13: OP := _CMP_GE_OS
14: OP := _CMP_GT_OS
15: OP := _CMP_TRUE_UQ
16: OP := _CMP_EQ_OS
17: OP := _CMP_LT_OQ
18: OP := _CMP_LE_OQ
19: OP := _CMP_UNORD_S
20: OP := _CMP_NEQ_US
21: OP := _CMP_NLT_UQ
22: OP := _CMP_NLE_UQ
23: OP := _CMP_ORD_S
24: OP := _CMP_EQ_US
25: OP := _CMP_NGE_UQ
26: OP := _CMP_NGT_UQ
27: OP := _CMP_FALSE_OS
28: OP := _CMP_NEQ_OS
29: OP := _CMP_GE_OQ
30: OP := _CMP_GT_OQ
31: OP := _CMP_TRUE_US
ESAC
FOR j := 0 to 15
    i := j*32
    IF k1[j]
        k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:16] := 0

AVX512F / immintrin.h / Compare
Compare packed single-precision (32-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). [sae_note]

CASE (imm8[4:0]) OF
0: OP := _CMP_EQ_OQ
1: OP := _CMP_LT_OS
2: OP := _CMP_LE_OS
3: OP := _CMP_UNORD_Q
4: OP := _CMP_NEQ_UQ
5: OP := _CMP_NLT_US
6: OP := _CMP_NLE_US
7: OP := _CMP_ORD_Q
8: OP := _CMP_EQ_UQ
9: OP := _CMP_NGE_US
10: OP := _CMP_NGT_US
11: OP := _CMP_FALSE_OQ
12: OP := _CMP_NEQ_OQ
13: OP := _CMP_GE_OS
14: OP := _CMP_GT_OS
15: OP := _CMP_TRUE_UQ
16: OP := _CMP_EQ_OS
17: OP := _CMP_LT_OQ
18: OP := _CMP_LE_OQ
19: OP := _CMP_UNORD_S
20: OP := _CMP_NEQ_US
21: OP := _CMP_NLT_UQ
22: OP := _CMP_NLE_UQ
23: OP := _CMP_ORD_S
24: OP := _CMP_EQ_US
25: OP := _CMP_NGE_UQ
26: OP := _CMP_NGT_UQ
27: OP := _CMP_FALSE_OS
28: OP := _CMP_NEQ_OS
29: OP := _CMP_GE_OQ
30: OP := _CMP_GT_OQ
31: OP := _CMP_TRUE_US
ESAC
FOR j := 0 to 15
    i := j*32
    IF k1[j]
        k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:16] := 0

AVX512F / immintrin.h / Compare
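As a sketch of how the 5-bit imm8 table above is consumed, assuming the usual meaning of the _CMP_* encodings (only a few arms are spelled out; the rest follow the same table; nothing here is from the data file):

// Scalar reading of a few imm8 encodings from the CASE table above. The
// S/Q suffixes (signaling vs. quiet) only affect FP exception reporting,
// which this sketch ignores.
fn cmp_op(imm8: u8, x: f32, y: f32) -> bool {
    match imm8 & 0x1F {
        0 => x == y,                     // _CMP_EQ_OQ
        1 => x < y,                      // _CMP_LT_OS
        2 => x <= y,                     // _CMP_LE_OS
        3 => x.is_nan() || y.is_nan(),   // _CMP_UNORD_Q
        4 => x != y,                     // _CMP_NEQ_UQ (true if unordered)
        7 => !x.is_nan() && !y.is_nan(), // _CMP_ORD_Q
        11 => false,                     // _CMP_FALSE_OQ
        15 => true,                      // _CMP_TRUE_UQ
        _ => unimplemented!("the remaining encodings follow the same table"),
    }
}

// Zeromask semantics: lanes whose k1 bit is clear come out 0.
fn mask_cmp_ps_mask_model(k1: u16, a: &[f32; 16], b: &[f32; 16], imm8: u8) -> u16 {
    let mut k: u16 = 0;
    for j in 0..16 {
        if (k1 >> j) & 1 == 1 && cmp_op(imm8, a[j], b[j]) {
            k |= 1 << j;
        }
    }
    k
}

fn main() {
    let (a, b) = ([1.0f32; 16], [2.0f32; 16]);
    // _CMP_LT_OS holds in every lane, but only the low half survives k1
    assert_eq!(mask_cmp_ps_mask_model(0x00FF, &a, &b, 1), 0x00FF);
}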
Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*32
    IF k1[j]
        k[j] := (a[i+31:i] == b[i+31:i]) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:16] := 0

AVX512F / immintrin.h / Compare

Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*32
    IF k1[j]
        k[j] := (a[i+31:i] <= b[i+31:i]) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:16] := 0

AVX512F / immintrin.h / Compare

Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*32
    IF k1[j]
        k[j] := (a[i+31:i] < b[i+31:i]) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:16] := 0

AVX512F / immintrin.h / Compare

Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*32
    IF k1[j]
        k[j] := (a[i+31:i] != b[i+31:i]) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:16] := 0

AVX512F / immintrin.h / Compare

Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for not-less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*32
    IF k1[j]
        k[j] := (!(a[i+31:i] <= b[i+31:i])) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:16] := 0

AVX512F / immintrin.h / Compare

Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for not-less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*32
    IF k1[j]
        k[j] := (!(a[i+31:i] < b[i+31:i])) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:16] := 0

AVX512F / immintrin.h / Compare

Compare packed single-precision (32-bit) floating-point elements in "a" and "b" to see if neither is NaN, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*32
    IF k1[j]
        k[j] := ((a[i+31:i] != NaN) AND (b[i+31:i] != NaN)) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:16] := 0

AVX512F / immintrin.h / Compare

Compare packed single-precision (32-bit) floating-point elements in "a" and "b" to see if either is NaN, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*32
    IF k1[j]
        k[j] := ((a[i+31:i] == NaN) OR (b[i+31:i] == NaN)) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:16] := 0

AVX512F / immintrin.h / Compare
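A point worth making explicit: every zeromasked compare above is the unmasked compare ANDed with k1. A one-function sketch (illustrative name, not from the data file):

// Each zeromasked compare above is just: unmasked_result & k1.
fn mask_cmpeq_ps_mask_model(k1: u16, a: &[f32; 16], b: &[f32; 16]) -> u16 {
    let mut unmasked: u16 = 0;
    for j in 0..16 {
        if a[j] == b[j] {
            unmasked |= 1 << j;
        }
    }
    unmasked & k1
}

fn main() {
    let v = [3.0f32; 16];
    assert_eq!(mask_cmpeq_ps_mask_model(0x0F0F, &v, &v), 0x0F0F);
}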
Compare packed signed 32-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".

CASE (imm8[2:0]) OF
0: OP := _MM_CMPINT_EQ
1: OP := _MM_CMPINT_LT
2: OP := _MM_CMPINT_LE
3: OP := _MM_CMPINT_FALSE
4: OP := _MM_CMPINT_NE
5: OP := _MM_CMPINT_NLT
6: OP := _MM_CMPINT_NLE
7: OP := _MM_CMPINT_TRUE
ESAC
FOR j := 0 to 15
    i := j*32
    k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0
ENDFOR
k[MAX:16] := 0

AVX512F / immintrin.h / Compare
Compare packed 32-bit integers in "a" and "b" for equality, and store the results in mask vector "k".

FOR j := 0 to 15
    i := j*32
    k[j] := ( a[i+31:i] == b[i+31:i] ) ? 1 : 0
ENDFOR
k[MAX:16] := 0

AVX512F / immintrin.h / Compare

Compare packed signed 32-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k".

FOR j := 0 to 15
    i := j*32
    k[j] := ( a[i+31:i] >= b[i+31:i] ) ? 1 : 0
ENDFOR
k[MAX:16] := 0

AVX512F / immintrin.h / Compare

Compare packed signed 32-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k".

FOR j := 0 to 15
    i := j*32
    k[j] := ( a[i+31:i] > b[i+31:i] ) ? 1 : 0
ENDFOR
k[MAX:16] := 0

AVX512F / immintrin.h / Compare

Compare packed signed 32-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k".

FOR j := 0 to 15
    i := j*32
    k[j] := ( a[i+31:i] <= b[i+31:i] ) ? 1 : 0
ENDFOR
k[MAX:16] := 0

AVX512F / immintrin.h / Compare

Compare packed 32-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k".

FOR j := 0 to 15
    i := j*32
    k[j] := ( a[i+31:i] != b[i+31:i] ) ? 1 : 0
ENDFOR
k[MAX:16] := 0

AVX512F / immintrin.h / Compare
Compare packed signed 32-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

CASE (imm8[2:0]) OF
0: OP := _MM_CMPINT_EQ
1: OP := _MM_CMPINT_LT
2: OP := _MM_CMPINT_LE
3: OP := _MM_CMPINT_FALSE
4: OP := _MM_CMPINT_NE
5: OP := _MM_CMPINT_NLT
6: OP := _MM_CMPINT_NLE
7: OP := _MM_CMPINT_TRUE
ESAC
FOR j := 0 to 15
    i := j*32
    IF k1[j]
        k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:16] := 0

AVX512F / immintrin.h / Compare
Compare packed 32-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*32
    IF k1[j]
        k[j] := ( a[i+31:i] == b[i+31:i] ) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:16] := 0

AVX512F / immintrin.h / Compare

Compare packed signed 32-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*32
    IF k1[j]
        k[j] := ( a[i+31:i] >= b[i+31:i] ) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:16] := 0

AVX512F / immintrin.h / Compare

Compare packed signed 32-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*32
    IF k1[j]
        k[j] := ( a[i+31:i] > b[i+31:i] ) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:16] := 0

AVX512F / immintrin.h / Compare

Compare packed signed 32-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*32
    IF k1[j]
        k[j] := ( a[i+31:i] <= b[i+31:i] ) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:16] := 0

AVX512F / immintrin.h / Compare

Compare packed 32-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*32
    IF k1[j]
        k[j] := ( a[i+31:i] != b[i+31:i] ) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:16] := 0

AVX512F / immintrin.h / Compare
Compare packed unsigned 32-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".

CASE (imm8[2:0]) OF
0: OP := _MM_CMPINT_EQ
1: OP := _MM_CMPINT_LT
2: OP := _MM_CMPINT_LE
3: OP := _MM_CMPINT_FALSE
4: OP := _MM_CMPINT_NE
5: OP := _MM_CMPINT_NLT
6: OP := _MM_CMPINT_NLE
7: OP := _MM_CMPINT_TRUE
ESAC
FOR j := 0 to 15
    i := j*32
    k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0
ENDFOR
k[MAX:16] := 0

AVX512F / immintrin.h / Compare
Compare packed unsigned 32-bit integers in "a" and "b" for equality, and store the results in mask vector "k".

FOR j := 0 to 15
    i := j*32
    k[j] := ( a[i+31:i] == b[i+31:i] ) ? 1 : 0
ENDFOR
k[MAX:16] := 0

AVX512F / immintrin.h / Compare

Compare packed unsigned 32-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k".

FOR j := 0 to 15
    i := j*32
    k[j] := ( a[i+31:i] >= b[i+31:i] ) ? 1 : 0
ENDFOR
k[MAX:16] := 0

AVX512F / immintrin.h / Compare

Compare packed unsigned 32-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k".

FOR j := 0 to 15
    i := j*32
    k[j] := ( a[i+31:i] > b[i+31:i] ) ? 1 : 0
ENDFOR
k[MAX:16] := 0

AVX512F / immintrin.h / Compare

Compare packed unsigned 32-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k".

FOR j := 0 to 15
    i := j*32
    k[j] := ( a[i+31:i] <= b[i+31:i] ) ? 1 : 0
ENDFOR
k[MAX:16] := 0

AVX512F / immintrin.h / Compare

Compare packed unsigned 32-bit integers in "a" and "b" for less-than, and store the results in mask vector "k".

FOR j := 0 to 15
    i := j*32
    k[j] := ( a[i+31:i] < b[i+31:i] ) ? 1 : 0
ENDFOR
k[MAX:16] := 0

AVX512F / immintrin.h / Compare

Compare packed unsigned 32-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k".

FOR j := 0 to 15
    i := j*32
    k[j] := ( a[i+31:i] != b[i+31:i] ) ? 1 : 0
ENDFOR
k[MAX:16] := 0

AVX512F / immintrin.h / Compare
Compare packed unsigned 32-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

CASE (imm8[2:0]) OF
0: OP := _MM_CMPINT_EQ
1: OP := _MM_CMPINT_LT
2: OP := _MM_CMPINT_LE
3: OP := _MM_CMPINT_FALSE
4: OP := _MM_CMPINT_NE
5: OP := _MM_CMPINT_NLT
6: OP := _MM_CMPINT_NLE
7: OP := _MM_CMPINT_TRUE
ESAC
FOR j := 0 to 15
    i := j*32
    IF k1[j]
        k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:16] := 0

AVX512F / immintrin.h / Compare
Compare packed unsigned 32-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*32
    IF k1[j]
        k[j] := ( a[i+31:i] == b[i+31:i] ) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:16] := 0

AVX512F / immintrin.h / Compare

Compare packed unsigned 32-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*32
    IF k1[j]
        k[j] := ( a[i+31:i] >= b[i+31:i] ) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:16] := 0

AVX512F / immintrin.h / Compare

Compare packed unsigned 32-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*32
    IF k1[j]
        k[j] := ( a[i+31:i] > b[i+31:i] ) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:16] := 0

AVX512F / immintrin.h / Compare

Compare packed unsigned 32-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*32
    IF k1[j]
        k[j] := ( a[i+31:i] <= b[i+31:i] ) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:16] := 0

AVX512F / immintrin.h / Compare

Compare packed unsigned 32-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*32
    IF k1[j]
        k[j] := ( a[i+31:i] < b[i+31:i] ) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:16] := 0

AVX512F / immintrin.h / Compare

Compare packed unsigned 32-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*32
    IF k1[j]
        k[j] := ( a[i+31:i] != b[i+31:i] ) ? 1 : 0
    ELSE
        k[j] := 0
    FI
ENDFOR
k[MAX:16] := 0

AVX512F / immintrin.h / Compare
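For the integer forms, the 3-bit _MM_CMPINT table above is shared by the signed and unsigned variants; only the lane type changes. A hedged scalar sketch (illustrative names, not from the data file):

// The epi32 and epu32 families share this table; only the lane type differs.
fn cmpint_op<T: PartialOrd>(imm8: u8, x: T, y: T) -> bool {
    match imm8 & 0b111 {
        0 => x == y,    // _MM_CMPINT_EQ
        1 => x < y,     // _MM_CMPINT_LT
        2 => x <= y,    // _MM_CMPINT_LE
        3 => false,     // _MM_CMPINT_FALSE
        4 => x != y,    // _MM_CMPINT_NE
        5 => !(x < y),  // _MM_CMPINT_NLT
        6 => !(x <= y), // _MM_CMPINT_NLE
        _ => true,      // _MM_CMPINT_TRUE
    }
}

fn main() {
    // -1 as i32 is less than 1; reinterpreted as u32 it is u32::MAX, so it is not.
    assert!(cmpint_op(1, -1i32, 1i32));
    assert!(!cmpint_op(1, -1i32 as u32, 1u32));
}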
Gather single-precision (32-bit) floating-point elements from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.

FOR j := 0 to 15
    i := j*32
    m := j*32
    addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
    dst[i+31:i] := MEM[addr+31:addr]
ENDFOR
dst[MAX:512] := 0

AVX512F / immintrin.h / Load

Gather single-precision (32-bit) floating-point elements from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.

FOR j := 0 to 15
    i := j*32
    m := j*32
    IF k[j]
        addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
        dst[i+31:i] := MEM[addr+31:addr]
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F / immintrin.h / Load
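A scalar model of the gathers above. Note that the trailing "* 8" in the address line is reproduced verbatim from the upstream data file; architecturally the effective address is base_addr plus the sign-extended index times scale, in bytes, which is what this sketch (illustrative names, not part of the patch) computes:

// Scalar model of the 32-bit-index gathers above (unsafe: every computed
// address must be readable).
unsafe fn i32gather_ps_model(base_addr: *const u8, vindex: &[i32; 16], scale: i64) -> [f32; 16] {
    debug_assert!(matches!(scale, 1 | 2 | 4 | 8));
    let mut dst = [0.0f32; 16];
    for j in 0..16 {
        let addr = base_addr.offset((vindex[j] as i64 * scale) as isize);
        dst[j] = addr.cast::<f32>().read_unaligned();
    }
    dst
}

fn main() {
    let data: Vec<f32> = (0..64).map(|x| x as f32).collect();
    let idx: [i32; 16] = core::array::from_fn(|j| (2 * j) as i32);
    let got = unsafe { i32gather_ps_model(data.as_ptr().cast(), &idx, 4) };
    assert_eq!(got[3], 6.0); // lane 3 reads data[6]
}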
Load 512-bits (composed of 8 packed double-precision (64-bit) floating-point elements) from memory into "dst".
"mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.

dst[511:0] := MEM[mem_addr+511:mem_addr]
dst[MAX:512] := 0

AVX512F / immintrin.h / Load

Load packed double-precision (64-bit) floating-point elements from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.

FOR j := 0 to 7
    i := j*64
    IF k[j]
        dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F / immintrin.h / Load

Load 512-bits (composed of 16 packed single-precision (32-bit) floating-point elements) from memory into "dst".
"mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.

dst[511:0] := MEM[mem_addr+511:mem_addr]
dst[MAX:512] := 0

AVX512F / immintrin.h / Load

Load packed single-precision (32-bit) floating-point elements from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.

FOR j := 0 to 15
    i := j*32
    IF k[j]
        dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F / immintrin.h / Load
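A scalar model of the aligned masked loads above (illustrative names; the assert stands in for the general-protection fault the text warns about):

unsafe fn mask_load_ps_model(src: &[f32; 16], k: u16, mem_addr: *const f32) -> [f32; 16] {
    assert_eq!(mem_addr as usize % 64, 0, "mem_addr must be 64-byte aligned");
    let mut dst = *src; // cleared mask bits keep the src lane
    for j in 0..16 {
        if (k >> j) & 1 == 1 {
            dst[j] = *mem_addr.add(j);
        }
    }
    dst
}

fn main() {
    #[repr(align(64))]
    struct Aligned([f32; 16]);
    let mem = Aligned(core::array::from_fn(|j| j as f32));
    let src = [-1.0f32; 16];
    let got = unsafe { mask_load_ps_model(&src, 0b11, mem.0.as_ptr()) };
    assert_eq!((got[0], got[1], got[2]), (0.0, 1.0, -1.0));
}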
Load 512-bits (composed of 16 packed 32-bit integers) from memory into "dst".
"mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.

dst[511:0] := MEM[mem_addr+511:mem_addr]
dst[MAX:512] := 0

AVX512F / immintrin.h / Load

Load 512-bits of integer data from memory into "dst".
"mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.

dst[511:0] := MEM[mem_addr+511:mem_addr]
dst[MAX:512] := 0

AVX512F / immintrin.h / Load

Load packed 32-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
"mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.

FOR j := 0 to 15
    i := j*32
    IF k[j]
        dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F / immintrin.h / Load

Load 512-bits (composed of 8 packed 64-bit integers) from memory into "dst".
"mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.

dst[511:0] := MEM[mem_addr+511:mem_addr]
dst[MAX:512] := 0

AVX512F / immintrin.h / Load

Load packed 64-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
"mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.

FOR j := 0 to 7
    i := j*64
    IF k[j]
        dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F / immintrin.h / Load
Gather 32-bit integers from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.

FOR j := 0 to 15
    i := j*32
    m := j*32
    addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
    dst[i+31:i] := MEM[addr+31:addr]
ENDFOR
dst[MAX:512] := 0

AVX512F / immintrin.h / Load

Gather 32-bit integers from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.

FOR j := 0 to 15
    i := j*32
    m := j*32
    IF k[j]
        addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
        dst[i+31:i] := MEM[addr+31:addr]
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F / immintrin.h / Load

Loads 8 64-bit integer elements from memory starting at location "base_addr" at packed 32-bit integer indices stored in the lower half of "vindex" scaled by "scale" and stores them in "dst".

FOR j := 0 to 7
    i := j*64
    m := j*32
    addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
    dst[i+63:i] := MEM[addr+63:addr]
ENDFOR
dst[MAX:512] := 0

AVX512F / immintrin.h / Load

Loads 8 64-bit integer elements from memory starting at location "base_addr" at packed 32-bit integer indices stored in the lower half of "vindex" scaled by "scale" and stores them in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := j*64
    m := j*32
    IF k[j]
        addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
        dst[i+63:i] := MEM[addr+63:addr]
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F / immintrin.h / Load
Loads 8 double-precision (64-bit) floating-point elements stored at memory locations starting at location "base_addr" at packed 32-bit integer indices stored in the lower half of "vindex" scaled by "scale", and stores them in "dst".

FOR j := 0 to 7
    i := j*64
    m := j*32
    addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
    dst[i+63:i] := MEM[addr+63:addr]
ENDFOR
dst[MAX:512] := 0

AVX512F / immintrin.h / Load

Loads 8 double-precision (64-bit) floating-point elements from memory starting at location "base_addr" at packed 32-bit integer indices stored in the lower half of "vindex" scaled by "scale" into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := j*64
    m := j*32
    IF k[j]
        addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
        dst[i+63:i] := MEM[addr+63:addr]
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F / immintrin.h / Load
Move packed double-precision (64-bit) floating-point elements from "a" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := j*64
    IF k[j]
        dst[i+63:i] := a[i+63:i]
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F / immintrin.h / Move

Move packed single-precision (32-bit) floating-point elements from "a" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*32
    IF k[j]
        dst[i+31:i] := a[i+31:i]
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F / immintrin.h / Move

Move packed 32-bit integers from "a" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 15
    i := j*32
    IF k[j]
        dst[i+31:i] := a[i+31:i]
    ELSE
        dst[i+31:i] := src[i+31:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F / immintrin.h / Move

Move packed 64-bit integers from "a" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 7
    i := j*64
    IF k[j]
        dst[i+63:i] := a[i+63:i]
    ELSE
        dst[i+63:i] := src[i+63:i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512F / immintrin.h / Move
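The masked moves above are pure register blends, with no memory access. A sketch (illustrative name, not from the data file):

// Lane j comes from "a" when k[j] is set, otherwise from "src".
fn mask_mov_epi32_model(src: &[i32; 16], k: u16, a: &[i32; 16]) -> [i32; 16] {
    core::array::from_fn(|j| if (k >> j) & 1 == 1 { a[j] } else { src[j] })
}

fn main() {
    let src = [0i32; 16];
    let a: [i32; 16] = core::array::from_fn(|j| j as i32 + 1);
    assert_eq!(&mask_mov_epi32_model(&src, 0b101, &a)[..3], &[1, 0, 3]);
}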
Store packed double-precision (64-bit) floating-point elements from "a" into memory using writemask "k".
"mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.

FOR j := 0 to 7
    i := j*64
    IF k[j]
        MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i]
    FI
ENDFOR

AVX512F / immintrin.h / Store

Store 512-bits (composed of 8 packed double-precision (64-bit) floating-point elements) from "a" into memory.
"mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.

MEM[mem_addr+511:mem_addr] := a[511:0]

AVX512F / immintrin.h / Store

Store packed single-precision (32-bit) floating-point elements from "a" into memory using writemask "k".
"mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.

FOR j := 0 to 15
    i := j*32
    IF k[j]
        MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i]
    FI
ENDFOR

AVX512F / immintrin.h / Store

Store 512-bits (composed of 16 packed single-precision (32-bit) floating-point elements) from "a" into memory.
"mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.

MEM[mem_addr+511:mem_addr] := a[511:0]

AVX512F / immintrin.h / Store
Store packed 32-bit integers from "a" into memory using writemask "k".
"mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.

FOR j := 0 to 15
    i := j*32
    IF k[j]
        MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i]
    FI
ENDFOR

AVX512F / immintrin.h / Store

Store 512-bits (composed of 16 packed 32-bit integers) from "a" into memory.
"mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.

MEM[mem_addr+511:mem_addr] := a[511:0]

AVX512F / immintrin.h / Store

Store 512-bits of integer data from "a" into memory.
"mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.

MEM[mem_addr+511:mem_addr] := a[511:0]

AVX512F / immintrin.h / Store

Store packed 64-bit integers from "a" into memory using writemask "k".
"mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.

FOR j := 0 to 7
    i := j*64
    IF k[j]
        MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i]
    FI
ENDFOR

AVX512F / immintrin.h / Store

Store 512-bits (composed of 8 packed 64-bit integers) from "a" into memory.
"mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.

MEM[mem_addr+511:mem_addr] := a[511:0]

AVX512F / immintrin.h / Store
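A scalar model of the aligned masked stores above (illustrative names; lanes with a clear mask bit leave their memory location untouched rather than zeroing it):

unsafe fn mask_store_epi64_model(mem_addr: *mut i64, k: u8, a: &[i64; 8]) {
    assert_eq!(mem_addr as usize % 64, 0, "mem_addr must be 64-byte aligned");
    for j in 0..8 {
        if (k >> j) & 1 == 1 {
            mem_addr.add(j).write(a[j]);
        }
    }
}

fn main() {
    #[repr(align(64))]
    struct Aligned([i64; 8]);
    let mut mem = Aligned([0; 8]);
    let a: [i64; 8] = [10, 20, 30, 40, 50, 60, 70, 80];
    unsafe { mask_store_epi64_model(mem.0.as_mut_ptr(), 0b1000_0001, &a) };
    assert_eq!(mem.0, [10, 0, 0, 0, 0, 0, 0, 80]);
}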
Scatter 32-bit integers from "a" into memory using 32-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.

FOR j := 0 to 15
    i := j*32
    m := j*32
    addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
    MEM[addr+31:addr] := a[i+31:i]
ENDFOR

AVX512F / immintrin.h / Store

Scatter 32-bit integers from "a" into memory using 32-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.

FOR j := 0 to 15
    i := j*32
    m := j*32
    IF k[j]
        addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
        MEM[addr+31:addr] := a[i+31:i]
    FI
ENDFOR

AVX512F / immintrin.h / Store

Scatter single-precision (32-bit) floating-point elements from "a" into memory using 32-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.

FOR j := 0 to 15
    i := j*32
    m := j*32
    addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
    MEM[addr+31:addr] := a[i+31:i]
ENDFOR

AVX512F / immintrin.h / Store

Scatter single-precision (32-bit) floating-point elements from "a" into memory using 32-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.

FOR j := 0 to 15
    i := j*32
    m := j*32
    IF k[j]
        addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
        MEM[addr+31:addr] := a[i+31:i]
    FI
ENDFOR

AVX512F / immintrin.h / Store
+Stores 8 packed double-precision (64-bit) floating-point elements in "a" to memory locations starting at location "base_addr" at packed 32-bit integer indices stored in "vindex" scaled by "scale".
+
+FOR j := 0 to 7
+  i := j*64
+  m := j*32
+  addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+  MEM[addr+63:addr] := a[i+63:i]
+ENDFOR
+
+AVX512F
+immintrin.h
+Store
+Stores 8 packed double-precision (64-bit) floating-point elements in "a" to memory locations starting at location "base_addr" at packed 32-bit integer indices stored in "vindex" scaled by "scale". Only those elements whose corresponding mask bit is set in writemask "k" are written to memory.
+
+FOR j := 0 to 7
+  i := j*64
+  m := j*32
+  IF k[j]
+    addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+    MEM[addr+63:addr] := a[i+63:i]
+  FI
+ENDFOR
+
+AVX512F
+immintrin.h
+Store
+Compute the bitwise AND of packed 32-bit integers in "a" and "b", and store the results in "dst".
+
+FOR j := 0 to 15
+  i := j*32
+  dst[i+31:i] := a[i+31:i] AND b[i+31:i]
+ENDFOR
+dst[MAX:512] := 0
+
+AVX512F
+immintrin.h
+Logical
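The lane-wise bitwise operations in this block all share the same shape; a minimal Rust model of the 32-bit AND (illustrative only, invented name):

    fn and_epi32(a: [i32; 16], b: [i32; 16]) -> [i32; 16] {
        let mut dst = [0i32; 16];
        for j in 0..16 {
            dst[j] = a[j] & b[j]; // dst[i+31:i] := a[i+31:i] AND b[i+31:i]
        }
        dst
    }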
+Compute the bitwise AND of 512 bits (representing integer data) in "a" and "b", and store the result in "dst".
+
+dst[511:0] := (a[511:0] AND b[511:0])
+dst[MAX:512] := 0
+
+AVX512F
+immintrin.h
+Logical
+Compute the bitwise NOT of packed 32-bit integers in "a" and then AND with "b", and store the results in "dst".
+
+FOR j := 0 to 15
+  i := j*32
+  dst[i+31:i] := (NOT a[i+31:i]) AND b[i+31:i]
+ENDFOR
+dst[MAX:512] := 0
+
+AVX512F
+immintrin.h
+Logical
+Compute the bitwise NOT of 512 bits (representing integer data) in "a" and then AND with "b", and store the result in "dst".
+
+dst[511:0] := ((NOT a[511:0]) AND b[511:0])
+dst[MAX:512] := 0
+
+AVX512F
+immintrin.h
+Logical
+Compute the bitwise NOT of packed 32-bit integers in "a" and then AND with "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 15
+  i := j*32
+  IF k[j]
+    dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i])
+  ELSE
+    dst[i+31:i] := src[i+31:i]
+  FI
+ENDFOR
+dst[MAX:512] := 0
+
+AVX512F
+immintrin.h
+Logical
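A sketch of the writemask pattern used by all of the masked lane-wise operations here: inactive lanes pass "src" through unchanged (illustrative only, invented name):

    fn mask_andnot_epi32(src: [i32; 16], k: u16, a: [i32; 16], b: [i32; 16]) -> [i32; 16] {
        let mut dst = src; // ELSE branch: dst[i+31:i] := src[i+31:i]
        for j in 0..16 {
            if (k >> j) & 1 == 1 {
                dst[j] = !a[j] & b[j]; // (NOT a) AND b in active lanes
            }
        }
        dst
    }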
+Compute the bitwise NOT of 512 bits (composed of packed 64-bit integers) in "a" and then AND with "b", and store the result in "dst".
+
+dst[511:0] := ((NOT a[511:0]) AND b[511:0])
+dst[MAX:512] := 0
+
+AVX512F
+immintrin.h
+Logical
+Compute the bitwise NOT of packed 64-bit integers in "a" and then AND with "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 7
+  i := j*64
+  IF k[j]
+    dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i])
+  ELSE
+    dst[i+63:i] := src[i+63:i]
+  FI
+ENDFOR
+dst[MAX:512] := 0
+
+AVX512F
+immintrin.h
+Logical
+Compute the bitwise AND of 512 bits (composed of packed 64-bit integers) in "a" and "b", and store the result in "dst".
+
+dst[511:0] := (a[511:0] AND b[511:0])
+dst[MAX:512] := 0
+
+AVX512F
+immintrin.h
+Logical
+Compute the bitwise AND of packed 64-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 7
+  i := j*64
+  IF k[j]
+    dst[i+63:i] := a[i+63:i] AND b[i+63:i]
+  ELSE
+    dst[i+63:i] := src[i+63:i]
+  FI
+ENDFOR
+dst[MAX:512] := 0
+
+AVX512F
+immintrin.h
+Logical
+Compute the bitwise OR of packed 32-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 15
+  i := j*32
+  IF k[j]
+    dst[i+31:i] := a[i+31:i] OR b[i+31:i]
+  ELSE
+    dst[i+31:i] := src[i+31:i]
+  FI
+ENDFOR
+dst[MAX:512] := 0
+
+AVX512F
+immintrin.h
+Logical
+Compute the bitwise OR of packed 32-bit integers in "a" and "b", and store the results in "dst".
+
+FOR j := 0 to 15
+  i := j*32
+  dst[i+31:i] := a[i+31:i] OR b[i+31:i]
+ENDFOR
+dst[MAX:512] := 0
+
+AVX512F
+immintrin.h
+Logical
+Compute the bitwise OR of 512 bits (representing integer data) in "a" and "b", and store the result in "dst".
+
+dst[511:0] := (a[511:0] OR b[511:0])
+dst[MAX:512] := 0
+
+AVX512F
+immintrin.h
+Logical
+Compute the bitwise OR of packed 64-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 7
+  i := j*64
+  IF k[j]
+    dst[i+63:i] := a[i+63:i] OR b[i+63:i]
+  ELSE
+    dst[i+63:i] := src[i+63:i]
+  FI
+ENDFOR
+dst[MAX:512] := 0
+
+AVX512F
+immintrin.h
+Logical
+Compute the bitwise OR of packed 64-bit integers in "a" and "b", and store the result in "dst".
+
+FOR j := 0 to 7
+  i := j*64
+  dst[i+63:i] := a[i+63:i] OR b[i+63:i]
+ENDFOR
+dst[MAX:512] := 0
+
+AVX512F
+immintrin.h
+Logical
+Compute the bitwise AND of packed 32-bit integers in "a" and "b", producing intermediate 32-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k1") if the intermediate value is non-zero.
+
+FOR j := 0 to 15
+  i := j*32
+  IF k1[j]
+    k[j] := ((a[i+31:i] AND b[i+31:i]) != 0) ? 1 : 0
+  ELSE
+    k[j] := 0
+  FI
+ENDFOR
+k[MAX:16] := 0
+
+AVX512F
+immintrin.h
+Logical
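A scalar model of this masked test, returning the 16-bit mask directly (illustrative only, invented name):

    // Bit j of the result is set only if lane j is active in k1
    // and (a AND b) is non-zero in that lane.
    fn mask_test_epi32_mask(k1: u16, a: [i32; 16], b: [i32; 16]) -> u16 {
        let mut k = 0u16;
        for j in 0..16 {
            if (k1 >> j) & 1 == 1 && (a[j] & b[j]) != 0 {
                k |= 1 << j;
            }
        }
        k
    }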
+Compute the bitwise AND of packed 32-bit integers in "a" and "b", producing intermediate 32-bit values, and set the corresponding bit in result mask "k" if the intermediate value is non-zero.
+
+FOR j := 0 to 15
+  i := j*32
+  k[j] := ((a[i+31:i] AND b[i+31:i]) != 0) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+
+AVX512F
+immintrin.h
+Logical
+Compute the bitwise XOR of packed 32-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 15
+  i := j*32
+  IF k[j]
+    dst[i+31:i] := a[i+31:i] XOR b[i+31:i]
+  ELSE
+    dst[i+31:i] := src[i+31:i]
+  FI
+ENDFOR
+dst[MAX:512] := 0
+
+AVX512F
+immintrin.h
+Logical
+Compute the bitwise XOR of packed 32-bit integers in "a" and "b", and store the results in "dst".
+
+FOR j := 0 to 15
+  i := j*32
+  dst[i+31:i] := a[i+31:i] XOR b[i+31:i]
+ENDFOR
+dst[MAX:512] := 0
+
+AVX512F
+immintrin.h
+Logical
+Compute the bitwise XOR of 512 bits (representing integer data) in "a" and "b", and store the result in "dst".
+
+dst[511:0] := (a[511:0] XOR b[511:0])
+dst[MAX:512] := 0
+
+AVX512F
+immintrin.h
+Logical
+Compute the bitwise XOR of packed 64-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 7
+  i := j*64
+  IF k[j]
+    dst[i+63:i] := a[i+63:i] XOR b[i+63:i]
+  ELSE
+    dst[i+63:i] := src[i+63:i]
+  FI
+ENDFOR
+dst[MAX:512] := 0
+
+AVX512F
+immintrin.h
+Logical
+Compute the bitwise XOR of packed 64-bit integers in "a" and "b", and store the results in "dst".
+
+FOR j := 0 to 7
+  i := j*64
+  dst[i+63:i] := a[i+63:i] XOR b[i+63:i]
+ENDFOR
+dst[MAX:512] := 0
+
+AVX512F
+immintrin.h
+Logical
+Reduce the packed 32-bit integers in "a" by bitwise AND using mask "k". Returns the bitwise AND of all active elements in "a".
+
+DEFINE REDUCE_AND(src, len) {
+  IF len == 2
+    RETURN src[31:0] AND src[63:32]
+  FI
+  len := len / 2
+  FOR j := 0 to (len-1)
+    i := j*32
+    src[i+31:i] := src[i+31:i] AND src[i+32*len+31:i+32*len]
+  ENDFOR
+  RETURN REDUCE_AND(src[32*len-1:0], len)
+}
+tmp := a
+FOR j := 0 to 15
+  i := j*32
+  IF k[j]
+    tmp[i+31:i] := a[i+31:i]
+  ELSE
+    tmp[i+31:i] := 0xFFFFFFFF
+  FI
+ENDFOR
+dst[31:0] := REDUCE_AND(tmp, 16)
+
+AVX512F
+immintrin.h
+Logical
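Because AND is associative, the recursive halving in REDUCE_AND is equivalent to a linear fold; a scalar sketch (illustrative only, invented name) with the all-ones identity substituted in inactive lanes:

    fn mask_reduce_and_epi32(k: u16, a: [i32; 16]) -> i32 {
        let mut acc = -1i32; // 0xFFFFFFFF, the AND identity used for inactive lanes
        for j in 0..16 {
            if (k >> j) & 1 == 1 {
                acc &= a[j];
            }
        }
        acc
    }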
+Reduce the packed 64-bit integers in "a" by bitwise AND using mask "k". Returns the bitwise AND of all active elements in "a".
+
+DEFINE REDUCE_AND(src, len) {
+  IF len == 2
+    RETURN src[63:0] AND src[127:64]
+  FI
+  len := len / 2
+  FOR j := 0 to (len-1)
+    i := j*64
+    src[i+63:i] := src[i+63:i] AND src[i+64*len+63:i+64*len]
+  ENDFOR
+  RETURN REDUCE_AND(src[64*len-1:0], len)
+}
+tmp := a
+FOR j := 0 to 7
+  i := j*64
+  IF k[j]
+    tmp[i+63:i] := a[i+63:i]
+  ELSE
+    tmp[i+63:i] := 0xFFFFFFFFFFFFFFFF
+  FI
+ENDFOR
+dst[63:0] := REDUCE_AND(tmp, 8)
+
+AVX512F
+immintrin.h
+Logical
+Reduce the packed 32-bit integers in "a" by bitwise OR using mask "k". Returns the bitwise OR of all active elements in "a".
+
+DEFINE REDUCE_OR(src, len) {
+  IF len == 2
+    RETURN src[31:0] OR src[63:32]
+  FI
+  len := len / 2
+  FOR j := 0 to (len-1)
+    i := j*32
+    src[i+31:i] := src[i+31:i] OR src[i+32*len+31:i+32*len]
+  ENDFOR
+  RETURN REDUCE_OR(src[32*len-1:0], len)
+}
+tmp := a
+FOR j := 0 to 15
+  i := j*32
+  IF k[j]
+    tmp[i+31:i] := a[i+31:i]
+  ELSE
+    tmp[i+31:i] := 0
+  FI
+ENDFOR
+dst[31:0] := REDUCE_OR(tmp, 16)
+
+AVX512F
+immintrin.h
+Logical
+Reduce the packed 64-bit integers in "a" by bitwise OR using mask "k". Returns the bitwise OR of all active elements in "a".
+
+DEFINE REDUCE_OR(src, len) {
+  IF len == 2
+    RETURN src[63:0] OR src[127:64]
+  FI
+  len := len / 2
+  FOR j := 0 to (len-1)
+    i := j*64
+    src[i+63:i] := src[i+63:i] OR src[i+64*len+63:i+64*len]
+  ENDFOR
+  RETURN REDUCE_OR(src[64*len-1:0], len)
+}
+tmp := a
+FOR j := 0 to 7
+  i := j*64
+  IF k[j]
+    tmp[i+63:i] := a[i+63:i]
+  ELSE
+    tmp[i+63:i] := 0
+  FI
+ENDFOR
+dst[63:0] := REDUCE_OR(tmp, 8)
+
+AVX512F
+immintrin.h
+Logical
+Reduce the packed 32-bit integers in "a" by bitwise AND. Returns the bitwise AND of all elements in "a".
+
+DEFINE REDUCE_AND(src, len) {
+  IF len == 2
+    RETURN src[31:0] AND src[63:32]
+  FI
+  len := len / 2
+  FOR j := 0 to (len-1)
+    i := j*32
+    src[i+31:i] := src[i+31:i] AND src[i+32*len+31:i+32*len]
+  ENDFOR
+  RETURN REDUCE_AND(src[32*len-1:0], len)
+}
+dst[31:0] := REDUCE_AND(a, 16)
+
+AVX512F
+immintrin.h
+Logical
+Reduce the packed 64-bit integers in "a" by bitwise AND. Returns the bitwise AND of all elements in "a".
+
+DEFINE REDUCE_AND(src, len) {
+  IF len == 2
+    RETURN src[63:0] AND src[127:64]
+  FI
+  len := len / 2
+  FOR j := 0 to (len-1)
+    i := j*64
+    src[i+63:i] := src[i+63:i] AND src[i+64*len+63:i+64*len]
+  ENDFOR
+  RETURN REDUCE_AND(src[64*len-1:0], len)
+}
+dst[63:0] := REDUCE_AND(a, 8)
+
+AVX512F
+immintrin.h
+Logical
+Reduce the packed 32-bit integers in "a" by bitwise OR. Returns the bitwise OR of all elements in "a".
+
+DEFINE REDUCE_OR(src, len) {
+  IF len == 2
+    RETURN src[31:0] OR src[63:32]
+  FI
+  len := len / 2
+  FOR j := 0 to (len-1)
+    i := j*32
+    src[i+31:i] := src[i+31:i] OR src[i+32*len+31:i+32*len]
+  ENDFOR
+  RETURN REDUCE_OR(src[32*len-1:0], len)
+}
+dst[31:0] := REDUCE_OR(a, 16)
+
+AVX512F
+immintrin.h
+Logical
+Reduce the packed 64-bit integers in "a" by bitwise OR. Returns the bitwise OR of all elements in "a".
+
+DEFINE REDUCE_OR(src, len) {
+  IF len == 2
+    RETURN src[63:0] OR src[127:64]
+  FI
+  len := len / 2
+  FOR j := 0 to (len-1)
+    i := j*64
+    src[i+63:i] := src[i+63:i] OR src[i+64*len+63:i+64*len]
+  ENDFOR
+  RETURN REDUCE_OR(src[64*len-1:0], len)
+}
+dst[63:0] := REDUCE_OR(a, 8)
+
+AVX512F
+immintrin.h
+Logical
+Performs element-by-element bitwise AND between packed 32-bit integer elements of "v2" and "v3", storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 15
+  i := j*32
+  IF k[j]
+    dst[i+31:i] := v2[i+31:i] & v3[i+31:i]
+  ELSE
+    dst[i+31:i] := src[i+31:i]
+  FI
+ENDFOR
+dst[MAX:512] := 0
+
+AVX512F
+immintrin.h
+Logical
+Compare packed signed 32-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 15
+  i := j*32
+  IF k[j]
+    dst[i+31:i] := MAX(a[i+31:i], b[i+31:i])
+  ELSE
+    dst[i+31:i] := src[i+31:i]
+  FI
+ENDFOR
+dst[MAX:512] := 0
+
+AVX512F
+immintrin.h
+Special Math Functions
+Compare packed signed 32-bit integers in "a" and "b", and store packed maximum values in "dst".
+
+FOR j := 0 to 15
+  i := j*32
+  dst[i+31:i] := MAX(a[i+31:i], b[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+
+AVX512F
+immintrin.h
+Special Math Functions
+Compare packed unsigned 32-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 15
+  i := j*32
+  IF k[j]
+    dst[i+31:i] := MAX(a[i+31:i], b[i+31:i])
+  ELSE
+    dst[i+31:i] := src[i+31:i]
+  FI
+ENDFOR
+dst[MAX:512] := 0
+
+AVX512F
+immintrin.h
+Special Math Functions
+Compare packed unsigned 32-bit integers in "a" and "b", and store packed maximum values in "dst".
+
+FOR j := 0 to 15
+  i := j*32
+  dst[i+31:i] := MAX(a[i+31:i], b[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+
+AVX512F
+immintrin.h
+Special Math Functions
+Compare packed signed 32-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 15
+  i := j*32
+  IF k[j]
+    dst[i+31:i] := MIN(a[i+31:i], b[i+31:i])
+  ELSE
+    dst[i+31:i] := src[i+31:i]
+  FI
+ENDFOR
+dst[MAX:512] := 0
+
+AVX512F
+immintrin.h
+Special Math Functions
+Compare packed signed 32-bit integers in "a" and "b", and store packed minimum values in "dst".
+
+FOR j := 0 to 15
+  i := j*32
+  dst[i+31:i] := MIN(a[i+31:i], b[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+
+AVX512F
+immintrin.h
+Special Math Functions
+Compare packed unsigned 32-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+
+FOR j := 0 to 15
+  i := j*32
+  IF k[j]
+    dst[i+31:i] := MIN(a[i+31:i], b[i+31:i])
+  ELSE
+    dst[i+31:i] := src[i+31:i]
+  FI
+ENDFOR
+dst[MAX:512] := 0
+
+AVX512F
+immintrin.h
+Special Math Functions
+Compare packed unsigned 32-bit integers in "a" and "b", and store packed minimum values in "dst".
+
+FOR j := 0 to 15
+  i := j*32
+  dst[i+31:i] := MIN(a[i+31:i], b[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+
+AVX512F
+immintrin.h
+Special Math Functions
+Reduce the packed signed 32-bit integers in "a" by maximum using mask "k". Returns the maximum of all active elements in "a".
+
+DEFINE REDUCE_MAX(src, len) {
+  IF len == 2
+    RETURN (src[31:0] > src[63:32] ? src[31:0] : src[63:32])
+  FI
+  len := len / 2
+  FOR j := 0 to (len-1)
+    i := j*32
+    src[i+31:i] := (src[i+31:i] > src[i+32*len+31:i+32*len] ? src[i+31:i] : src[i+32*len+31:i+32*len])
+  ENDFOR
+  RETURN REDUCE_MAX(src[32*len-1:0], len)
+}
+tmp := a
+FOR j := 0 to 15
+  i := j*32
+  IF k[j]
+    tmp[i+31:i] := a[i+31:i]
+  ELSE
+    tmp[i+31:i] := Int32(-0x80000000)
+  FI
+ENDFOR
+dst[31:0] := REDUCE_MAX(tmp, 16)
+
+AVX512F
+immintrin.h
+Special Math Functions
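Same fold idea as the bitwise reductions: inactive lanes take Int32(-0x80000000), i.e. i32::MIN, so they can never win the maximum. A scalar sketch (illustrative only, invented name):

    fn mask_reduce_max_epi32(k: u16, a: [i32; 16]) -> i32 {
        let mut acc = i32::MIN; // identity substituted in inactive lanes
        for j in 0..16 {
            if (k >> j) & 1 == 1 {
                acc = acc.max(a[j]);
            }
        }
        acc
    }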
+Reduce the packed signed 64-bit integers in "a" by maximum using mask "k". Returns the maximum of all active elements in "a".
+
+DEFINE REDUCE_MAX(src, len) {
+  IF len == 2
+    RETURN (src[63:0] > src[127:64] ? src[63:0] : src[127:64])
+  FI
+  len := len / 2
+  FOR j := 0 to (len-1)
+    i := j*64
+    src[i+63:i] := (src[i+63:i] > src[i+64*len+63:i+64*len] ? src[i+63:i] : src[i+64*len+63:i+64*len])
+  ENDFOR
+  RETURN REDUCE_MAX(src[64*len-1:0], len)
+}
+tmp := a
+FOR j := 0 to 7
+  i := j*64
+  IF k[j]
+    tmp[i+63:i] := a[i+63:i]
+  ELSE
+    tmp[i+63:i] := Int64(-0x8000000000000000)
+  FI
+ENDFOR
+dst[63:0] := REDUCE_MAX(tmp, 8)
+
+AVX512F
+immintrin.h
+Special Math Functions
+Reduce the packed unsigned 32-bit integers in "a" by maximum using mask "k". Returns the maximum of all active elements in "a".
+
+DEFINE REDUCE_MAX(src, len) {
+  IF len == 2
+    RETURN (src[31:0] > src[63:32] ? src[31:0] : src[63:32])
+  FI
+  len := len / 2
+  FOR j := 0 to (len-1)
+    i := j*32
+    src[i+31:i] := (src[i+31:i] > src[i+32*len+31:i+32*len] ? src[i+31:i] : src[i+32*len+31:i+32*len])
+  ENDFOR
+  RETURN REDUCE_MAX(src[32*len-1:0], len)
+}
+tmp := a
+FOR j := 0 to 15
+  i := j*32
+  IF k[j]
+    tmp[i+31:i] := a[i+31:i]
+  ELSE
+    tmp[i+31:i] := 0
+  FI
+ENDFOR
+dst[31:0] := REDUCE_MAX(tmp, 16)
+
+AVX512F
+immintrin.h
+Special Math Functions
+Reduce the packed unsigned 64-bit integers in "a" by maximum using mask "k". Returns the maximum of all active elements in "a".
+
+DEFINE REDUCE_MAX(src, len) {
+  IF len == 2
+    RETURN (src[63:0] > src[127:64] ? src[63:0] : src[127:64])
+  FI
+  len := len / 2
+  FOR j := 0 to (len-1)
+    i := j*64
+    src[i+63:i] := (src[i+63:i] > src[i+64*len+63:i+64*len] ? src[i+63:i] : src[i+64*len+63:i+64*len])
+  ENDFOR
+  RETURN REDUCE_MAX(src[64*len-1:0], len)
+}
+tmp := a
+FOR j := 0 to 7
+  i := j*64
+  IF k[j]
+    tmp[i+63:i] := a[i+63:i]
+  ELSE
+    tmp[i+63:i] := 0
+  FI
+ENDFOR
+dst[63:0] := REDUCE_MAX(tmp, 8)
+
+AVX512F
+immintrin.h
+Special Math Functions
+Reduce the packed double-precision (64-bit) floating-point elements in "a" by maximum using mask "k". Returns the maximum of all active elements in "a".
+
+DEFINE REDUCE_MAX(src, len) {
+  IF len == 2
+    RETURN (src[63:0] > src[127:64] ? src[63:0] : src[127:64])
+  FI
+  len := len / 2
+  FOR j := 0 to (len-1)
+    i := j*64
+    src[i+63:i] := (src[i+63:i] > src[i+64*len+63:i+64*len] ? src[i+63:i] : src[i+64*len+63:i+64*len])
+  ENDFOR
+  RETURN REDUCE_MAX(src[64*len-1:0], len)
+}
+tmp := a
+FOR j := 0 to 7
+  i := j*64
+  IF k[j]
+    tmp[i+63:i] := a[i+63:i]
+  ELSE
+    tmp[i+63:i] := Cast_FP64(0xFFEFFFFFFFFFFFFF)
+  FI
+ENDFOR
+dst[63:0] := REDUCE_MAX(tmp, 8)
+
+AVX512F
+immintrin.h
+Special Math Functions
+Reduce the packed single-precision (32-bit) floating-point elements in "a" by maximum using mask "k". Returns the maximum of all active elements in "a".
+
+DEFINE REDUCE_MAX(src, len) {
+  IF len == 2
+    RETURN (src[31:0] > src[63:32] ? src[31:0] : src[63:32])
+  FI
+  len := len / 2
+  FOR j := 0 to (len-1)
+    i := j*32
+    src[i+31:i] := (src[i+31:i] > src[i+32*len+31:i+32*len] ? src[i+31:i] : src[i+32*len+31:i+32*len])
+  ENDFOR
+  RETURN REDUCE_MAX(src[32*len-1:0], len)
+}
+tmp := a
+FOR j := 0 to 15
+  i := j*32
+  IF k[j]
+    tmp[i+31:i] := a[i+31:i]
+  ELSE
+    tmp[i+31:i] := Cast_FP32(0xFF7FFFFF)
+  FI
+ENDFOR
+dst[31:0] := REDUCE_MAX(tmp, 16)
+
+AVX512F
+immintrin.h
+Special Math Functions
+Reduce the packed signed 32-bit integers in "a" by minimum using mask "k". Returns the minimum of all active elements in "a".
+
+DEFINE REDUCE_MIN(src, len) {
+  IF len == 2
+    RETURN (src[31:0] < src[63:32] ? src[31:0] : src[63:32])
+  FI
+  len := len / 2
+  FOR j := 0 to (len-1)
+    i := j*32
+    src[i+31:i] := (src[i+31:i] < src[i+32*len+31:i+32*len] ? src[i+31:i] : src[i+32*len+31:i+32*len])
+  ENDFOR
+  RETURN REDUCE_MIN(src[32*len-1:0], len)
+}
+tmp := a
+FOR j := 0 to 15
+  i := j*32
+  IF k[j]
+    tmp[i+31:i] := a[i+31:i]
+  ELSE
+    tmp[i+31:i] := Int32(0x7FFFFFFF)
+  FI
+ENDFOR
+dst[31:0] := REDUCE_MIN(tmp, 16)
+
+AVX512F
+immintrin.h
+Special Math Functions
+Reduce the packed signed 64-bit integers in "a" by minimum using mask "k". Returns the minimum of all active elements in "a".
+
+DEFINE REDUCE_MIN(src, len) {
+  IF len == 2
+    RETURN (src[63:0] < src[127:64] ? src[63:0] : src[127:64])
+  FI
+  len := len / 2
+  FOR j := 0 to (len-1)
+    i := j*64
+    src[i+63:i] := (src[i+63:i] < src[i+64*len+63:i+64*len] ? src[i+63:i] : src[i+64*len+63:i+64*len])
+  ENDFOR
+  RETURN REDUCE_MIN(src[64*len-1:0], len)
+}
+tmp := a
+FOR j := 0 to 7
+  i := j*64
+  IF k[j]
+    tmp[i+63:i] := a[i+63:i]
+  ELSE
+    tmp[i+63:i] := Int64(0x7FFFFFFFFFFFFFFF)
+  FI
+ENDFOR
+dst[63:0] := REDUCE_MIN(tmp, 8)
+
+AVX512F
+immintrin.h
+Special Math Functions
+Reduce the packed unsigned 32-bit integers in "a" by minimum using mask "k". Returns the minimum of all active elements in "a".
+
+DEFINE REDUCE_MIN(src, len) {
+  IF len == 2
+    RETURN (src[31:0] < src[63:32] ? src[31:0] : src[63:32])
+  FI
+  len := len / 2
+  FOR j := 0 to (len-1)
+    i := j*32
+    src[i+31:i] := (src[i+31:i] < src[i+32*len+31:i+32*len] ? src[i+31:i] : src[i+32*len+31:i+32*len])
+  ENDFOR
+  RETURN REDUCE_MIN(src[32*len-1:0], len)
+}
+tmp := a
+FOR j := 0 to 15
+  i := j*32
+  IF k[j]
+    tmp[i+31:i] := a[i+31:i]
+  ELSE
+    tmp[i+31:i] := 0xFFFFFFFF
+  FI
+ENDFOR
+dst[31:0] := REDUCE_MIN(tmp, 16)
+
+AVX512F
+immintrin.h
+Special Math Functions
+Reduce the packed unsigned 64-bit integers in "a" by minimum using mask "k". Returns the minimum of all active elements in "a".
+
+DEFINE REDUCE_MIN(src, len) {
+  IF len == 2
+    RETURN (src[63:0] < src[127:64] ? src[63:0] : src[127:64])
+  FI
+  len := len / 2
+  FOR j := 0 to (len-1)
+    i := j*64
+    src[i+63:i] := (src[i+63:i] < src[i+64*len+63:i+64*len] ? src[i+63:i] : src[i+64*len+63:i+64*len])
+  ENDFOR
+  RETURN REDUCE_MIN(src[64*len-1:0], len)
+}
+tmp := a
+FOR j := 0 to 7
+  i := j*64
+  IF k[j]
+    tmp[i+63:i] := a[i+63:i]
+  ELSE
+    tmp[i+63:i] := 0xFFFFFFFFFFFFFFFF
+  FI
+ENDFOR
+dst[63:0] := REDUCE_MIN(tmp, 8)
+
+AVX512F
+immintrin.h
+Special Math Functions
+Reduce the packed double-precision (64-bit) floating-point elements in "a" by minimum using mask "k". Returns the minimum of all active elements in "a". [min_float_note]
+
+DEFINE REDUCE_MIN(src, len) {
+  IF len == 2
+    RETURN (src[63:0] < src[127:64] ? src[63:0] : src[127:64])
+  FI
+  len := len / 2
+  FOR j := 0 to (len-1)
+    i := j*64
+    src[i+63:i] := (src[i+63:i] < src[i+64*len+63:i+64*len] ? src[i+63:i] : src[i+64*len+63:i+64*len])
+  ENDFOR
+  RETURN REDUCE_MIN(src[64*len-1:0], len)
+}
+tmp := a
+FOR j := 0 to 7
+  i := j*64
+  IF k[j]
+    tmp[i+63:i] := a[i+63:i]
+  ELSE
+    tmp[i+63:i] := Cast_FP64(0x7FEFFFFFFFFFFFFF)
+  FI
+ENDFOR
+dst[63:0] := REDUCE_MIN(tmp, 8)
+
+AVX512F
+immintrin.h
+Special Math Functions
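For the float minimum, the substituted identity Cast_FP64(0x7FEFFFFFFFFFFFFF) is exactly f64::MAX. The comparison-based pseudocode leaves NaN handling to the [min_float_note] placeholder, so this scalar sketch (illustrative only, invented name) assumes NaN-free inputs:

    fn mask_reduce_min_pd(k: u8, a: [f64; 8]) -> f64 {
        let mut acc = f64::MAX; // bit pattern 0x7FEFFFFFFFFFFFFF
        for j in 0..8 {
            if (k >> j) & 1 == 1 && a[j] < acc {
                acc = a[j];
            }
        }
        acc
    }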
+Reduce the packed single-precision (32-bit) floating-point elements in "a" by minimum using mask "k". Returns the minimum of all active elements in "a". [min_float_note]
+
+DEFINE REDUCE_MIN(src, len) {
+  IF len == 2
+    RETURN (src[31:0] < src[63:32] ? src[31:0] : src[63:32])
+  FI
+  len := len / 2
+  FOR j := 0 to (len-1)
+    i := j*32
+    src[i+31:i] := (src[i+31:i] < src[i+32*len+31:i+32*len] ? src[i+31:i] : src[i+32*len+31:i+32*len])
+  ENDFOR
+  RETURN REDUCE_MIN(src[32*len-1:0], len)
+}
+tmp := a
+FOR j := 0 to 15
+  i := j*32
+  IF k[j]
+    tmp[i+31:i] := a[i+31:i]
+  ELSE
+    tmp[i+31:i] := Cast_FP32(0x7F7FFFFF)
+  FI
+ENDFOR
+dst[31:0] := REDUCE_MIN(tmp, 16)
+
+AVX512F
+immintrin.h
+Special Math Functions
+Reduce the packed signed 32-bit integers in "a" by maximum. Returns the maximum of all elements in "a".
+
+DEFINE REDUCE_MAX(src, len) {
+  IF len == 2
+    RETURN (src[31:0] > src[63:32] ? src[31:0] : src[63:32])
+  FI
+  len := len / 2
+  FOR j := 0 to (len-1)
+    i := j*32
+    src[i+31:i] := (src[i+31:i] > src[i+32*len+31:i+32*len] ? src[i+31:i] : src[i+32*len+31:i+32*len])
+  ENDFOR
+  RETURN REDUCE_MAX(src[32*len-1:0], len)
+}
+dst[31:0] := REDUCE_MAX(a, 16)
+
+AVX512F
+immintrin.h
+Special Math Functions
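The unmasked reductions use the same recursive halving throughout; a direct Rust transcription of REDUCE_MAX (illustrative only; like the pseudocode, it assumes a power-of-two length of at least 2):

    fn reduce_max(v: &[i32]) -> i32 {
        if v.len() == 2 {
            return if v[0] > v[1] { v[0] } else { v[1] };
        }
        let len = v.len() / 2;
        // Fold the upper half into the lower half pairwise, then recurse.
        let folded: Vec<i32> = (0..len).map(|j| v[j].max(v[j + len])).collect();
        reduce_max(&folded)
    }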
- - - Reduce the packed signed 64-bit integers in "a" by maximum. Returns the maximum - of all elements in "a". - - DEFINE REDUCE_MAX(src, len) { - IF len == 2 - RETURN (src[63:0] > src[127:64] ? src[63:0] : src[127:64]) - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*64 - src[i+63:i] := (src[i+63:i] > src[i+64*len+63:i+64*len] ? src[i+63:i] : - src[i+64*len+63:i+64*len]) - ENDFOR - RETURN REDUCE_MAX(src[64*len-1:0], len) - } - dst[63:0] := REDUCE_MAX(a, 8) - - AVX512F -
immintrin.h
- Special Math Functions + + + Reduce the packed signed 64-bit integers in "a" by maximum. Returns the maximum of all elements in "a". + +DEFINE REDUCE_MAX(src, len) { + IF len == 2 + RETURN (src[63:0] > src[127:64] ? src[63:0] : src[127:64]) + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*64 + src[i+63:i] := (src[i+63:i] > src[i+64*len+63:i+64*len] ? src[i+63:i] : src[i+64*len+63:i+64*len]) + ENDFOR + RETURN REDUCE_MAX(src[64*len-1:0], len) +} +dst[63:0] := REDUCE_MAX(a, 8) + + AVX512F +
immintrin.h
+ Special Math Functions
- - - Reduce the packed unsigned 32-bit integers in "a" by maximum. Returns the - maximum of all elements in "a". - - DEFINE REDUCE_MAX(src, len) { - IF len == 2 - RETURN (src[31:0] > src[63:32] ? src[31:0] : src[63:32]) - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*32 - src[i+31:i] := (src[i+31:i] > src[i+32*len+31:i+32*len] ? src[i+31:i] : - src[i+32*len+31:i+32*len]) - ENDFOR - RETURN REDUCE_MAX(src[32*len-1:0], len) - } - dst[31:0] := REDUCE_MAX(a, 16) - - AVX512F -
immintrin.h
- Special Math Functions + + + Reduce the packed unsigned 32-bit integers in "a" by maximum. Returns the maximum of all elements in "a". + +DEFINE REDUCE_MAX(src, len) { + IF len == 2 + RETURN (src[31:0] > src[63:32] ? src[31:0] : src[63:32]) + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*32 + src[i+31:i] := (src[i+31:i] > src[i+32*len+31:i+32*len] ? src[i+31:i] : src[i+32*len+31:i+32*len]) + ENDFOR + RETURN REDUCE_MAX(src[32*len-1:0], len) +} +dst[31:0] := REDUCE_MAX(a, 16) + + AVX512F +
immintrin.h
+ Special Math Functions
- - - Reduce the packed unsigned 64-bit integers in "a" by maximum. Returns the - maximum of all elements in "a". - - DEFINE REDUCE_MAX(src, len) { - IF len == 2 - RETURN (src[63:0] > src[127:64] ? src[63:0] : src[127:64]) - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*64 - src[i+63:i] := (src[i+63:i] > src[i+64*len+63:i+64*len] ? src[i+63:i] : - src[i+64*len+63:i+64*len]) - ENDFOR - RETURN REDUCE_MAX(src[64*len-1:0], len) - } - dst[63:0] := REDUCE_MAX(a, 8) - - AVX512F -
immintrin.h
- Special Math Functions + + + Reduce the packed unsigned 64-bit integers in "a" by maximum. Returns the maximum of all elements in "a". + +DEFINE REDUCE_MAX(src, len) { + IF len == 2 + RETURN (src[63:0] > src[127:64] ? src[63:0] : src[127:64]) + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*64 + src[i+63:i] := (src[i+63:i] > src[i+64*len+63:i+64*len] ? src[i+63:i] : src[i+64*len+63:i+64*len]) + ENDFOR + RETURN REDUCE_MAX(src[64*len-1:0], len) +} +dst[63:0] := REDUCE_MAX(a, 8) + + AVX512F +
immintrin.h
+ Special Math Functions
- - - Reduce the packed double-precision (64-bit) floating-point elements in "a" by - maximum. Returns the maximum of all elements in "a". - - DEFINE REDUCE_MAX(src, len) { - IF len == 2 - RETURN (src[63:0] > src[127:64] ? src[63:0] : src[127:64]) - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*64 - src[i+63:i] := (src[i+63:i] > src[i+64*len+63:i+64*len] ? src[i+63:i] : - src[i+64*len+63:i+64*len]) - ENDFOR - RETURN REDUCE_MAX(src[64*len-1:0], len) - } - dst[63:0] := REDUCE_MAX(a, 8) - - AVX512F -
immintrin.h
- Special Math Functions + + + Reduce the packed double-precision (64-bit) floating-point elements in "a" by maximum. Returns the maximum of all elements in "a". + +DEFINE REDUCE_MAX(src, len) { + IF len == 2 + RETURN (src[63:0] > src[127:64] ? src[63:0] : src[127:64]) + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*64 + src[i+63:i] := (src[i+63:i] > src[i+64*len+63:i+64*len] ? src[i+63:i] : src[i+64*len+63:i+64*len]) + ENDFOR + RETURN REDUCE_MAX(src[64*len-1:0], len) +} +dst[63:0] := REDUCE_MAX(a, 8) + + AVX512F +
immintrin.h
+ Special Math Functions
- - - Reduce the packed single-precision (32-bit) floating-point elements in "a" by - maximum. Returns the maximum of all elements in "a". - - DEFINE REDUCE_MAX(src, len) { - IF len == 2 - RETURN (src[31:0] > src[63:32] ? src[31:0] : src[63:32]) - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*32 - src[i+31:i] := (src[i+31:i] > src[i+32*len+31:i+32*len] ? src[i+31:i] : - src[i+32*len+31:i+32*len]) - ENDFOR - RETURN REDUCE_MAX(src[32*len-1:0], len) - } - dst[31:0] := REDUCE_MAX(a, 16) - - AVX512F -
immintrin.h
- Special Math Functions + + + Reduce the packed single-precision (32-bit) floating-point elements in "a" by maximum. Returns the maximum of all elements in "a". + +DEFINE REDUCE_MAX(src, len) { + IF len == 2 + RETURN (src[31:0] > src[63:32] ? src[31:0] : src[63:32]) + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*32 + src[i+31:i] := (src[i+31:i] > src[i+32*len+31:i+32*len] ? src[i+31:i] : src[i+32*len+31:i+32*len]) + ENDFOR + RETURN REDUCE_MAX(src[32*len-1:0], len) +} +dst[31:0] := REDUCE_MAX(a, 16) + + AVX512F +
immintrin.h
+ Special Math Functions
- - - Reduce the packed signed 32-bit integers in "a" by minimum. Returns the minimum - of all elements in "a". - - DEFINE REDUCE_MIN(src, len) { - IF len == 2 - RETURN (src[31:0] < src[63:32] ? src[31:0] : src[63:32]) - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*32 - src[i+31:i] := (src[i+31:i] < src[i+32*len+31:i+32*len] ? src[i+31:i] : - src[i+32*len+31:i+32*len]) - ENDFOR - RETURN REDUCE_MIN(src[32*len-1:0], len) - } - dst[31:0] := REDUCE_MIN(a, 16) - - AVX512F -
immintrin.h
- Special Math Functions + + + Reduce the packed signed 32-bit integers in "a" by minimum. Returns the minimum of all elements in "a". + +DEFINE REDUCE_MIN(src, len) { + IF len == 2 + RETURN (src[31:0] < src[63:32] ? src[31:0] : src[63:32]) + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*32 + src[i+31:i] := (src[i+31:i] < src[i+32*len+31:i+32*len] ? src[i+31:i] : src[i+32*len+31:i+32*len]) + ENDFOR + RETURN REDUCE_MIN(src[32*len-1:0], len) +} +dst[31:0] := REDUCE_MIN(a, 16) + + AVX512F +
immintrin.h
+ Special Math Functions
- - - Reduce the packed signed 64-bit integers in "a" by minimum. Returns the minimum - of all elements in "a". - - DEFINE REDUCE_MIN(src, len) { - IF len == 2 - RETURN (src[63:0] < src[127:64] ? src[63:0] : src[127:64]) - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*64 - src[i+63:i] := (src[i+63:i] < src[i+64*len+63:i+64*len] ? src[i+63:i] : - src[i+64*len+63:i+64*len]) - ENDFOR - RETURN REDUCE_MIN(src[64*len-1:0], len) - } - dst[63:0] := REDUCE_MIN(a, 8) - - AVX512F -
immintrin.h
- Special Math Functions + + + Reduce the packed signed 64-bit integers in "a" by minimum. Returns the minimum of all elements in "a". + +DEFINE REDUCE_MIN(src, len) { + IF len == 2 + RETURN (src[63:0] < src[127:64] ? src[63:0] : src[127:64]) + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*64 + src[i+63:i] := (src[i+63:i] < src[i+64*len+63:i+64*len] ? src[i+63:i] : src[i+64*len+63:i+64*len]) + ENDFOR + RETURN REDUCE_MIN(src[64*len-1:0], len) +} +dst[63:0] := REDUCE_MIN(a, 8) + + AVX512F +
immintrin.h
+ Special Math Functions
- - - Reduce the packed unsigned 32-bit integers in "a" by minimum. Returns the - minimum of all elements in "a". - - DEFINE REDUCE_MIN(src, len) { - IF len == 2 - RETURN (src[31:0] < src[63:32] ? src[31:0] : src[63:32]) - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*32 - src[i+31:i] := (src[i+31:i] < src[i+32*len+31:i+32*len] ? src[i+31:i] : - src[i+32*len+31:i+32*len]) - ENDFOR - RETURN REDUCE_MIN(src[32*len-1:0], len) - } - dst[31:0] := REDUCE_MIN(a, 16) - - AVX512F -
immintrin.h
- Special Math Functions + + + Reduce the packed unsigned 32-bit integers in "a" by minimum. Returns the minimum of all elements in "a". + +DEFINE REDUCE_MIN(src, len) { + IF len == 2 + RETURN (src[31:0] < src[63:32] ? src[31:0] : src[63:32]) + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*32 + src[i+31:i] := (src[i+31:i] < src[i+32*len+31:i+32*len] ? src[i+31:i] : src[i+32*len+31:i+32*len]) + ENDFOR + RETURN REDUCE_MIN(src[32*len-1:0], len) +} +dst[31:0] := REDUCE_MIN(a, 16) + + AVX512F +
immintrin.h
+ Special Math Functions
- - - Reduce the packed unsigned 64-bit integers in "a" by minimum. Returns the - minimum of all elements in "a". - - DEFINE REDUCE_MIN(src, len) { - IF len == 2 - RETURN (src[63:0] < src[127:64] ? src[63:0] : src[127:64]) - FI - len := len / 2 - FOR j:= 0 to (len-1) - i := j*64 - src[i+63:i] := (src[i+63:i] < src[i+64*len+63:i+64*len] ? src[i+63:i] : - src[i+64*len+63:i+64*len]) - ENDFOR - RETURN REDUCE_MIN(src[64*len-1:0], len) - } - dst[63:0] := REDUCE_MIN(a, 8) - - AVX512F -
immintrin.h
- Special Math Functions + + + Reduce the packed unsigned 64-bit integers in "a" by minimum. Returns the minimum of all elements in "a". + +DEFINE REDUCE_MIN(src, len) { + IF len == 2 + RETURN (src[63:0] < src[127:64] ? src[63:0] : src[127:64]) + FI + len := len / 2 + FOR j:= 0 to (len-1) + i := j*64 + src[i+63:i] := (src[i+63:i] < src[i+64*len+63:i+64*len] ? src[i+63:i] : src[i+64*len+63:i+64*len]) + ENDFOR + RETURN REDUCE_MIN(src[64*len-1:0], len) +} +dst[63:0] := REDUCE_MIN(a, 8) + + AVX512F +
immintrin.h
Special Math Functions
Reduce the packed double-precision (64-bit) floating-point elements in "a" by minimum. Returns the minimum of all elements in "a". [min_float_note]

DEFINE REDUCE_MIN(src, len) {
	IF len == 2
		RETURN (src[63:0] < src[127:64] ? src[63:0] : src[127:64])
	FI
	len := len / 2
	FOR j := 0 to (len-1)
		i := j*64
		src[i+63:i] := (src[i+63:i] < src[i+64*len+63:i+64*len] ? src[i+63:i] : src[i+64*len+63:i+64*len])
	ENDFOR
	RETURN REDUCE_MIN(src[64*len-1:0], len)
}
dst[63:0] := REDUCE_MIN(a, 8)

AVX512F
immintrin.h
Special Math Functions
Reduce the packed single-precision (32-bit) floating-point elements in "a" by minimum. Returns the minimum of all elements in "a". [min_float_note]

DEFINE REDUCE_MIN(src, len) {
	IF len == 2
		RETURN (src[31:0] < src[63:32] ? src[31:0] : src[63:32])
	FI
	len := len / 2
	FOR j := 0 to (len-1)
		i := j*32
		src[i+31:i] := (src[i+31:i] < src[i+32*len+31:i+32*len] ? src[i+31:i] : src[i+32*len+31:i+32*len])
	ENDFOR
	RETURN REDUCE_MIN(src[32*len-1:0], len)
}
dst[31:0] := REDUCE_MIN(a, 16)

AVX512F
immintrin.h
Special Math Functions
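The floating-point variants follow the same recursion; the bracketed [min_float_note] is resolved elsewhere in the XML, so the NaN handling below is illustrative only, not a statement of the hardware rule. A scalar Rust sketch:

    // Scalar model of the floating-point reduce-min. f32::min returns the
    // non-NaN operand when exactly one input is NaN, which may differ from
    // the behaviour referenced by [min_float_note].
    fn reduce_min_f32(lanes: [f32; 16]) -> f32 {
        lanes.iter().copied().fold(f32::INFINITY, f32::min)
    }
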
Shift packed 32-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 15
	i := j*32
	IF k[j]
		IF imm8[7:0] > 31
			dst[i+31:i] := 0
		ELSE
			dst[i+31:i] := ZeroExtend32(a[i+31:i] << imm8[7:0])
		FI
	ELSE
		dst[i+31:i] := src[i+31:i]
	FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
Shift
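The writemask pattern above recurs throughout this file: lane j is computed when k[j] is set and copied from "src" otherwise. A scalar Rust sketch of the masked left shift (the helper name mirrors the intrinsic family but is an assumption of this sketch):

    // Lane j takes the shifted value when bit j of k is set, else src[j];
    // a count above 31 zeroes the lane, as in the pseudocode.
    fn mask_slli_epi32(src: [u32; 16], k: u16, a: [u32; 16], imm8: u32) -> [u32; 16] {
        let mut dst = src;
        for j in 0..16 {
            if (k >> j) & 1 == 1 {
                dst[j] = if imm8 > 31 { 0 } else { a[j] << imm8 };
            }
        }
        dst
    }
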
Shift packed 32-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst".

FOR j := 0 to 15
	i := j*32
	IF imm8[7:0] > 31
		dst[i+31:i] := 0
	ELSE
		dst[i+31:i] := ZeroExtend32(a[i+31:i] << imm8[7:0])
	FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
Shift
Shift packed 32-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 15
	i := j*32
	IF k[j]
		IF count[i+31:i] < 32
			dst[i+31:i] := ZeroExtend32(a[i+31:i] << count[i+31:i])
		ELSE
			dst[i+31:i] := 0
		FI
	ELSE
		dst[i+31:i] := src[i+31:i]
	FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
Shift
Shift packed 32-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst".

FOR j := 0 to 15
	i := j*32
	IF count[i+31:i] < 32
		dst[i+31:i] := ZeroExtend32(a[i+31:i] << count[i+31:i])
	ELSE
		dst[i+31:i] := 0
	FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
Shift
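For the variable-count forms each lane consumes its own count, and any count of 32 or more produces zero rather than the count-modulo-32 behaviour of scalar x86 shifts. A Rust sketch:

    // Per-lane variable shift: counts >= 32 yield 0 instead of count % 32.
    fn sllv_epi32(a: [u32; 16], count: [u32; 16]) -> [u32; 16] {
        let mut dst = [0u32; 16];
        for j in 0..16 {
            if count[j] < 32 {
                dst[j] = a[j] << count[j];
            }
        }
        dst
    }
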
Shift packed 32-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 15
	i := j*32
	IF k[j]
		IF imm8[7:0] > 31
			dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0)
		ELSE
			dst[i+31:i] := SignExtend32(a[i+31:i] >> imm8[7:0])
		FI
	ELSE
		dst[i+31:i] := src[i+31:i]
	FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
Shift
Shift packed 32-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst".

FOR j := 0 to 15
	i := j*32
	IF imm8[7:0] > 31
		dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0)
	ELSE
		dst[i+31:i] := SignExtend32(a[i+31:i] >> imm8[7:0])
	FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
Shift
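An arithmetic right shift by more than 31 fills the lane with copies of the sign bit, which is exactly what shifting by 31 does in two's-complement arithmetic. A scalar Rust sketch:

    // Clamping the count to 31 reproduces the "shift in sign bits" rule
    // for out-of-range immediates.
    fn srai_epi32(a: [i32; 16], imm8: u32) -> [i32; 16] {
        a.map(|x| x >> imm8.min(31))
    }
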
Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 15
	i := j*32
	IF k[j]
		IF count[i+31:i] < 32
			dst[i+31:i] := SignExtend32(a[i+31:i] >> count[i+31:i])
		ELSE
			dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0)
		FI
	ELSE
		dst[i+31:i] := src[i+31:i]
	FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
Shift
Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst".

FOR j := 0 to 15
	i := j*32
	IF count[i+31:i] < 32
		dst[i+31:i] := SignExtend32(a[i+31:i] >> count[i+31:i])
	ELSE
		dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0)
	FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
Shift
Shift packed 32-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 15
	i := j*32
	IF k[j]
		IF imm8[7:0] > 31
			dst[i+31:i] := 0
		ELSE
			dst[i+31:i] := ZeroExtend32(a[i+31:i] >> imm8[7:0])
		FI
	ELSE
		dst[i+31:i] := src[i+31:i]
	FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
Shift
Shift packed 32-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst".

FOR j := 0 to 15
	i := j*32
	IF imm8[7:0] > 31
		dst[i+31:i] := 0
	ELSE
		dst[i+31:i] := ZeroExtend32(a[i+31:i] >> imm8[7:0])
	FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
Shift
Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 15
	i := j*32
	IF k[j]
		IF count[i+31:i] < 32
			dst[i+31:i] := ZeroExtend32(a[i+31:i] >> count[i+31:i])
		ELSE
			dst[i+31:i] := 0
		FI
	ELSE
		dst[i+31:i] := src[i+31:i]
	FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
Shift
Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst".

FOR j := 0 to 15
	i := j*32
	IF count[i+31:i] < 32
		dst[i+31:i] := ZeroExtend32(a[i+31:i] >> count[i+31:i])
	ELSE
		dst[i+31:i] := 0
	FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
Shift
Cast vector of type __m512d to type __m512.
This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.

AVX512F
immintrin.h
Cast
Cast vector of type __m512d to type __m512i.
This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.

AVX512F
immintrin.h
Cast
Cast vector of type __m512 to type __m512d.
This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.

AVX512F
immintrin.h
Cast
Cast vector of type __m512 to type __m512i.
This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.

AVX512F
immintrin.h
Cast
Cast vector of type __m512i to type __m512d.
This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.

AVX512F
immintrin.h
Cast
Cast vector of type __m512i to type __m512.
This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.

AVX512F
immintrin.h
Cast
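These casts only relabel a register; no instruction is emitted. The scalar analogue is reinterpreting a float through its raw bits, a round trip the compiler lowers to nothing:

    // "Casting" f64 to its integer view and back is a no-op at run time,
    // just like __m512d -> __m512i -> __m512d.
    fn main() {
        let x = -2.5f64;
        let bits: u64 = x.to_bits();
        assert_eq!(f64::from_bits(bits), x);
    }
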
Performs element-by-element conversion of the lower half of packed single-precision (32-bit) floating-point elements in "v2" to packed double-precision (64-bit) floating-point elements, storing the results in "dst".

FOR j := 0 to 7
	i := j*32
	n := j*64
	dst[n+63:n] := Convert_FP32_To_FP64(v2[i+31:i])
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
Convert
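Only the lower eight single-precision lanes participate; each widens into a 64-bit slot. A scalar Rust sketch (the array sizes stand in for the 512-bit registers):

    // Widen the lower half of a 16-lane f32 vector into 8 f64 lanes.
    fn cvtpslo_pd(v2: [f32; 16]) -> [f64; 8] {
        let mut dst = [0.0f64; 8];
        for j in 0..8 {
            dst[j] = v2[j] as f64;
        }
        dst
    }
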
Performs element-by-element conversion of the lower half of packed single-precision (32-bit) floating-point elements in "v2" to packed double-precision (64-bit) floating-point elements, storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 7
	i := j*32
	l := j*64
	IF k[j]
		dst[l+63:l] := Convert_FP32_To_FP64(v2[i+31:i])
	ELSE
		dst[l+63:l] := src[l+63:l]
	FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
Convert
Performs element-by-element conversion of the lower half of packed 32-bit integer elements in "v2" to packed double-precision (64-bit) floating-point elements, storing the results in "dst".

FOR j := 0 to 7
	i := j*32
	l := j*64
	dst[l+63:l] := Convert_Int32_To_FP64(v2[i+31:i])
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
Convert
Performs element-by-element conversion of the lower half of packed 32-bit integer elements in "v2" to packed double-precision (64-bit) floating-point elements, storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 7
	i := j*32
	n := j*64
	IF k[j]
		dst[n+63:n] := Convert_Int32_To_FP64(v2[i+31:i])
	ELSE
		dst[n+63:n] := src[n+63:n]
	FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
Convert
Performs element-by-element conversion of the lower half of packed 32-bit unsigned integer elements in "v2" to packed double-precision (64-bit) floating-point elements, storing the results in "dst".

FOR j := 0 to 7
	i := j*32
	n := j*64
	dst[n+63:n] := Convert_Int32_To_FP64(v2[i+31:i])
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
Convert
Performs element-by-element conversion of the lower half of 32-bit unsigned integer elements in "v2" to packed double-precision (64-bit) floating-point elements, storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 7
	i := j*32
	l := j*64
	IF k[j]
		dst[l+63:l] := Convert_Int32_To_FP64(v2[i+31:i])
	ELSE
		dst[l+63:l] := src[l+63:l]
	FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
Convert
Performs an element-by-element conversion of packed double-precision (64-bit) floating-point elements in "v2" to single-precision (32-bit) floating-point elements and stores them in "dst". The elements are stored in the lower half of the results vector, while the remaining upper half locations are set to 0.

FOR j := 0 to 7
	i := j*64
	k := j*32
	dst[k+31:k] := Convert_FP64_To_FP32(v2[i+63:i])
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
Convert
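The narrowing direction packs the eight doubles into the lower half of the single-precision result and zeroes the upper half, as a Rust sketch makes explicit:

    // Narrow 8 f64 lanes into the low 8 f32 lanes; lanes 8..15 stay zero.
    fn cvtpd_pslo(v2: [f64; 8]) -> [f32; 16] {
        let mut dst = [0.0f32; 16];
        for j in 0..8 {
            dst[j] = v2[j] as f32;
        }
        dst
    }
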
Performs an element-by-element conversion of packed double-precision (64-bit) floating-point elements in "v2" to single-precision (32-bit) floating-point elements and stores them in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The elements are stored in the lower half of the results vector, while the remaining upper half locations are set to 0.

FOR j := 0 to 7
	i := j*64
	l := j*32
	IF k[j]
		dst[l+31:l] := Convert_FP64_To_FP32(v2[i+63:i])
	ELSE
		dst[l+31:l] := src[l+31:l]
	FI
ENDFOR
dst[MAX:512] := 0

AVX512F
immintrin.h
Convert
Store 8 packed 64-bit integer elements located in "a" in memory locations starting at location "base_addr" at packed 32-bit integer indices stored in "vindex" scaled by "scale".

FOR j := 0 to 7
	i := j*64
	m := j*32
	addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
	MEM[addr+63:addr] := a[i+63:i]
ENDFOR

AVX512F
immintrin.h
Store
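In byte terms element j lands at vindex[j] * scale from the base pointer; reading the pseudocode's trailing * 8 as pairing with its bit-indexed MEM[addr+63:addr] notation is an assumption of this sketch, not something the patch states. A scalar Rust model over a plain byte buffer:

    // Write each 64-bit element at byte offset vindex[j] * scale.
    // The sketch assumes in-bounds, non-negative offsets.
    fn i32scatter_epi64(mem: &mut [u8], vindex: [i32; 8], a: [i64; 8], scale: i64) {
        for j in 0..8 {
            let off = (vindex[j] as i64 * scale) as usize;
            mem[off..off + 8].copy_from_slice(&a[j].to_le_bytes());
        }
    }
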
Store 8 packed 64-bit integer elements located in "a" in memory locations starting at location "base_addr" at packed 32-bit integer indices stored in "vindex" scaled by "scale" using writemask "k" (elements whose corresponding mask bit is not set are not written to memory).

FOR j := 0 to 7
	i := j*64
	m := j*32
	IF k[j]
		addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
		MEM[addr+63:addr] := a[i+63:i]
	FI
ENDFOR

AVX512F
immintrin.h
Store
Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the low 52-bit unsigned integer from the intermediate result to the corresponding unsigned 64-bit integer in "a", and store the results in "dst".

FOR j := 0 to 3
	i := j*64
	tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i])
	dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[51:0])
ENDFOR
dst[MAX:256] := 0

AVX512IFMA52
AVX512VL
immintrin.h
Arithmetic
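One lane of the 52-bit multiply-add is easy to model with 128-bit arithmetic: mask both multiplicands to 52 bits, multiply into a u128, and add the low 52 bits of the product to the accumulator. A Rust sketch of a single lane (the helper name is illustrative):

    // Low half of the 52-bit multiply-add for one 64-bit lane.
    fn madd52lo_lane(a: u64, b: u64, c: u64) -> u64 {
        const MASK52: u64 = (1 << 52) - 1;
        let prod = (b & MASK52) as u128 * (c & MASK52) as u128;
        a.wrapping_add(prod as u64 & MASK52)
    }
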
Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the low 52-bit unsigned integer from the intermediate result to the corresponding unsigned 64-bit integer in "a", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).

FOR j := 0 to 3
	i := j*64
	IF k[j]
		tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i])
		dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[51:0])
	ELSE
		dst[i+63:i] := a[i+63:i]
	FI
ENDFOR
dst[MAX:256] := 0

AVX512IFMA52
AVX512VL
immintrin.h
Arithmetic
Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the low 52-bit unsigned integer from the intermediate result to the corresponding unsigned 64-bit integer in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 3
	i := j*64
	IF k[j]
		tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i])
		dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[51:0])
	ELSE
		dst[i+63:i] := 0
	FI
ENDFOR
dst[MAX:256] := 0

AVX512IFMA52
AVX512VL
immintrin.h
Arithmetic
Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the low 52-bit unsigned integer from the intermediate result to the corresponding unsigned 64-bit integer in "a", and store the results in "dst".

FOR j := 0 to 1
	i := j*64
	tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i])
	dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[51:0])
ENDFOR
dst[MAX:128] := 0

AVX512IFMA52
AVX512VL
immintrin.h
Arithmetic
Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the low 52-bit unsigned integer from the intermediate result to the corresponding unsigned 64-bit integer in "a", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).

FOR j := 0 to 1
	i := j*64
	IF k[j]
		tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i])
		dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[51:0])
	ELSE
		dst[i+63:i] := a[i+63:i]
	FI
ENDFOR
dst[MAX:128] := 0

AVX512IFMA52
AVX512VL
immintrin.h
Arithmetic
Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the low 52-bit unsigned integer from the intermediate result to the corresponding unsigned 64-bit integer in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 1
	i := j*64
	IF k[j]
		tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i])
		dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[51:0])
	ELSE
		dst[i+63:i] := 0
	FI
ENDFOR
dst[MAX:128] := 0

AVX512IFMA52
AVX512VL
immintrin.h
Arithmetic
Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the high 52-bit unsigned integer from the intermediate result to the corresponding unsigned 64-bit integer in "a", and store the results in "dst".

FOR j := 0 to 3
	i := j*64
	tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i])
	dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[103:52])
ENDFOR
dst[MAX:256] := 0

AVX512IFMA52
AVX512VL
immintrin.h
Arithmetic
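The high variant extracts bits 103:52 of the same 104-bit product. In the u128 model that is a right shift by 52 (again a sketch, not the library's implementation):

    // High half of the 52-bit multiply-add for one 64-bit lane.
    fn madd52hi_lane(a: u64, b: u64, c: u64) -> u64 {
        const MASK52: u64 = (1 << 52) - 1;
        let prod = (b & MASK52) as u128 * (c & MASK52) as u128;
        // prod < 2^104, so prod >> 52 already fits in 52 bits.
        a.wrapping_add((prod >> 52) as u64)
    }
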
Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the high 52-bit unsigned integer from the intermediate result to the corresponding unsigned 64-bit integer in "a", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).

FOR j := 0 to 3
	i := j*64
	IF k[j]
		tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i])
		dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[103:52])
	ELSE
		dst[i+63:i] := a[i+63:i]
	FI
ENDFOR
dst[MAX:256] := 0

AVX512IFMA52
AVX512VL
immintrin.h
Arithmetic
Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the high 52-bit unsigned integer from the intermediate result to the corresponding unsigned 64-bit integer in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 3
	i := j*64
	IF k[j]
		tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i])
		dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[103:52])
	ELSE
		dst[i+63:i] := 0
	FI
ENDFOR
dst[MAX:256] := 0

AVX512IFMA52
AVX512VL
immintrin.h
Arithmetic
Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the high 52-bit unsigned integer from the intermediate result to the corresponding unsigned 64-bit integer in "a", and store the results in "dst".

FOR j := 0 to 1
	i := j*64
	tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i])
	dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[103:52])
ENDFOR
dst[MAX:128] := 0

AVX512IFMA52
AVX512VL
immintrin.h
Arithmetic
Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the high 52-bit unsigned integer from the intermediate result to the corresponding unsigned 64-bit integer in "a", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).

FOR j := 0 to 1
	i := j*64
	IF k[j]
		tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i])
		dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[103:52])
	ELSE
		dst[i+63:i] := a[i+63:i]
	FI
ENDFOR
dst[MAX:128] := 0

AVX512IFMA52
AVX512VL
immintrin.h
Arithmetic
Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the high 52-bit unsigned integer from the intermediate result to the corresponding unsigned 64-bit integer in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 1
	i := j*64
	IF k[j]
		tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i])
		dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[103:52])
	ELSE
		dst[i+63:i] := 0
	FI
ENDFOR
dst[MAX:128] := 0

AVX512IFMA52
AVX512VL
immintrin.h
Arithmetic
Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the low 52-bit unsigned integer from the intermediate result to the corresponding unsigned 64-bit integer in "a", and store the results in "dst".

FOR j := 0 to 7
	i := j*64
	tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i])
	dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[51:0])
ENDFOR
dst[MAX:512] := 0

AVX512IFMA52
immintrin.h
Arithmetic
Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the low 52-bit unsigned integer from the intermediate result to the corresponding unsigned 64-bit integer in "a", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).

FOR j := 0 to 7
	i := j*64
	IF k[j]
		tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i])
		dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[51:0])
	ELSE
		dst[i+63:i] := a[i+63:i]
	FI
ENDFOR
dst[MAX:512] := 0

AVX512IFMA52
immintrin.h
Arithmetic
Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the low 52-bit unsigned integer from the intermediate result to the corresponding unsigned 64-bit integer in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 7
	i := j*64
	IF k[j]
		tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i])
		dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[51:0])
	ELSE
		dst[i+63:i] := 0
	FI
ENDFOR
dst[MAX:512] := 0

AVX512IFMA52
immintrin.h
Arithmetic
Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the high 52-bit unsigned integer from the intermediate result to the corresponding unsigned 64-bit integer in "a", and store the results in "dst".

FOR j := 0 to 7
	i := j*64
	tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i])
	dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[103:52])
ENDFOR
dst[MAX:512] := 0

AVX512IFMA52
immintrin.h
Arithmetic
Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the high 52-bit unsigned integer from the intermediate result to the corresponding unsigned 64-bit integer in "a", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).

FOR j := 0 to 7
	i := j*64
	IF k[j]
		tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i])
		dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[103:52])
	ELSE
		dst[i+63:i] := a[i+63:i]
	FI
ENDFOR
dst[MAX:512] := 0

AVX512IFMA52
immintrin.h
Arithmetic
Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the high 52-bit unsigned integer from the intermediate result to the corresponding unsigned 64-bit integer in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 7
	i := j*64
	IF k[j]
		tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i])
		dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[103:52])
	ELSE
		dst[i+63:i] := 0
	FI
ENDFOR
dst[MAX:512] := 0

AVX512IFMA52
immintrin.h
Arithmetic
Count the number of logical 1 bits in packed 64-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

DEFINE POPCNT(a) {
	count := 0
	DO WHILE a > 0
		count += a[0]
		a >>= 1
	OD
	RETURN count
}
FOR j := 0 to 3
	i := j*64
	IF k[j]
		dst[i+63:i] := POPCNT(a[i+63:i])
	ELSE
		dst[i+63:i] := 0
	FI
ENDFOR
dst[MAX:256] := 0

AVX512VPOPCNTDQ
AVX512VL
immintrin.h
Bit Manipulation
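Rust's count_ones computes the same loop as the POPCNT helper, so the masked forms reduce to a few lines. A scalar sketch of the zeromask variant over four 64-bit lanes:

    // Zeromask population count: unselected lanes stay zero.
    fn maskz_popcnt_epi64(k: u8, a: [u64; 4]) -> [u64; 4] {
        let mut dst = [0u64; 4];
        for j in 0..4 {
            if (k >> j) & 1 == 1 {
                dst[j] = a[j].count_ones() as u64;
            }
        }
        dst
    }
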
Count the number of logical 1 bits in packed 64-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

DEFINE POPCNT(a) {
	count := 0
	DO WHILE a > 0
		count += a[0]
		a >>= 1
	OD
	RETURN count
}
FOR j := 0 to 3
	i := j*64
	IF k[j]
		dst[i+63:i] := POPCNT(a[i+63:i])
	ELSE
		dst[i+63:i] := src[i+63:i]
	FI
ENDFOR
dst[MAX:256] := 0

AVX512VPOPCNTDQ
AVX512VL
immintrin.h
Bit Manipulation
Count the number of logical 1 bits in packed 64-bit integers in "a", and store the results in "dst".

DEFINE POPCNT(a) {
	count := 0
	DO WHILE a > 0
		count += a[0]
		a >>= 1
	OD
	RETURN count
}
FOR j := 0 to 3
	i := j*64
	dst[i+63:i] := POPCNT(a[i+63:i])
ENDFOR
dst[MAX:256] := 0

AVX512VPOPCNTDQ
AVX512VL
immintrin.h
Bit Manipulation
Count the number of logical 1 bits in packed 64-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

DEFINE POPCNT(a) {
	count := 0
	DO WHILE a > 0
		count += a[0]
		a >>= 1
	OD
	RETURN count
}
FOR j := 0 to 1
	i := j*64
	IF k[j]
		dst[i+63:i] := POPCNT(a[i+63:i])
	ELSE
		dst[i+63:i] := 0
	FI
ENDFOR
dst[MAX:128] := 0

AVX512VPOPCNTDQ
AVX512VL
immintrin.h
Bit Manipulation
Count the number of logical 1 bits in packed 64-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

DEFINE POPCNT(a) {
	count := 0
	DO WHILE a > 0
		count += a[0]
		a >>= 1
	OD
	RETURN count
}
FOR j := 0 to 1
	i := j*64
	IF k[j]
		dst[i+63:i] := POPCNT(a[i+63:i])
	ELSE
		dst[i+63:i] := src[i+63:i]
	FI
ENDFOR
dst[MAX:128] := 0

AVX512VPOPCNTDQ
AVX512VL
immintrin.h
Bit Manipulation
Count the number of logical 1 bits in packed 64-bit integers in "a", and store the results in "dst".

DEFINE POPCNT(a) {
	count := 0
	DO WHILE a > 0
		count += a[0]
		a >>= 1
	OD
	RETURN count
}
FOR j := 0 to 1
	i := j*64
	dst[i+63:i] := POPCNT(a[i+63:i])
ENDFOR
dst[MAX:128] := 0

AVX512VPOPCNTDQ
AVX512VL
immintrin.h
Bit Manipulation
Count the number of logical 1 bits in packed 32-bit integers in "a", and store the results in "dst".

DEFINE POPCNT(a) {
	count := 0
	DO WHILE a > 0
		count += a[0]
		a >>= 1
	OD
	RETURN count
}
FOR j := 0 to 7
	i := j*32
	dst[i+31:i] := POPCNT(a[i+31:i])
ENDFOR
dst[MAX:256] := 0

AVX512VPOPCNTDQ
AVX512VL
immintrin.h
Bit Manipulation
Count the number of logical 1 bits in packed 32-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

DEFINE POPCNT(a) {
	count := 0
	DO WHILE a > 0
		count += a[0]
		a >>= 1
	OD
	RETURN count
}
FOR j := 0 to 7
	i := j*32
	IF k[j]
		dst[i+31:i] := POPCNT(a[i+31:i])
	ELSE
		dst[i+31:i] := src[i+31:i]
	FI
ENDFOR
dst[MAX:256] := 0

AVX512VPOPCNTDQ
AVX512VL
immintrin.h
Bit Manipulation
Count the number of logical 1 bits in packed 32-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

DEFINE POPCNT(a) {
	count := 0
	DO WHILE a > 0
		count += a[0]
		a >>= 1
	OD
	RETURN count
}
FOR j := 0 to 7
	i := j*32
	IF k[j]
		dst[i+31:i] := POPCNT(a[i+31:i])
	ELSE
		dst[i+31:i] := 0
	FI
ENDFOR
dst[MAX:256] := 0

AVX512VPOPCNTDQ
AVX512VL
immintrin.h
Bit Manipulation
Count the number of logical 1 bits in packed 32-bit integers in "a", and store the results in "dst".

DEFINE POPCNT(a) {
	count := 0
	DO WHILE a > 0
		count += a[0]
		a >>= 1
	OD
	RETURN count
}
FOR j := 0 to 3
	i := j*32
	dst[i+31:i] := POPCNT(a[i+31:i])
ENDFOR
dst[MAX:128] := 0

AVX512VPOPCNTDQ
AVX512VL
immintrin.h
Bit Manipulation
Count the number of logical 1 bits in packed 32-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

DEFINE POPCNT(a) {
	count := 0
	DO WHILE a > 0
		count += a[0]
		a >>= 1
	OD
	RETURN count
}
FOR j := 0 to 3
	i := j*32
	IF k[j]
		dst[i+31:i] := POPCNT(a[i+31:i])
	ELSE
		dst[i+31:i] := src[i+31:i]
	FI
ENDFOR
dst[MAX:128] := 0

AVX512VPOPCNTDQ
AVX512VL
immintrin.h
Bit Manipulation
Count the number of logical 1 bits in packed 32-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

DEFINE POPCNT(a) {
	count := 0
	DO WHILE a > 0
		count += a[0]
		a >>= 1
	OD
	RETURN count
}
FOR j := 0 to 3
	i := j*32
	IF k[j]
		dst[i+31:i] := POPCNT(a[i+31:i])
	ELSE
		dst[i+31:i] := 0
	FI
ENDFOR
dst[MAX:128] := 0

AVX512VPOPCNTDQ
AVX512VL
immintrin.h
Bit Manipulation
Count the number of logical 1 bits in packed 32-bit integers in "a", and store the results in "dst".

DEFINE POPCNT(a) {
	count := 0
	DO WHILE a > 0
		count += a[0]
		a >>= 1
	OD
	RETURN count
}
FOR j := 0 to 15
	i := j*32
	dst[i+31:i] := POPCNT(a[i+31:i])
ENDFOR
dst[MAX:512] := 0

AVX512VPOPCNTDQ
immintrin.h
+ Bit Manipulation
-
-
-
-
-   Count the number of logical 1 bits in packed 32-bit integers in "a", and store
-   the results in "dst" using writemask "k" (elements are copied from "src" when the
-   corresponding mask bit is not set).
-
-   DEFINE POPCNT(a) {
-     count := 0
-     DO WHILE a > 0
-       count += a[0]
-       a >>= 1
-     OD
-     RETURN count
-   }
-   FOR j := 0 to 15
-     i := j*32
-     IF k[j]
-       dst[i+31:i] := POPCNT(a[i+31:i])
-     ELSE
-       dst[i+31:i] := src[i+31:i]
-     FI
-   ENDFOR
-   dst[MAX:512] := 0
-
-
-   AVX512VPOPCNTDQ
-
immintrin.h
- Bit Manipulation + + + + + Count the number of logical 1 bits in packed 32-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE POPCNT(a) { + count := 0 + DO WHILE a > 0 + count += a[0] + a >>= 1 + OD + RETURN count +} +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := POPCNT(a[i+31:i]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512VPOPCNTDQ +
immintrin.h
+ Bit Manipulation
-
-
-
-   Count the number of logical 1 bits in packed 32-bit integers in "a", and store
-   the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding
-   mask bit is not set).
-
-   DEFINE POPCNT(a) {
-     count := 0
-     DO WHILE a > 0
-       count += a[0]
-       a >>= 1
-     OD
-     RETURN count
-   }
-   FOR j := 0 to 15
-     i := j*32
-     IF k[j]
-       dst[i+31:i] := POPCNT(a[i+31:i])
-     ELSE
-       dst[i+31:i] := 0
-     FI
-   ENDFOR
-   dst[MAX:512] := 0
-
-
-   AVX512VPOPCNTDQ
-
immintrin.h
- Bit Manipulation + + + + Count the number of logical 1 bits in packed 32-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE POPCNT(a) { + count := 0 + DO WHILE a > 0 + count += a[0] + a >>= 1 + OD + RETURN count +} +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := POPCNT(a[i+31:i]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512VPOPCNTDQ +
immintrin.h
+ Bit Manipulation
-
-
-   Count the number of logical 1 bits in packed 64-bit integers in "a", and store
-   the results in "dst".
-
-   DEFINE POPCNT(a) {
-     count := 0
-     DO WHILE a > 0
-       count += a[0]
-       a >>= 1
-     OD
-     RETURN count
-   }
-   FOR j := 0 to 7
-     i := j*64
-     dst[i+63:i] := POPCNT(a[i+63:i])
-   ENDFOR
-   dst[MAX:512] := 0
-
-
-   AVX512VPOPCNTDQ
-
immintrin.h
- Bit Manipulation + + + Count the number of logical 1 bits in packed 64-bit integers in "a", and store the results in "dst". + +DEFINE POPCNT(a) { + count := 0 + DO WHILE a > 0 + count += a[0] + a >>= 1 + OD + RETURN count +} +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := POPCNT(a[i+63:i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512VPOPCNTDQ +
immintrin.h
+ Bit Manipulation
-
-
-
-
-   Count the number of logical 1 bits in packed 64-bit integers in "a", and store
-   the results in "dst" using writemask "k" (elements are copied from "src" when the
-   corresponding mask bit is not set).
-
-   DEFINE POPCNT(a) {
-     count := 0
-     DO WHILE a > 0
-       count += a[0]
-       a >>= 1
-     OD
-     RETURN count
-   }
-   FOR j := 0 to 7
-     i := j*64
-     IF k[j]
-       dst[i+63:i] := POPCNT(a[i+63:i])
-     ELSE
-       dst[i+63:i] := src[i+63:i]
-     FI
-   ENDFOR
-   dst[MAX:512] := 0
-
-
-   AVX512VPOPCNTDQ
-
immintrin.h
- Bit Manipulation + + + + + Count the number of logical 1 bits in packed 64-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE POPCNT(a) { + count := 0 + DO WHILE a > 0 + count += a[0] + a >>= 1 + OD + RETURN count +} +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := POPCNT(a[i+63:i]) + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512VPOPCNTDQ +
immintrin.h
+ Bit Manipulation
-
-
-
-   Count the number of logical 1 bits in packed 64-bit integers in "a", and store
-   the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding
-   mask bit is not set).
-
-   DEFINE POPCNT(a) {
-     count := 0
-     DO WHILE a > 0
-       count += a[0]
-       a >>= 1
-     OD
-     RETURN count
-   }
-   FOR j := 0 to 7
-     i := j*64
-     IF k[j]
-       dst[i+63:i] := POPCNT(a[i+63:i])
-     ELSE
-       dst[i+63:i] := 0
-     FI
-   ENDFOR
-   dst[MAX:512] := 0
-
-
-   AVX512VPOPCNTDQ
-
immintrin.h
- Bit Manipulation -
- - + + + + Count the number of logical 1 bits in packed 64-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE POPCNT(a) { + count := 0 + DO WHILE a > 0 + count += a[0] + a >>= 1 + OD + RETURN count +} +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := POPCNT(a[i+63:i]) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512VPOPCNTDQ +
immintrin.h
+ Bit Manipulation + + + - - - Convert packed BF16 (16-bit) floating-point elements in "a" to packed - single-precision (32-bit) floating-point elements, and store the results in "dst". This - intrinsic neither raises any floating point exceptions nor turns sNAN into qNAN. - - FOR j := 0 to 15 - i := j*32 - m := j*16 - dst[i+31:i] := Convert_BF16_To_FP32(a[m+15:m]) - ENDFOR - dst[MAX:512] := 0 - - AVX512_BF16 - AVX512F -
immintrin.h
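Convert_BF16_To_FP32 is exact, which is why these intrinsics can neither raise floating-point exceptions nor quiet an sNaN: BF16 is just the high 16 bits of an IEEE binary32, so widening is a 16-bit shift. A one-line Rust sketch (bf16_to_f32 is an illustrative name):

// Lossless: the BF16 bits become the high half of the FP32 bit pattern.
fn bf16_to_f32(x: u16) -> f32 {
    f32::from_bits((x as u32) << 16)
}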
- Convert + + + Convert packed BF16 (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". This intrinsic neither raises any floating point exceptions nor turns sNAN into qNAN. + +FOR j := 0 to 15 + i := j*32 + m := j*16 + dst[i+31:i] := Convert_BF16_To_FP32(a[m+15:m]) +ENDFOR +dst[MAX:512] := 0 + + AVX512_BF16 + AVX512F +
immintrin.h
+ Convert
- - - - Convert packed BF16 (16-bit) floating-point elements in "a" to packed - single-precision (32-bit) floating-point elements, and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This - intrinsic neither raises any floating point exceptions nor turns sNAN into qNAN. - - FOR j := 0 to 15 - i := j*32 - m := j*16 - IF k[j] - dst[i+31:i] := Convert_BF16_To_FP32(a[m+15:m]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512_BF16 - AVX512F -
immintrin.h
- Convert + + + + Convert packed BF16 (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic neither raises any floating point exceptions nor turns sNAN into qNAN. + +FOR j := 0 to 15 + i := j*32 + m := j*16 + IF k[j] + dst[i+31:i] := Convert_BF16_To_FP32(a[m+15:m]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512_BF16 + AVX512F +
immintrin.h
+ Convert
- - - - - Convert packed BF16 (16-bit) floating-point elements in "a" to packed - single-precision (32-bit) floating-point elements, and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). This intrinsic neither raises any floating point exceptions nor turns sNAN into - qNAN. - - FOR j := 0 to 15 - i := j*32 - m := j*16 - IF k[j] - dst[i+31:i] := Convert_BF16_To_FP32(a[m+15:m]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - AVX512_BF16 - AVX512F -
immintrin.h
- Convert + + + + + Convert packed BF16 (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic neither raises any floating point exceptions nor turns sNAN into qNAN. + +FOR j := 0 to 15 + i := j*32 + m := j*16 + IF k[j] + dst[i+31:i] := Convert_BF16_To_FP32(a[m+15:m]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + AVX512_BF16 + AVX512F +
immintrin.h
+ Convert
- - - Convert the BF16 (16-bit) floating-point element in "a" to a floating-point - element, and store the result in "dst". This intrinsic neither raises any floating point - exceptions nor turns sNAN into qNAN. - - dst[31:0] := Convert_BF16_To_FP32(a[15:0]) - - AVX512_BF16 - AVX512F -
immintrin.h
- Convert + + + Convert the BF16 (16-bit) floating-point element in "a" to a floating-point element, and store the result in "dst". This intrinsic neither raises any floating point exceptions nor turns sNAN into qNAN. + +dst[31:0] := Convert_BF16_To_FP32(a[15:0]) + + AVX512_BF16 + AVX512F +
immintrin.h
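Since the scalar form is the same bit-level widening, a round trip makes the precision trade concrete: BF16 keeps FP32's full exponent range but only 8 significand bits. A quick self-contained check, using a truncating narrowing for brevity:

fn main() {
    let x = 1.001_f32;
    let bf16 = (x.to_bits() >> 16) as u16;          // FP32 -> BF16, truncating
    let back = f32::from_bits((bf16 as u32) << 16); // BF16 -> FP32, exact
    assert!((x - back).abs() < 0.01);               // roughly 2-3 decimal digits survive
    println!("{x} -> {back}");
}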
+ Convert
- - - - Convert packed single-precision (32-bit) floating-point elements in two vectors - "a" and "b" to packed BF16 (16-bit) floating-point elements, and store the results in - single vector "dst". - - FOR j := 0 to 31 - IF j < 16 - t := b.fp32[j] - ELSE - t := a.fp32[j-16] - FI - dst.word[j] := Convert_FP32_To_BF16(t) - ENDFOR - dst[MAX:512] := 0 - - - AVX512_BF16 - AVX512F -
immintrin.h
- Convert + + + + Convert packed single-precision (32-bit) floating-point elements in two vectors "a" and "b" to packed BF16 (16-bit) floating-point elements, and store the results in single vector "dst". + +FOR j := 0 to 31 + IF j < 16 + t := b.fp32[j] + ELSE + t := a.fp32[j-16] + FI + dst.word[j] := Convert_FP32_To_BF16(t) +ENDFOR +dst[MAX:512] := 0 + + + AVX512_BF16 + AVX512F +
immintrin.h
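The pseudocode leaves Convert_FP32_To_BF16 undefined; the "NE" in the underlying instruction name denotes round-to-nearest-even on the 16 discarded bits. Note also the packing order in the loop above: "b" supplies words 0..15 and "a" supplies words 16..31. A sketch of both, skipping NaN special-casing (names are illustrative):

// FP32 -> BF16 with round-to-nearest-even on the truncated low half.
// Real hardware also quiets NaNs, which this sketch does not model.
fn f32_to_bf16_rne(x: f32) -> u16 {
    let bits = x.to_bits();
    // Add 0x7FFF plus the LSB of the kept part so that ties round to even.
    let bias = 0x7FFF + ((bits >> 16) & 1);
    (bits.wrapping_add(bias) >> 16) as u16
}

// Two-vector packing order: `b` fills the low words, `a` the high words.
fn cvtne2ps_pbh_model(a: [f32; 16], b: [f32; 16]) -> [u16; 32] {
    let mut dst = [0u16; 32];
    for j in 0..32 {
        let t = if j < 16 { b[j] } else { a[j - 16] };
        dst[j] = f32_to_bf16_rne(t);
    }
    dst
}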
+ Convert
- - - - - - Convert packed single-precision (32-bit) floating-point elements in two vectors - "a" and "b" to packed BF16 (16-bit) floating-point elements, and store the results in - single vector "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 31 - IF k[j] - IF j < 16 + + + + + + Convert packed single-precision (32-bit) floating-point elements in two vectors "a" and "b" to packed BF16 (16-bit) floating-point elements, and store the results in single vector "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + IF k[j] + IF j < 16 t := b.fp32[j] - ELSE + ELSE t := a.fp32[j-16] - FI - dst.word[j] := Convert_FP32_To_BF16(t) - ELSE - dst.word[j] := src.word[j] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_BF16 - AVX512F -
immintrin.h
- Convert + FI + dst.word[j] := Convert_FP32_To_BF16(t) + ELSE + dst.word[j] := src.word[j] + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512_BF16 + AVX512F +
immintrin.h
+ Convert
- - - - - Convert packed single-precision (32-bit) floating-point elements in two vectors - "a" and "b" to packed BF16 (16-bit) floating-point elements, and store the results in - single vector "dst" using zeromask "k" (elements are zeroed out when the corresponding - mask bit is not set). - - FOR j := 0 to 31 - IF k[j] - IF j < 16 + + + + + Convert packed single-precision (32-bit) floating-point elements in two vectors "a" and "b" to packed BF16 (16-bit) floating-point elements, and store the results in single vector "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + IF k[j] + IF j < 16 t := b.fp32[j] - ELSE + ELSE t := a.fp32[j-16] - FI - dst.word[j] := Convert_FP32_To_BF16(t) - ELSE - dst.word[j] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_BF16 - AVX512F -
immintrin.h
- Convert + FI + dst.word[j] := Convert_FP32_To_BF16(t) + ELSE + dst.word[j] := 0 + FI +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512_BF16 + AVX512F +
immintrin.h
+ Convert
- - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed BF16 (16-bit) floating-point elements, and store the results in "dst". - - FOR j := 0 to 15 - dst.word[j] := Convert_FP32_To_BF16(a.fp32[j]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512_BF16 - AVX512F -
immintrin.h
- Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed BF16 (16-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 15 + dst.word[j] := Convert_FP32_To_BF16(a.fp32[j]) +ENDFOR +dst[MAX:256] := 0 + + + AVX512_BF16 + AVX512F +
immintrin.h
+ Convert
- - - - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed BF16 (16-bit) floating-point elements, and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - FOR j := 0 to 15 - IF k[j] - dst.word[j] := Convert_FP32_To_BF16(a.fp32[j]) - ELSE - dst.word[j] := src.word[j] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_BF16 - AVX512F -
immintrin.h
- Convert + + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed BF16 (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + IF k[j] + dst.word[j] := Convert_FP32_To_BF16(a.fp32[j]) + ELSE + dst.word[j] := src.word[j] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_BF16 + AVX512F +
immintrin.h
+ Convert
- - - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed BF16 (16-bit) floating-point elements, and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 15 - IF k[j] - dst.word[j] := Convert_FP32_To_BF16(a.fp32[j]) - ELSE - dst.word[j] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_BF16 - AVX512F -
immintrin.h
- Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed BF16 (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + IF k[j] + dst.word[j] := Convert_FP32_To_BF16(a.fp32[j]) + ELSE + dst.word[j] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_BF16 + AVX512F +
immintrin.h
+ Convert
- - - - - Compute dot-product of BF16 (16-bit) floating-point pairs in "a" and "b", - accumulating the intermediate single-precision (32-bit) floating-point elements with - elements in "src", and store the results in "dst". - - DEFINE make_fp32(x[15:0]) { - y.fp32 := 0.0 - y[31:16] := x[15:0] - RETURN y - } - dst := src - FOR j := 0 to 15 - dst.fp32[j] += make_fp32(a.bf16[2*j+1]) * make_fp32(b.bf16[2*j+1]) - dst.fp32[j] += make_fp32(a.bf16[2*j+0]) * make_fp32(b.bf16[2*j+0]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512_BF16 - AVX512F -
immintrin.h
- Arithmetic + + + + + Compute dot-product of BF16 (16-bit) floating-point pairs in "a" and "b", accumulating the intermediate single-precision (32-bit) floating-point elements with elements in "src", and store the results in "dst". + +DEFINE make_fp32(x[15:0]) { + y.fp32 := 0.0 + y[31:16] := x[15:0] + RETURN y +} +dst := src +FOR j := 0 to 15 + dst.fp32[j] += make_fp32(a.bf16[2*j+1]) * make_fp32(b.bf16[2*j+1]) + dst.fp32[j] += make_fp32(a.bf16[2*j+0]) * make_fp32(b.bf16[2*j+0]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512_BF16 + AVX512F +
immintrin.h
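make_fp32 is the same exact upper-half widening as above, so each 32-bit lane accumulates two FP32 products of widened BF16 pairs into the passed-in accumulator, odd pair first. A single-lane sketch (dpbf16_lane is an illustrative name):

// One lane of the BF16 dot product: widen both pairs, multiply, accumulate.
fn dpbf16_lane(src: f32, a: [u16; 2], b: [u16; 2]) -> f32 {
    let widen = |x: u16| f32::from_bits((x as u32) << 16); // make_fp32
    // Two sequential adds, matching the pseudocode's rounding order.
    (src + widen(a[1]) * widen(b[1])) + widen(a[0]) * widen(b[0])
}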
+ Arithmetic
- - - - - - Compute dot-product of BF16 (16-bit) floating-point pairs in "a" and "b", - accumulating the intermediate single-precision (32-bit) floating-point elements with - elements in "src", and store the results in "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). - - DEFINE make_fp32(x[15:0]) { - y.fp32 := 0.0 - y[31:16] := x[15:0] - RETURN y - } - dst := src - FOR j := 0 to 15 - IF k[j] - dst.fp32[j] += make_fp32(a.bf16[2*j+1]) * make_fp32(b.bf16[2*j+1]) - dst.fp32[j] += make_fp32(a.bf16[2*j+0]) * make_fp32(b.bf16[2*j+0]) - ELSE - dst.dword[j] := src.dword[j] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_BF16 - AVX512F -
immintrin.h
- Arithmetic + + + + + + Compute dot-product of BF16 (16-bit) floating-point pairs in "a" and "b", accumulating the intermediate single-precision (32-bit) floating-point elements with elements in "src", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE make_fp32(x[15:0]) { + y.fp32 := 0.0 + y[31:16] := x[15:0] + RETURN y +} +dst := src +FOR j := 0 to 15 + IF k[j] + dst.fp32[j] += make_fp32(a.bf16[2*j+1]) * make_fp32(b.bf16[2*j+1]) + dst.fp32[j] += make_fp32(a.bf16[2*j+0]) * make_fp32(b.bf16[2*j+0]) + ELSE + dst.dword[j] := src.dword[j] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_BF16 + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Compute dot-product of BF16 (16-bit) floating-point pairs in "a" and "b", - accumulating the intermediate single-precision (32-bit) floating-point elements with - elements in "src", and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - DEFINE make_fp32(x[15:0]) { - y.fp32 := 0.0 - y[31:16] := x[15:0] - RETURN y - } - dst := src - FOR j := 0 to 15 - IF k[j] - dst.fp32[j] += make_fp32(a.bf16[2*j+1]) * make_fp32(b.bf16[2*j+1]) - dst.fp32[j] += make_fp32(a.bf16[2*j+0]) * make_fp32(b.bf16[2*j+0]) - ELSE - dst.dword[j] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_BF16 - AVX512F -
immintrin.h
- Arithmetic -
- - + + + + + + Compute dot-product of BF16 (16-bit) floating-point pairs in "a" and "b", accumulating the intermediate single-precision (32-bit) floating-point elements with elements in "src", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE make_fp32(x[15:0]) { + y.fp32 := 0.0 + y[31:16] := x[15:0] + RETURN y +} +dst := src +FOR j := 0 to 15 + IF k[j] + dst.fp32[j] += make_fp32(a.bf16[2*j+1]) * make_fp32(b.bf16[2*j+1]) + dst.fp32[j] += make_fp32(a.bf16[2*j+0]) * make_fp32(b.bf16[2*j+0]) + ELSE + dst.dword[j] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_BF16 + AVX512F +
immintrin.h
+ Arithmetic + + + - - - Convert packed BF16 (16-bit) floating-point elements in "a" to packed - single-precision (32-bit) floating-point elements, and store the results in "dst". This - intrinsic neither raises any floating point exceptions nor turns sNAN into qNAN. - - FOR j := 0 to 3 - i := j*32 - m := j*16 - dst[i+31:i] := Convert_BF16_To_FP32(a[m+15:m]) - ENDFOR - dst[MAX:128] := 0 - - AVX512_BF16 - AVX512VL -
immintrin.h
- Convert + + + Convert packed BF16 (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". This intrinsic neither raises any floating point exceptions nor turns sNAN into qNAN. + +FOR j := 0 to 3 + i := j*32 + m := j*16 + dst[i+31:i] := Convert_BF16_To_FP32(a[m+15:m]) +ENDFOR +dst[MAX:128] := 0 + + AVX512_BF16 + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed BF16 (16-bit) floating-point elements in "a" to packed - single-precision (32-bit) floating-point elements, and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This - intrinsic neither raises any floating point exceptions nor turns sNAN into qNAN. - - FOR j := 0 to 3 - i := j*32 - m := j*16 - IF k[j] - dst[i+31:i] := Convert_BF16_To_FP32(a[m+15:m]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - AVX512_BF16 - AVX512VL -
immintrin.h
- Convert + + + + Convert packed BF16 (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic neither raises any floating point exceptions nor turns sNAN into qNAN. + +FOR j := 0 to 3 + i := j*32 + m := j*16 + IF k[j] + dst[i+31:i] := Convert_BF16_To_FP32(a[m+15:m]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + AVX512_BF16 + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed BF16 (16-bit) floating-point elements in "a" to packed - single-precision (32-bit) floating-point elements, and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). This intrinsic neither raises any floating point exceptions nor turns sNAN into - qNAN. - - FOR j := 0 to 3 - i := j*32 - m := j*16 - IF k[j] - dst[i+31:i] := Convert_BF16_To_FP32(a[m+15:m]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - AVX512_BF16 - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed BF16 (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic neither raises any floating point exceptions nor turns sNAN into qNAN. + +FOR j := 0 to 3 + i := j*32 + m := j*16 + IF k[j] + dst[i+31:i] := Convert_BF16_To_FP32(a[m+15:m]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + AVX512_BF16 + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed BF16 (16-bit) floating-point elements in "a" to packed - single-precision (32-bit) floating-point elements, and store the results in "dst". This - intrinsic neither raises any floating point exceptions nor turns sNAN into qNAN. - - FOR j := 0 to 7 - i := j*32 - m := j*16 - dst[i+31:i] := Convert_BF16_To_FP32(a[m+15:m]) - ENDFOR - dst[MAX:256] := 0 - - AVX512_BF16 - AVX512VL -
immintrin.h
- Convert + + + Convert packed BF16 (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". This intrinsic neither raises any floating point exceptions nor turns sNAN into qNAN. + +FOR j := 0 to 7 + i := j*32 + m := j*16 + dst[i+31:i] := Convert_BF16_To_FP32(a[m+15:m]) +ENDFOR +dst[MAX:256] := 0 + + AVX512_BF16 + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed BF16 (16-bit) floating-point elements in "a" to packed - single-precision (32-bit) floating-point elements, and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This - intrinsic neither raises any floating point exceptions nor turns sNAN into qNAN. - - FOR j := 0 to 7 - i := j*32 - m := j*16 - IF k[j] - dst[i+31:i] := Convert_BF16_To_FP32(a[m+15:m]) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - AVX512_BF16 - AVX512VL -
immintrin.h
- Convert + + + + Convert packed BF16 (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic neither raises any floating point exceptions nor turns sNAN into qNAN. + +FOR j := 0 to 7 + i := j*32 + m := j*16 + IF k[j] + dst[i+31:i] := Convert_BF16_To_FP32(a[m+15:m]) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + AVX512_BF16 + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed BF16 (16-bit) floating-point elements in "a" to packed - single-precision (32-bit) floating-point elements, and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). This intrinsic neither raises any floating point exceptions nor turns sNAN into - qNAN. - - FOR j := 0 to 7 - i := j*32 - m := j*16 - IF k[j] - dst[i+31:i] := Convert_BF16_To_FP32(a[m+15:m]) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - AVX512_BF16 - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed BF16 (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic neither raises any floating point exceptions nor turns sNAN into qNAN. + +FOR j := 0 to 7 + i := j*32 + m := j*16 + IF k[j] + dst[i+31:i] := Convert_BF16_To_FP32(a[m+15:m]) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + AVX512_BF16 + AVX512VL +
immintrin.h
+ Convert
- - - Convert the single-precision (32-bit) floating-point element in "a" to a BF16 - (16-bit) floating-point element, and store the result in "dst". - - dst[15:0] := Convert_FP32_To_BF16(a[31:0]) - - AVX512_BF16 - AVX512VL -
immintrin.h
- Convert + + + Convert the single-precision (32-bit) floating-point element in "a" to a BF16 (16-bit) floating-point element, and store the result in "dst". + +dst[15:0] := Convert_FP32_To_BF16(a[31:0]) + + AVX512_BF16 + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed single-precision (32-bit) floating-point elements in two vectors - "a" and "b" to packed BF16 (16-bit) floating-point elements, and store the results in - single vector "dst". - - FOR j := 0 to 7 - IF j < 4 - t := b.fp32[j] - ELSE - t := a.fp32[j-4] - FI - dst.word[j] := Convert_FP32_To_BF16(t) - ENDFOR - dst[MAX:128] := 0 - - - AVX512_BF16 - AVX512VL -
immintrin.h
- Convert + + + + Convert packed single-precision (32-bit) floating-point elements in two vectors "a" and "b" to packed BF16 (16-bit) floating-point elements, and store the results in single vector "dst". + +FOR j := 0 to 7 + IF j < 4 + t := b.fp32[j] + ELSE + t := a.fp32[j-4] + FI + dst.word[j] := Convert_FP32_To_BF16(t) +ENDFOR +dst[MAX:128] := 0 + + + AVX512_BF16 + AVX512VL +
immintrin.h
+ Convert
- - - - - - Convert packed single-precision (32-bit) floating-point elements in two vectors - "a" and "b" to packed BF16 (16-bit) floating-point elements, and store the results in - single vector "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - IF k[j] - IF j < 4 + + + + + + Convert packed single-precision (32-bit) floating-point elements in two vectors "a" and "b" to packed BF16 (16-bit) floating-point elements, and store the results in single vector "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + IF k[j] + IF j < 4 t := b.fp32[j] - ELSE + ELSE t := a.fp32[j-4] - FI - dst.word[j] := Convert_FP32_To_BF16(t) - ELSE - dst.word[j] := src.word[j] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_BF16 - AVX512VL -
immintrin.h
- Convert + FI + dst.word[j] := Convert_FP32_To_BF16(t) + ELSE + dst.word[j] := src.word[j] + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512_BF16 + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed single-precision (32-bit) floating-point elements in two vectors - "a" and "b" to packed BF16 (16-bit) floating-point elements, and store the results in - single vector "dst" using zeromask "k" (elements are zeroed out when the corresponding - mask bit is not set). - - FOR j := 0 to 7 - IF k[j] - IF j < 4 + + + + + Convert packed single-precision (32-bit) floating-point elements in two vectors "a" and "b" to packed BF16 (16-bit) floating-point elements, and store the results in single vector "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + IF k[j] + IF j < 4 t := b.fp32[j] - ELSE + ELSE t := a.fp32[j-4] - FI - dst.word[j] := Convert_FP32_To_BF16(t) - ELSE - dst.word[j] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_BF16 - AVX512VL -
immintrin.h
- Convert + FI + dst.word[j] := Convert_FP32_To_BF16(t) + ELSE + dst.word[j] := 0 + FI +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512_BF16 + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed single-precision (32-bit) floating-point elements in two vectors - "a" and "b" to packed BF16 (16-bit) floating-point elements, and store the results in - single vector "dst". - - FOR j := 0 to 15 - IF j < 8 - t := b.fp32[j] - ELSE - t := a.fp32[j-8] - FI - dst.word[j] := Convert_FP32_To_BF16(t) - ENDFOR - dst[MAX:256] := 0 - - - AVX512_BF16 - AVX512VL -
immintrin.h
- Convert + + + + Convert packed single-precision (32-bit) floating-point elements in two vectors "a" and "b" to packed BF16 (16-bit) floating-point elements, and store the results in single vector "dst". + +FOR j := 0 to 15 + IF j < 8 + t := b.fp32[j] + ELSE + t := a.fp32[j-8] + FI + dst.word[j] := Convert_FP32_To_BF16(t) +ENDFOR +dst[MAX:256] := 0 + + + AVX512_BF16 + AVX512VL +
immintrin.h
+ Convert
- - - - - - Convert packed single-precision (32-bit) floating-point elements in two vectors - "a" and "b" to packed BF16 (16-bit) floating-point elements, and store the results in - single vector "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - IF k[j] - IF j < 8 + + + + + + Convert packed single-precision (32-bit) floating-point elements in two vectors "a" and "b" to packed BF16 (16-bit) floating-point elements, and store the results in single vector "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + IF k[j] + IF j < 8 t := b.fp32[j] - ELSE + ELSE t := a.fp32[j-8] - FI - dst.word[j] := Convert_FP32_To_BF16(t) - ELSE - dst.word[j] := src.word[j] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_BF16 - AVX512VL -
immintrin.h
- Convert + FI + dst.word[j] := Convert_FP32_To_BF16(t) + ELSE + dst.word[j] := src.word[j] + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512_BF16 + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed single-precision (32-bit) floating-point elements in two vectors - "a" and "b" to packed BF16 (16-bit) floating-point elements, and store the results in - single vector "dst" using zeromask "k" (elements are zeroed out when the corresponding - mask bit is not set). - - FOR j := 0 to 15 - IF k[j] - IF j < 8 + + + + + Convert packed single-precision (32-bit) floating-point elements in two vectors "a" and "b" to packed BF16 (16-bit) floating-point elements, and store the results in single vector "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + IF k[j] + IF j < 8 t := b.fp32[j] - ELSE + ELSE t := a.fp32[j-8] - FI - dst.word[j] := Convert_FP32_To_BF16(t) - ELSE - dst.word[j] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_BF16 - AVX512VL -
immintrin.h
- Convert + FI + dst.word[j] := Convert_FP32_To_BF16(t) + ELSE + dst.word[j] := 0 + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512_BF16 + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed BF16 (16-bit) floating-point elements, and store the results in "dst". - - FOR j := 0 to 3 - dst.word[j] := Convert_FP32_To_BF16(a.fp32[j]) - ENDFOR - dst[MAX:128] := 0 - - - AVX512_BF16 - AVX512VL -
immintrin.h
- Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed BF16 (16-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 3 + dst.word[j] := Convert_FP32_To_BF16(a.fp32[j]) +ENDFOR +dst[MAX:128] := 0 + + + AVX512_BF16 + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed BF16 (16-bit) floating-point elements, and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - FOR j := 0 to 3 - IF k[j] - dst.word[j] := Convert_FP32_To_BF16(a.fp32[j]) - ELSE - dst.word[j] := src.word[j] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_BF16 - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed BF16 (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + IF k[j] + dst.word[j] := Convert_FP32_To_BF16(a.fp32[j]) + ELSE + dst.word[j] := src.word[j] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_BF16 + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed BF16 (16-bit) floating-point elements, and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - IF k[j] - dst.word[j] := Convert_FP32_To_BF16(a.fp32[j]) - ELSE - dst.word[j] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_BF16 - AVX512VL -
immintrin.h
- Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed BF16 (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + IF k[j] + dst.word[j] := Convert_FP32_To_BF16(a.fp32[j]) + ELSE + dst.word[j] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_BF16 + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed BF16 (16-bit) floating-point elements, and store the results in "dst". - - FOR j := 0 to 7 - dst.word[j] := Convert_FP32_To_BF16(a.fp32[j]) - ENDFOR - dst[MAX:128] := 0 - - - AVX512_BF16 - AVX512VL -
immintrin.h
- Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed BF16 (16-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 7 + dst.word[j] := Convert_FP32_To_BF16(a.fp32[j]) +ENDFOR +dst[MAX:128] := 0 + + + AVX512_BF16 + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed BF16 (16-bit) floating-point elements, and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - FOR j := 0 to 7 - IF k[j] - dst.word[j] := Convert_FP32_To_BF16(a.fp32[j]) - ELSE - dst.word[j] := src.word[j] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_BF16 - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed BF16 (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + IF k[j] + dst.word[j] := Convert_FP32_To_BF16(a.fp32[j]) + ELSE + dst.word[j] := src.word[j] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_BF16 + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed BF16 (16-bit) floating-point elements, and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - IF k[j] - dst.word[j] := Convert_FP32_To_BF16(a.fp32[j]) - ELSE - dst.word[j] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_BF16 - AVX512VL -
immintrin.h
- Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed BF16 (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + IF k[j] + dst.word[j] := Convert_FP32_To_BF16(a.fp32[j]) + ELSE + dst.word[j] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_BF16 + AVX512VL +
immintrin.h
+ Convert
- - - - - Compute dot-product of BF16 (16-bit) floating-point pairs in "a" and "b", - accumulating the intermediate single-precision (32-bit) floating-point elements with - elements in "src", and store the results in "dst". - - DEFINE make_fp32(x[15:0]) { - y.fp32 := 0.0 - y[31:16] := x[15:0] - RETURN y - } - dst := src - FOR j := 0 to 3 - dst.fp32[j] += make_fp32(a.bf16[2*j+1]) * make_fp32(b.bf16[2*j+1]) - dst.fp32[j] += make_fp32(a.bf16[2*j+0]) * make_fp32(b.bf16[2*j+0]) - ENDFOR - dst[MAX:128] := 0 - - - AVX512_BF16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + Compute dot-product of BF16 (16-bit) floating-point pairs in "a" and "b", accumulating the intermediate single-precision (32-bit) floating-point elements with elements in "src", and store the results in "dst". + +DEFINE make_fp32(x[15:0]) { + y.fp32 := 0.0 + y[31:16] := x[15:0] + RETURN y +} +dst := src +FOR j := 0 to 3 + dst.fp32[j] += make_fp32(a.bf16[2*j+1]) * make_fp32(b.bf16[2*j+1]) + dst.fp32[j] += make_fp32(a.bf16[2*j+0]) * make_fp32(b.bf16[2*j+0]) +ENDFOR +dst[MAX:128] := 0 + + + AVX512_BF16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Compute dot-product of BF16 (16-bit) floating-point pairs in "a" and "b", - accumulating the intermediate single-precision (32-bit) floating-point elements with - elements in "src", and store the results in "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). - - DEFINE make_fp32(x[15:0]) { - y.fp32 := 0.0 - y[31:16] := x[15:0] - RETURN y - } - dst := src - FOR j := 0 to 3 - IF k[j] - dst.fp32[j] += make_fp32(a.bf16[2*j+1]) * make_fp32(b.bf16[2*j+1]) - dst.fp32[j] += make_fp32(a.bf16[2*j+0]) * make_fp32(b.bf16[2*j+0]) - ELSE - dst.dword[j] := src.dword[j] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_BF16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Compute dot-product of BF16 (16-bit) floating-point pairs in "a" and "b", accumulating the intermediate single-precision (32-bit) floating-point elements with elements in "src", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE make_fp32(x[15:0]) { + y.fp32 := 0.0 + y[31:16] := x[15:0] + RETURN y +} +dst := src +FOR j := 0 to 3 + IF k[j] + dst.fp32[j] += make_fp32(a.bf16[2*j+1]) * make_fp32(b.bf16[2*j+1]) + dst.fp32[j] += make_fp32(a.bf16[2*j+0]) * make_fp32(b.bf16[2*j+0]) + ELSE + dst.dword[j] := src.dword[j] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_BF16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Compute dot-product of BF16 (16-bit) floating-point pairs in "a" and "b", - accumulating the intermediate single-precision (32-bit) floating-point elements with - elements in "src", and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - DEFINE make_fp32(x[15:0]) { - y.fp32 := 0.0 - y[31:16] := x[15:0] - RETURN y - } - dst := src - FOR j := 0 to 3 - IF k[j] - dst.fp32[j] += make_fp32(a.bf16[2*j+1]) * make_fp32(b.bf16[2*j+1]) - dst.fp32[j] += make_fp32(a.bf16[2*j+0]) * make_fp32(b.bf16[2*j+0]) - ELSE - dst.dword[j] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_BF16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Compute dot-product of BF16 (16-bit) floating-point pairs in "a" and "b", accumulating the intermediate single-precision (32-bit) floating-point elements with elements in "src", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE make_fp32(x[15:0]) { + y.fp32 := 0.0 + y[31:16] := x[15:0] + RETURN y +} +dst := src +FOR j := 0 to 3 + IF k[j] + dst.fp32[j] += make_fp32(a.bf16[2*j+1]) * make_fp32(b.bf16[2*j+1]) + dst.fp32[j] += make_fp32(a.bf16[2*j+0]) * make_fp32(b.bf16[2*j+0]) + ELSE + dst.dword[j] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_BF16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Compute dot-product of BF16 (16-bit) floating-point pairs in "a" and "b", - accumulating the intermediate single-precision (32-bit) floating-point elements with - elements in "src", and store the results in "dst". - - DEFINE make_fp32(x[15:0]) { - y.fp32 := 0.0 - y[31:16] := x[15:0] - RETURN y - } - dst := src - FOR j := 0 to 7 - dst.fp32[j] += make_fp32(a.bf16[2*j+1]) * make_fp32(b.bf16[2*j+1]) - dst.fp32[j] += make_fp32(a.bf16[2*j+0]) * make_fp32(b.bf16[2*j+0]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512_BF16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + Compute dot-product of BF16 (16-bit) floating-point pairs in "a" and "b", accumulating the intermediate single-precision (32-bit) floating-point elements with elements in "src", and store the results in "dst". + +DEFINE make_fp32(x[15:0]) { + y.fp32 := 0.0 + y[31:16] := x[15:0] + RETURN y +} +dst := src +FOR j := 0 to 7 + dst.fp32[j] += make_fp32(a.bf16[2*j+1]) * make_fp32(b.bf16[2*j+1]) + dst.fp32[j] += make_fp32(a.bf16[2*j+0]) * make_fp32(b.bf16[2*j+0]) +ENDFOR +dst[MAX:256] := 0 + + + AVX512_BF16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Compute dot-product of BF16 (16-bit) floating-point pairs in "a" and "b", - accumulating the intermediate single-precision (32-bit) floating-point elements with - elements in "src", and store the results in "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). - - DEFINE make_fp32(x[15:0]) { - y.fp32 := 0.0 - y[31:16] := x[15:0] - RETURN y - } - dst := src - FOR j := 0 to 7 - IF k[j] - dst.fp32[j] += make_fp32(a.bf16[2*j+1]) * make_fp32(b.bf16[2*j+1]) - dst.fp32[j] += make_fp32(a.bf16[2*j+0]) * make_fp32(b.bf16[2*j+0]) - ELSE - dst.dword[j] := src.dword[j] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_BF16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Compute dot-product of BF16 (16-bit) floating-point pairs in "a" and "b", accumulating the intermediate single-precision (32-bit) floating-point elements with elements in "src", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE make_fp32(x[15:0]) { + y.fp32 := 0.0 + y[31:16] := x[15:0] + RETURN y +} +dst := src +FOR j := 0 to 7 + IF k[j] + dst.fp32[j] += make_fp32(a.bf16[2*j+1]) * make_fp32(b.bf16[2*j+1]) + dst.fp32[j] += make_fp32(a.bf16[2*j+0]) * make_fp32(b.bf16[2*j+0]) + ELSE + dst.dword[j] := src.dword[j] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_BF16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Compute dot-product of BF16 (16-bit) floating-point pairs in "a" and "b", - accumulating the intermediate single-precision (32-bit) floating-point elements with - elements in "src", and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - DEFINE make_fp32(x[15:0]) { - y.fp32 := 0.0 - y[31:16] := x[15:0] - RETURN y - } - dst := src - FOR j := 0 to 7 - IF k[j] - dst.fp32[j] += make_fp32(a.bf16[2*j+1]) * make_fp32(b.bf16[2*j+1]) - dst.fp32[j] += make_fp32(a.bf16[2*j+0]) * make_fp32(b.bf16[2*j+0]) - ELSE - dst.dword[j] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_BF16 - AVX512VL -
immintrin.h
- Arithmetic -
- - - - - - - - Gather 64 bits from "b" using selection bits in "c". For each 64-bit element in - "b", gather 8 bits from the 64-bit element in "b" at 8 bit position controlled by the 8 - corresponding 8-bit elements of "c", and store the result in the corresponding 8-bit - element of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask - bit is not set). - - FOR i := 0 to 3 //Qword - FOR j := 0 to 7 // Byte - IF k[i*8+j] + + + + + + Compute dot-product of BF16 (16-bit) floating-point pairs in "a" and "b", accumulating the intermediate single-precision (32-bit) floating-point elements with elements in "src", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE make_fp32(x[15:0]) { + y.fp32 := 0.0 + y[31:16] := x[15:0] + RETURN y +} +dst := src +FOR j := 0 to 7 + IF k[j] + dst.fp32[j] += make_fp32(a.bf16[2*j+1]) * make_fp32(b.bf16[2*j+1]) + dst.fp32[j] += make_fp32(a.bf16[2*j+0]) * make_fp32(b.bf16[2*j+0]) + ELSE + dst.dword[j] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_BF16 + AVX512VL +
immintrin.h
+ Arithmetic +
+ + + + + + + + Gather 64 bits from "b" using selection bits in "c". For each 64-bit element in "b", gather 8 bits from the 64-bit element in "b" at 8 bit position controlled by the 8 corresponding 8-bit elements of "c", and store the result in the corresponding 8-bit element of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR i := 0 to 3 //Qword + FOR j := 0 to 7 // Byte + IF k[i*8+j] m := c.qword[i].byte[j] & 0x3F dst[i*8+j] := b.qword[i].bit[m] - ELSE + ELSE dst[i*8+j] := 0 - FI - ENDFOR - ENDFOR - dst[MAX:32] := 0 - - - AVX512_BITALG - AVX512VL -
immintrin.h
- Bit Manipulation + FI + ENDFOR +ENDFOR +dst[MAX:32] := 0 + + + AVX512_BITALG + AVX512VL +
immintrin.h
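Each result bit selects one of the 64 bits of the matching qword of "b", indexed by the low 6 bits of the corresponding byte of "c"; the 256-bit form therefore yields a 32-bit mask. A scalar sketch of the zeromask variant above (maskz_bitshuffle_epi64_mask_model is an illustrative name):

// Bit gather: result bit i*8+j = bit (c.qword[i].byte[j] & 0x3F) of b.qword[i].
fn maskz_bitshuffle_epi64_mask_model(k: u32, b: [u64; 4], c: [u64; 4]) -> u32 {
    let mut dst = 0u32;
    for i in 0..4 {
        for j in 0..8 {
            let pos = i * 8 + j;
            if (k >> pos) & 1 == 1 {
                let sel = (c[i] >> (8 * j)) & 0x3F; // low 6 bits of byte j
                dst |= (((b[i] >> sel) & 1) as u32) << pos;
            }
            // unselected bits stay 0 (zeromask)
        }
    }
    dst
}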
+ Bit Manipulation
- - - - Gather 64 bits from "b" using selection bits in "c". For each 64-bit element in - "b", gather 8 bits from the 64-bit element in "b" at 8 bit position controlled by the 8 - corresponding 8-bit elements of "c", and store the result in the corresponding 8-bit - element of "dst". - - FOR i := 0 to 3 //Qword - FOR j := 0 to 7 // Byte - m := c.qword[i].byte[j] & 0x3F - dst[i*8+j] := b.qword[i].bit[m] - ENDFOR - ENDFOR - dst[MAX:32] := 0 - - - AVX512_BITALG - AVX512VL -
immintrin.h
- Bit Manipulation + + + + Gather 64 bits from "b" using selection bits in "c". For each 64-bit element in "b", gather 8 bits from the 64-bit element in "b" at 8 bit position controlled by the 8 corresponding 8-bit elements of "c", and store the result in the corresponding 8-bit element of "dst". + +FOR i := 0 to 3 //Qword + FOR j := 0 to 7 // Byte + m := c.qword[i].byte[j] & 0x3F + dst[i*8+j] := b.qword[i].bit[m] + ENDFOR +ENDFOR +dst[MAX:32] := 0 + + + AVX512_BITALG + AVX512VL +
immintrin.h
+ Bit Manipulation
- - - - - Gather 64 bits from "b" using selection bits in "c". For each 64-bit element in - "b", gather 8 bits from the 64-bit element in "b" at 8 bit position controlled by the 8 - corresponding 8-bit elements of "c", and store the result in the corresponding 8-bit - element of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask - bit is not set). - - FOR i := 0 to 1 //Qword - FOR j := 0 to 7 // Byte - IF k[i*8+j] + + + + + Gather 64 bits from "b" using selection bits in "c". For each 64-bit element in "b", gather 8 bits from the 64-bit element in "b" at 8 bit position controlled by the 8 corresponding 8-bit elements of "c", and store the result in the corresponding 8-bit element of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR i := 0 to 1 //Qword + FOR j := 0 to 7 // Byte + IF k[i*8+j] m := c.qword[i].byte[j] & 0x3F dst[i*8+j] := b.qword[i].bit[m] - ELSE + ELSE dst[i*8+j] := 0 - FI - ENDFOR - ENDFOR - dst[MAX:16] := 0 - - - AVX512_BITALG - AVX512VL -
immintrin.h
- Bit Manipulation + FI + ENDFOR +ENDFOR +dst[MAX:16] := 0 +
+ + AVX512_BITALG + AVX512VL +
immintrin.h
+ Bit Manipulation
- - - - Gather 64 bits from "b" using selection bits in "c". For each 64-bit element in - "b", gather 8 bits from the 64-bit element in "b" at 8 bit position controlled by the 8 - corresponding 8-bit elements of "c", and store the result in the corresponding 8-bit - element of "dst". - - FOR i := 0 to 1 //Qword - FOR j := 0 to 7 // Byte - m := c.qword[i].byte[j] & 0x3F - dst[i*8+j] := b.qword[i].bit[m] - ENDFOR - ENDFOR - dst[MAX:16] := 0 - - - AVX512_BITALG - AVX512VL -
immintrin.h
- Bit Manipulation + + + + Gather 64 bits from "b" using selection bits in "c". For each 64-bit element in "b", gather 8 bits from the 64-bit element in "b" at 8 bit position controlled by the 8 corresponding 8-bit elements of "c", and store the result in the corresponding 8-bit element of "dst". + +FOR i := 0 to 1 //Qword + FOR j := 0 to 7 // Byte + m := c.qword[i].byte[j] & 0x3F + dst[i*8+j] := b.qword[i].bit[m] + ENDFOR +ENDFOR +dst[MAX:16] := 0 + + + AVX512_BITALG + AVX512VL +
immintrin.h
+ Bit Manipulation
- - - Count the number of logical 1 bits in packed 16-bit integers in "a", and store - the results in "dst". - - DEFINE POPCNT(a) { - count := 0 - DO WHILE a > 0 - count += a[0] - a >>= 1 - OD - RETURN count - } - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := POPCNT(a[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512_BITALG - AVX512VL -
immintrin.h
- Bit Manipulation + + + Count the number of logical 1 bits in packed 16-bit integers in "a", and store the results in "dst". + +DEFINE POPCNT(a) { + count := 0 + DO WHILE a > 0 + count += a[0] + a >>= 1 + OD + RETURN count +} +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := POPCNT(a[i+15:i]) +ENDFOR +dst[MAX:256] := 0 + + + AVX512_BITALG + AVX512VL +
immintrin.h
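AVX512_BITALG applies the identical POPCNT model at 16-bit and 8-bit lane widths; only the lane count and mask width change. A word-granularity sketch for the 256-bit form (popcnt_epi16_model is an illustrative name):

// Per-word population count; count_ones() is the scalar equivalent.
fn popcnt_epi16_model(a: [u16; 16]) -> [u16; 16] {
    a.map(|x| x.count_ones() as u16)
}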
+ Bit Manipulation
- - - - - Count the number of logical 1 bits in packed 16-bit integers in "a", and store - the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - DEFINE POPCNT(a) { - count := 0 - DO WHILE a > 0 - count += a[0] - a >>= 1 - OD - RETURN count - } - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := POPCNT(a[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_BITALG - AVX512VL -
immintrin.h
- Bit Manipulation + + + + + Count the number of logical 1 bits in packed 16-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE POPCNT(a) { + count := 0 + DO WHILE a > 0 + count += a[0] + a >>= 1 + OD + RETURN count +} +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := POPCNT(a[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_BITALG + AVX512VL +
immintrin.h
+ Bit Manipulation
- - - - Count the number of logical 1 bits in packed 16-bit integers in "a", and store - the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding - mask bit is not set). - - DEFINE POPCNT(a) { - count := 0 - DO WHILE a > 0 - count += a[0] - a >>= 1 - OD - RETURN count - } - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := POPCNT(a[i+15:i]) - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_BITALG - AVX512VL -
immintrin.h
- Bit Manipulation + + + + Count the number of logical 1 bits in packed 16-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE POPCNT(a) { + count := 0 + DO WHILE a > 0 + count += a[0] + a >>= 1 + OD + RETURN count +} +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := POPCNT(a[i+15:i]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_BITALG + AVX512VL +
immintrin.h
+ Bit Manipulation
- - - Count the number of logical 1 bits in packed 16-bit integers in "a", and store - the results in "dst". - - DEFINE POPCNT(a) { - count := 0 - DO WHILE a > 0 - count += a[0] - a >>= 1 - OD - RETURN count - } - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := POPCNT(a[i+15:i]) - ENDFOR - dst[MAX:128] := 0 - - - AVX512_BITALG - AVX512VL -
immintrin.h
- Bit Manipulation + + + Count the number of logical 1 bits in packed 16-bit integers in "a", and store the results in "dst". + +DEFINE POPCNT(a) { + count := 0 + DO WHILE a > 0 + count += a[0] + a >>= 1 + OD + RETURN count +} +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := POPCNT(a[i+15:i]) +ENDFOR +dst[MAX:128] := 0 + + + AVX512_BITALG + AVX512VL +
immintrin.h
+ Bit Manipulation
- - - - - Count the number of logical 1 bits in packed 16-bit integers in "a", and store - the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - DEFINE POPCNT(a) { - count := 0 - DO WHILE a > 0 - count += a[0] - a >>= 1 - OD - RETURN count - } - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := POPCNT(a[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_BITALG - AVX512VL -
immintrin.h
- Bit Manipulation + + + + + Count the number of logical 1 bits in packed 16-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE POPCNT(a) { + count := 0 + DO WHILE a > 0 + count += a[0] + a >>= 1 + OD + RETURN count +} +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := POPCNT(a[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_BITALG + AVX512VL +
immintrin.h
+ Bit Manipulation
- - - - Count the number of logical 1 bits in packed 16-bit integers in "a", and store - the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding - mask bit is not set). - - DEFINE POPCNT(a) { - count := 0 - DO WHILE a > 0 - count += a[0] - a >>= 1 - OD - RETURN count - } - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := POPCNT(a[i+15:i]) - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_BITALG - AVX512VL -
immintrin.h
- Bit Manipulation + + + + Count the number of logical 1 bits in packed 16-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE POPCNT(a) { + count := 0 + DO WHILE a > 0 + count += a[0] + a >>= 1 + OD + RETURN count +} +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := POPCNT(a[i+15:i]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_BITALG + AVX512VL +
immintrin.h
+ Bit Manipulation
- - - Count the number of logical 1 bits in packed 8-bit integers in "a", and store - the results in "dst". - - DEFINE POPCNT(a) { - count := 0 - DO WHILE a > 0 - count += a[0] - a >>= 1 - OD - RETURN count - } - FOR j := 0 to 31 - i := j*8 - dst[i+7:i] := POPCNT(a[i+7:i]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512_BITALG - AVX512VL -
immintrin.h
- Bit Manipulation + + + Count the number of logical 1 bits in packed 8-bit integers in "a", and store the results in "dst". + +DEFINE POPCNT(a) { + count := 0 + DO WHILE a > 0 + count += a[0] + a >>= 1 + OD + RETURN count +} +FOR j := 0 to 31 + i := j*8 + dst[i+7:i] := POPCNT(a[i+7:i]) +ENDFOR +dst[MAX:256] := 0 + + + AVX512_BITALG + AVX512VL +
immintrin.h
+ Bit Manipulation
- - - - - Count the number of logical 1 bits in packed 8-bit integers in "a", and store - the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - DEFINE POPCNT(a) { - count := 0 - DO WHILE a > 0 - count += a[0] - a >>= 1 - OD - RETURN count - } - FOR j := 0 to 31 - i := j*8 - IF k[j] - dst[i+7:i] := POPCNT(a[i+7:i]) - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_BITALG - AVX512VL -
immintrin.h
- Bit Manipulation + + + + + Count the number of logical 1 bits in packed 8-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE POPCNT(a) { + count := 0 + DO WHILE a > 0 + count += a[0] + a >>= 1 + OD + RETURN count +} +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := POPCNT(a[i+7:i]) + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_BITALG + AVX512VL +
immintrin.h
+ Bit Manipulation
- - - - Count the number of logical 1 bits in packed 8-bit integers in "a", and store - the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding - mask bit is not set). - - DEFINE POPCNT(a) { - count := 0 - DO WHILE a > 0 - count += a[0] - a >>= 1 - OD - RETURN count - } - FOR j := 0 to 31 - i := j*8 - IF k[j] - dst[i+7:i] := POPCNT(a[i+7:i]) - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_BITALG - AVX512VL -
immintrin.h
- Bit Manipulation + + + + Count the number of logical 1 bits in packed 8-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE POPCNT(a) { + count := 0 + DO WHILE a > 0 + count += a[0] + a >>= 1 + OD + RETURN count +} +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := POPCNT(a[i+7:i]) + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_BITALG + AVX512VL +
immintrin.h
+ Bit Manipulation
- - - Count the number of logical 1 bits in packed 8-bit integers in "a", and store - the results in "dst". - - DEFINE POPCNT(a) { - count := 0 - DO WHILE a > 0 - count += a[0] - a >>= 1 - OD - RETURN count - } - FOR j := 0 to 15 - i := j*8 - dst[i+7:i] := POPCNT(a[i+7:i]) - ENDFOR - dst[MAX:128] := 0 - - - AVX512_BITALG - AVX512VL -
immintrin.h
- Bit Manipulation + + + Count the number of logical 1 bits in packed 8-bit integers in "a", and store the results in "dst". + +DEFINE POPCNT(a) { + count := 0 + DO WHILE a > 0 + count += a[0] + a >>= 1 + OD + RETURN count +} +FOR j := 0 to 15 + i := j*8 + dst[i+7:i] := POPCNT(a[i+7:i]) +ENDFOR +dst[MAX:128] := 0 + + + AVX512_BITALG + AVX512VL +
immintrin.h
+ Bit Manipulation
- - - - - Count the number of logical 1 bits in packed 8-bit integers in "a", and store - the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - DEFINE POPCNT(a) { - count := 0 - DO WHILE a > 0 - count += a[0] - a >>= 1 - OD - RETURN count - } - FOR j := 0 to 15 - i := j*8 - IF k[j] - dst[i+7:i] := POPCNT(a[i+7:i]) - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_BITALG - AVX512VL -
immintrin.h
- Bit Manipulation + + + + + Count the number of logical 1 bits in packed 8-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE POPCNT(a) { + count := 0 + DO WHILE a > 0 + count += a[0] + a >>= 1 + OD + RETURN count +} +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := POPCNT(a[i+7:i]) + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_BITALG + AVX512VL +
immintrin.h
+ Bit Manipulation
- - - - Count the number of logical 1 bits in packed 8-bit integers in "a", and store - the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding - mask bit is not set). - - DEFINE POPCNT(a) { - count := 0 - DO WHILE a > 0 - count += a[0] - a >>= 1 - OD - RETURN count - } - FOR j := 0 to 15 - i := j*8 - IF k[j] - dst[i+7:i] := POPCNT(a[i+7:i]) - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_BITALG - AVX512VL -
immintrin.h
- Bit Manipulation -
- - - - - - - - Gather 64 bits from "b" using selection bits in "c". For each 64-bit element in - "b", gather 8 bits from the 64-bit element in "b" at 8 bit position controlled by the 8 - corresponding 8-bit elements of "c", and store the result in the corresponding 8-bit - element of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask - bit is not set). - - FOR i := 0 to 7 //Qword - FOR j := 0 to 7 // Byte - IF k[i*8+j] + + + + Count the number of logical 1 bits in packed 8-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE POPCNT(a) { + count := 0 + DO WHILE a > 0 + count += a[0] + a >>= 1 + OD + RETURN count +} +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := POPCNT(a[i+7:i]) + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_BITALG + AVX512VL +
immintrin.h
+ Bit Manipulation +
+ + + + + + + + Gather 64 bits from "b" using selection bits in "c". For each 64-bit element in "b", gather 8 bits from the 64-bit element in "b" at 8 bit position controlled by the 8 corresponding 8-bit elements of "c", and store the result in the corresponding 8-bit element of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR i := 0 to 7 //Qword + FOR j := 0 to 7 // Byte + IF k[i*8+j] m := c.qword[i].byte[j] & 0x3F dst[i*8+j] := b.qword[i].bit[m] - ELSE + ELSE dst[i*8+j] := 0 - FI - ENDFOR - ENDFOR - dst[MAX:64] := 0 - - - AVX512_BITALG -
immintrin.h
- Bit Manipulation + FI + ENDFOR +ENDFOR +dst[MAX:64] := 0 + + + AVX512_BITALG +
immintrin.h
+ Bit Manipulation
- - - - Gather 64 bits from "b" using selection bits in "c". For each 64-bit element in - "b", gather 8 bits from the 64-bit element in "b" at 8 bit position controlled by the 8 - corresponding 8-bit elements of "c", and store the result in the corresponding 8-bit - element of "dst". - - FOR i := 0 to 7 //Qword - FOR j := 0 to 7 // Byte - m := c.qword[i].byte[j] & 0x3F - dst[i*8+j] := b.qword[i].bit[m] - ENDFOR - ENDFOR - dst[MAX:64] := 0 - - - AVX512_BITALG -
immintrin.h
- Bit Manipulation + + + + Gather 64 bits from "b" using selection bits in "c". For each 64-bit element in "b", gather 8 bits from the 64-bit element in "b" at 8 bit position controlled by the 8 corresponding 8-bit elements of "c", and store the result in the corresponding 8-bit element of "dst". + +FOR i := 0 to 7 //Qword + FOR j := 0 to 7 // Byte + m := c.qword[i].byte[j] & 0x3F + dst[i*8+j] := b.qword[i].bit[m] + ENDFOR +ENDFOR +dst[MAX:64] := 0 + + + AVX512_BITALG +
immintrin.h
+ Bit Manipulation
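The bit-gather loop above is easier to follow in executable form. A plain-Rust model of the unmasked variant (a sketch; the function name is hypothetical): each byte of "c" selects, via its low 6 bits, one bit from the matching qword of "b".

    fn bitshuffle_epi64(b: [u64; 8], c: [u64; 8]) -> u64 {
        let mut dst = 0u64;
        for i in 0..8 {       // Qword
            for j in 0..8 {   // Byte
                let m = (c[i] >> (8 * j)) & 0x3F;        // c.qword[i].byte[j] & 0x3F
                dst |= ((b[i] >> m) & 1) << (i * 8 + j); // b.qword[i].bit[m]
            }
        }
        dst
    }

The masked form above it additionally clears dst[i*8+j] when k[i*8+j] is not set.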
- - - Count the number of logical 1 bits in packed 16-bit integers in "a", and store - the results in "dst". - - DEFINE POPCNT(a) { - count := 0 - DO WHILE a > 0 - count += a[0] - a >>= 1 - OD - RETURN count - } - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := POPCNT(a[i+15:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512_BITALG -
immintrin.h
- Bit Manipulation + + + Count the number of logical 1 bits in packed 16-bit integers in "a", and store the results in "dst". + +DEFINE POPCNT(a) { + count := 0 + DO WHILE a > 0 + count += a[0] + a >>= 1 + OD + RETURN count +} +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := POPCNT(a[i+15:i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512_BITALG +
immintrin.h
+ Bit Manipulation
- - - - - Count the number of logical 1 bits in packed 16-bit integers in "a", and store - the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - DEFINE POPCNT(a) { - count := 0 - DO WHILE a > 0 - count += a[0] - a >>= 1 - OD - RETURN count - } - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := POPCNT(a[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_BITALG -
immintrin.h
- Bit Manipulation + + + + + Count the number of logical 1 bits in packed 16-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE POPCNT(a) { + count := 0 + DO WHILE a > 0 + count += a[0] + a >>= 1 + OD + RETURN count +} +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := POPCNT(a[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_BITALG +
immintrin.h
+ Bit Manipulation
- - - - Count the number of logical 1 bits in packed 16-bit integers in "a", and store - the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding - mask bit is not set). - - DEFINE POPCNT(a) { - count := 0 - DO WHILE a > 0 - count += a[0] - a >>= 1 - OD - RETURN count - } - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := POPCNT(a[i+15:i]) - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_BITALG -
immintrin.h
- Bit Manipulation + + + + Count the number of logical 1 bits in packed 16-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE POPCNT(a) { + count := 0 + DO WHILE a > 0 + count += a[0] + a >>= 1 + OD + RETURN count +} +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := POPCNT(a[i+15:i]) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_BITALG +
immintrin.h
+ Bit Manipulation
- - - Count the number of logical 1 bits in packed 8-bit integers in "a", and store - the results in "dst". - - DEFINE POPCNT(a) { - count := 0 - DO WHILE a > 0 - count += a[0] - a >>= 1 - OD - RETURN count - } - FOR j := 0 to 63 - i := j*8 - dst[i+7:i] := POPCNT(a[i+7:i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512_BITALG -
immintrin.h
- Bit Manipulation + + + Count the number of logical 1 bits in packed 8-bit integers in "a", and store the results in "dst". + +DEFINE POPCNT(a) { + count := 0 + DO WHILE a > 0 + count += a[0] + a >>= 1 + OD + RETURN count +} +FOR j := 0 to 63 + i := j*8 + dst[i+7:i] := POPCNT(a[i+7:i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512_BITALG +
immintrin.h
+ Bit Manipulation
- - - - - Count the number of logical 1 bits in packed 8-bit integers in "a", and store - the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - DEFINE POPCNT(a) { - count := 0 - DO WHILE a > 0 - count += a[0] - a >>= 1 - OD - RETURN count - } - FOR j := 0 to 63 - i := j*8 - IF k[j] - dst[i+7:i] := POPCNT(a[i+7:i]) - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_BITALG -
immintrin.h
- Bit Manipulation + + + + + Count the number of logical 1 bits in packed 8-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE POPCNT(a) { + count := 0 + DO WHILE a > 0 + count += a[0] + a >>= 1 + OD + RETURN count +} +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := POPCNT(a[i+7:i]) + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_BITALG +
immintrin.h
+ Bit Manipulation
- - - - Count the number of logical 1 bits in packed 8-bit integers in "a", and store - the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding - mask bit is not set). - - DEFINE POPCNT(a) { - count := 0 - DO WHILE a > 0 - count += a[0] - a >>= 1 - OD - RETURN count - } - FOR j := 0 to 63 - i := j*8 - IF k[j] - dst[i+7:i] := POPCNT(a[i+7:i]) - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_BITALG -
immintrin.h
- Bit Manipulation -
- - + + + + Count the number of logical 1 bits in packed 8-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE POPCNT(a) { + count := 0 + DO WHILE a > 0 + count += a[0] + a >>= 1 + OD + RETURN count +} +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := POPCNT(a[i+7:i]) + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_BITALG +
immintrin.h
+ Bit Manipulation + + + - Compute the inverse cosine of packed half-precision (16-bit) floating-point - elements in "a" expressed in radians, and store the results in "dst". + Compute the inverse cosine of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". - Trigonometry - - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := ACOS(a[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := ACOS(a[i+15:i]) +ENDFOR +dst[MAX:256] := 0 +
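The AVX512_FP16 entries from here on are purely element-wise: each 16-bit lane is transformed independently and the bits above the vector width are zeroed (these appear to be library-style sequences rather than single instructions). A plain-Rust sketch of the ACOS entry above, assuming the nightly f16 type this series enables and routing the math through f32, since f16 has no acos method:

    // Assumes #![feature(f16)] at crate level, as elsewhere in this series.
    fn acos_ph(a: [f16; 16]) -> [f16; 16] {
        let mut dst = [0.0f16; 16];
        for j in 0..16 {
            dst[j] = (a[j] as f32).acos() as f16; // dst[i+15:i] := ACOS(a[i+15:i])
        }
        dst
    }

The remaining unmasked entries follow the same shape with a different per-lane function.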
immintrin.h
AVX512_FP16 - Compute the inverse hyperbolic cosine of packed half-precision (16-bit) - floating-point elements in "a" expressed in radians, and store the results in "dst". + Compute the inverse hyperbolic cosine of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". - Trigonometry - - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := ACOSH(a[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := ACOSH(a[i+15:i]) +ENDFOR +dst[MAX:256] := 0 +
immintrin.h
AVX512_FP16 - Compute the inverse sine of packed half-precision (16-bit) floating-point - elements in "a" expressed in radians, and store the results in "dst". + Compute the inverse sine of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". - Trigonometry - - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := ASIN(a[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := ASIN(a[i+15:i]) +ENDFOR +dst[MAX:256] := 0 +
immintrin.h
AVX512_FP16 - Compute the inverse hyperbolic sine of packed half-precision (16-bit) - floating-point elements in "a" expressed in radians, and store the results in "dst". + Compute the inverse hyperbolic sine of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". - Trigonometry - - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := ASINH(a[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := ASINH(a[i+15:i]) +ENDFOR +dst[MAX:256] := 0 +
immintrin.h
AVX512_FP16 - Compute the inverse tangent of packed half-precision (16-bit) floating-point - elements in "a" divided by packed elements in "b", and store the results in "dst" - expressed in radians. + Compute the inverse tangent of packed half-precision (16-bit) floating-point elements in "a" divided by packed elements in "b", and store the results in "dst" expressed in radians. - Trigonometry - - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := ATAN2(a[i+15:i], b[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := ATAN2(a[i+15:i], b[i+15:i]) +ENDFOR +dst[MAX:256] := 0 +
immintrin.h
AVX512_FP16 - Compute the inverse tangent of packed half-precision (16-bit) floating-point - elements in "a" expressed in radians, and store the results in "dst". + Compute the inverse tangent of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". - Trigonometry - - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := ATAN(a[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := ATAN(a[i+15:i]) +ENDFOR +dst[MAX:256] := 0 +
immintrin.h
AVX512_FP16 - Compute the inverse hyperbolic tangent of packed half-precision (16-bit) - floating-point elements in "a" expressed in radians, and store the results in "dst". + Compute the inverse hyperbolic tangent of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". - Trigonometry - - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := ATANH(a[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := ATANH(a[i+15:i]) +ENDFOR +dst[MAX:256] := 0 +
immintrin.h
AVX512_FP16 - Compute the cube root of packed half-precision (16-bit) floating-point elements - in "a", and store the results in "dst". + Compute the cube root of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". - Elementary Math Functions - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := CubeRoot(a[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions
+FOR j := 0 to 15
+ i := j*16
+ dst[i+15:i] := CubeRoot(a[i+15:i])
+ENDFOR
+dst[MAX:256] := 0
+
immintrin.h
AVX512_FP16 - Compute the cumulative distribution function of packed half-precision (16-bit) - floating-point elements in "a" using the normal distribution, and store the results in - "dst". + Compute the cumulative distribution function of packed half-precision (16-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst". - Probability/Statistics - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := CDFNormal(a[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Probability/Statistics
+FOR j := 0 to 15
+ i := j*16
+ dst[i+15:i] := CDFNormal(a[i+15:i])
+ENDFOR
+dst[MAX:256] := 0
+
immintrin.h
AVX512_FP16 - Compute the inverse cumulative distribution function of packed half-precision - (16-bit) floating-point elements in "a" using the normal distribution, and store the - results in "dst". + Compute the inverse cumulative distribution function of packed half-precision (16-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst". - Probability/Statistics - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := InverseCDFNormal(a[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Probability/Statistics
+FOR j := 0 to 15
+ i := j*16
+ dst[i+15:i] := InverseCDFNormal(a[i+15:i])
+ENDFOR
+dst[MAX:256] := 0
+
immintrin.h
AVX512_FP16 - Compute the cosine of packed half-precision (16-bit) floating-point elements in - "a" expressed in radians, and store the results in "dst". + Compute the cosine of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". - Trigonometry - - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := COS(a[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := COS(a[i+15:i]) +ENDFOR +dst[MAX:256] := 0 +
immintrin.h
AVX512_FP16 - Compute the cosine of packed half-precision (16-bit) floating-point elements in - "a" expressed in degrees, and store the results in "dst". + Compute the cosine of packed half-precision (16-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst". - Trigonometry - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := COSD(a[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry
+FOR j := 0 to 15
+ i := j*16
+ dst[i+15:i] := COSD(a[i+15:i])
+ENDFOR
+dst[MAX:256] := 0
+
immintrin.h
AVX512_FP16 - Compute the hyperbolic cosine of packed half-precision (16-bit) floating-point - elements in "a" expressed in radians, and store the results in "dst". + Compute the hyperbolic cosine of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". - Trigonometry - - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := COSH(a[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := COSH(a[i+15:i]) +ENDFOR +dst[MAX:256] := 0 +
immintrin.h
AVX512_FP16 - Compute the error function of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst". + Compute the error function of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". - Probability/Statistics - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := ERF(a[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Probability/Statistics
+FOR j := 0 to 15
+ i := j*16
+ dst[i+15:i] := ERF(a[i+15:i])
+ENDFOR
+dst[MAX:256] := 0
+
immintrin.h
AVX512_FP16 - Compute the complementary error function of packed half-precision (16-bit) - floating-point elements in "a", and store the results in "dst". + Compute the complementary error function of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". - Probability/Statistics - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := 1.0 - ERF(a[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Probability/Statistics
+FOR j := 0 to 15
+ i := j*16
+ dst[i+15:i] := 1.0 - ERF(a[i+15:i])
+ENDFOR
+dst[MAX:256] := 0
+
immintrin.h
AVX512_FP16 - Compute the inverse complementary error function of packed half-precision - (16-bit) floating-point elements in "a", and store the results in "dst". + Compute the inverse complementary error function of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". - Probability/Statistics - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := 1.0 / (1.0 - ERF(a[i+15:i])) - ENDFOR - dst[MAX:256] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Probability/Statistics
+FOR j := 0 to 15
+ i := j*16
+ dst[i+15:i] := 1.0 / (1.0 - ERF(a[i+15:i]))
+ENDFOR
+dst[MAX:256] := 0
+
immintrin.h
AVX512_FP16 - Compute the inverse error function of packed half-precision (16-bit) - floating-point elements in "a", and store the results in "dst". + Compute the inverse error function of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". - Probability/Statistics - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := 1.0 / ERF(a[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Probability/Statistics
+FOR j := 0 to 15
+ i := j*16
+ dst[i+15:i] := 1.0 / ERF(a[i+15:i])
+ENDFOR
+dst[MAX:256] := 0
+
immintrin.h
AVX512_FP16 - Compute the exponential value of 10 raised to the power of packed - half-precision (16-bit) floating-point elements in "a", and store the results in "dst". + Compute the exponential value of 10 raised to the power of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". - Elementary Math Functions - - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := POW(FP16(10.0), a[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := POW(FP16(10.0), a[i+15:i]) +ENDFOR +dst[MAX:256] := 0 +
immintrin.h
AVX512_FP16 - Compute the exponential value of 2 raised to the power of packed half-precision - (16-bit) floating-point elements in "a", and store the results in "dst". + Compute the exponential value of 2 raised to the power of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". - Elementary Math Functions - - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := POW(FP16(2.0), a[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := POW(FP16(2.0), a[i+15:i]) +ENDFOR +dst[MAX:256] := 0 +
immintrin.h
AVX512_FP16 - Compute the exponential value of "e" raised to the power of packed - half-precision (16-bit) floating-point elements in "a", and store the results in "dst". + Compute the exponential value of "e" raised to the power of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". - Elementary Math Functions - - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := POW(FP16(e), a[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := POW(FP16(e), a[i+15:i]) +ENDFOR +dst[MAX:256] := 0 +
immintrin.h
AVX512_FP16 - Compute the exponential value of "e" raised to the power of packed - half-precision (16-bit) floating-point elements in "a", subtract one from each element, - and store the results in "dst". + Compute the exponential value of "e" raised to the power of packed half-precision (16-bit) floating-point elements in "a", subtract one from each element, and store the results in "dst". - Elementary Math Functions - - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := POW(FP16(e), a[i+15:i]) - 1.0 - ENDFOR - dst[MAX:256] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := POW(FP16(e), a[i+15:i]) - 1.0 +ENDFOR +dst[MAX:256] := 0 +
immintrin.h
AVX512_FP16




- Compute the length of the hypotenous of a right triangle, with the lengths of
- the other two sides of the triangle stored as packed half-precision (16-bit)
- floating-point elements in "a" and "b", and store the results in "dst".
+ Compute the length of the hypotenuse of a right triangle, with the lengths of the other two sides of the triangle stored as packed half-precision (16-bit) floating-point elements in "a" and "b", and store the results in "dst".

- Elementary Math Functions
- 
- FOR j := 0 to 15
- i := j*16
- dst[i+15:i] := SQRT(POW(a[i+15:i], 2.0) + POW(b[i+15:i], 2.0))
- ENDFOR
- dst[MAX:256] := 0
- 
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := SQRT(POW(a[i+15:i], 2.0) + POW(b[i+15:i], 2.0)) +ENDFOR +dst[MAX:256] := 0 +
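Per lane, the hypot entry above is the naive SQRT(POW(a, 2.0) + POW(b, 2.0)). A one-line f32 model (a sketch; std's f32::hypot computes the same quantity with more protection against intermediate overflow than this form):

    fn hypot_lane(a: f32, b: f32) -> f32 {
        (a * a + b * b).sqrt() // SQRT(POW(a[i+15:i], 2.0) + POW(b[i+15:i], 2.0))
    }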
immintrin.h
AVX512_FP16 - Compute the inverse cube root of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst". + Compute the inverse cube root of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". - Elementary Math Functions - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := InvCubeRoot(a[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions
+FOR j := 0 to 15
+ i := j*16
+ dst[i+15:i] := InvCubeRoot(a[i+15:i])
+ENDFOR
+dst[MAX:256] := 0
+
immintrin.h
AVX512_FP16 - Compute the inverse square root of packed half-precision (16-bit) - floating-point elements in "a", and store the results in "dst". + Compute the inverse square root of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". - Elementary Math Functions - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := InvSQRT(a[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions
+FOR j := 0 to 15
+ i := j*16
+ dst[i+15:i] := InvSQRT(a[i+15:i])
+ENDFOR
+dst[MAX:256] := 0
+
immintrin.h
AVX512_FP16 - Compute the base-10 logarithm of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst". + Compute the base-10 logarithm of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". - Elementary Math Functions - - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := LOG(a[i+15:i]) / LOG(10.0) - ENDFOR - dst[MAX:256] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := LOG(a[i+15:i]) / LOG(10.0) +ENDFOR +dst[MAX:256] := 0 +
immintrin.h
AVX512_FP16 - Compute the natural logarithm of one plus packed half-precision (16-bit) - floating-point elements in "a", and store the results in "dst". + Compute the natural logarithm of one plus packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". - Elementary Math Functions - - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := LOG(1.0 + a[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := LOG(1.0 + a[i+15:i]) +ENDFOR +dst[MAX:256] := 0 +
immintrin.h
AVX512_FP16 - Compute the base-2 logarithm of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst". + Compute the base-2 logarithm of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". - Elementary Math Functions - - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := LOG(a[i+15:i]) / LOG(2.0) - ENDFOR - dst[MAX:256] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := LOG(a[i+15:i]) / LOG(2.0) +ENDFOR +dst[MAX:256] := 0 +
immintrin.h
AVX512_FP16 - Compute the natural logarithm of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst". + Compute the natural logarithm of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". - Elementary Math Functions - - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := LOG(a[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := LOG(a[i+15:i]) +ENDFOR +dst[MAX:256] := 0 +
immintrin.h
AVX512_FP16 - Convert the exponent of each packed half-precision (16-bit) floating-point - element in "a" to a half-precision floating-point number representing the integer - exponent, and store the results in "dst". This intrinsic essentially calculates - "floor(log2(x))" for each element. + Convert the exponent of each packed half-precision (16-bit) floating-point element in "a" to a half-precision floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element. - Elementary Math Functions - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := ConvertExpFP16(a[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions
+FOR j := 0 to 15
+ i := j*16
+ dst[i+15:i] := ConvertExpFP16(a[i+15:i])
+ENDFOR
+dst[MAX:256] := 0
+
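ConvertExpFP16 above is essentially floor(log2(x)) returned as a floating-point value. An f32 sketch (the abs() and the zero/infinity/NaN handling a real implementation needs are assumptions here, not spelled out by the pseudocode):

    fn getexp_lane(x: f32) -> f32 {
        x.abs().log2().floor() // floor(log2(x)), special cases ignored
    }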
immintrin.h
AVX512_FP16 - Compute the exponential value of packed half-precision (16-bit) floating-point - elements in "a" raised by packed elements in "b", and store the results in "dst". + Compute the exponential value of packed half-precision (16-bit) floating-point elements in "a" raised by packed elements in "b", and store the results in "dst". - Elementary Math Functions - - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := POW(a[i+15:i], b[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := POW(a[i+15:i], b[i+15:i]) +ENDFOR +dst[MAX:256] := 0 +
immintrin.h
AVX512_FP16 - Compute the sine of packed half-precision (16-bit) floating-point elements in - "a" expressed in radians, and store the results in "dst". + Compute the sine of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". - Trigonometry - - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := SIN(a[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := SIN(a[i+15:i]) +ENDFOR +dst[MAX:256] := 0 +
immintrin.h
AVX512_FP16 - Compute the sine and cosine of packed half-precision (16-bit) floating-point - elements in "a" expressed in radians, store the sine in "dst", and store the cosine into - memory at "mem_addr". + Compute the sine and cosine of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, store the sine in "dst", and store the cosine into memory at "mem_addr". - Trigonometry - - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := SIN(a[i+15:i]) - MEM[mem_addr+i+15:mem_addr+i] := COS(a[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - cos_res[MAX:256] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := SIN(a[i+15:i]) + MEM[mem_addr+i+15:mem_addr+i] := COS(a[i+15:i]) +ENDFOR +dst[MAX:256] := 0 +cos_res[MAX:256] := 0 +
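The sincos entry above is the one dual-output operation in this block: the sine of each lane is returned while the cosine is written through mem_addr. Modeled in Rust (a sketch in f32, using std's sin_cos):

    fn sincos_lanes(a: &[f32; 16], cos_out: &mut [f32; 16]) -> [f32; 16] {
        let mut sin = [0.0f32; 16];
        for j in 0..16 {
            let (s, c) = a[j].sin_cos();
            sin[j] = s;     // dst[i+15:i] := SIN(a[i+15:i])
            cos_out[j] = c; // MEM[mem_addr+i+15:mem_addr+i] := COS(a[i+15:i])
        }
        sin
    }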
immintrin.h
AVX512_FP16 - Compute the sine of packed half-precision (16-bit) floating-point elements in - "a" expressed in degrees, and store the results in "dst". + Compute the sine of packed half-precision (16-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst". - Trigonometry - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := SIND(a[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry
+FOR j := 0 to 15
+ i := j*16
+ dst[i+15:i] := SIND(a[i+15:i])
+ENDFOR
+dst[MAX:256] := 0
+
immintrin.h
AVX512_FP16 - Compute the hyperbolic sine of packed half-precision (16-bit) floating-point - elements in "a" expressed in radians, and store the results in "dst". + Compute the hyperbolic sine of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". - Trigonometry - - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := SINH(a[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := SINH(a[i+15:i]) +ENDFOR +dst[MAX:256] := 0 +
immintrin.h
AVX512_FP16 - Round the packed half-precision (16-bit) floating-point elements in "a" up to - an integer value, and store the results as packed half-precision floating-point elements - in "dst". + Round the packed half-precision (16-bit) floating-point elements in "a" up to an integer value, and store the results as packed half-precision floating-point elements in "dst". - Special Math Functions - - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := CEIL(a[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Special Math Functions +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := CEIL(a[i+15:i]) +ENDFOR +dst[MAX:256] := 0 +
immintrin.h
AVX512_FP16 - Round the packed half-precision (16-bit) floating-point elements in "a" down to - an integer value, and store the results as packed half-precision floating-point elements - in "dst". + Round the packed half-precision (16-bit) floating-point elements in "a" down to an integer value, and store the results as packed half-precision floating-point elements in "dst". - Special Math Functions - - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := FLOOR(a[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Special Math Functions +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := FLOOR(a[i+15:i]) +ENDFOR +dst[MAX:256] := 0 +
immintrin.h
AVX512_FP16 - Round the packed half-precision (16-bit) floating-point elements in "a" to the - nearest integer value, and store the results as packed half-precision floating-point - elements in "dst". + Round the packed half-precision (16-bit) floating-point elements in "a" to the nearest integer value, and store the results as packed half-precision floating-point elements in "dst". - Special Math Functions - - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := ROUND(a[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Special Math Functions +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := ROUND(a[i+15:i]) +ENDFOR +dst[MAX:256] := 0 +
immintrin.h
AVX512_FP16 - Compute the square root of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst". Note that this intrinsic is less - efficient than "_mm_sqrt_ps". + Compute the square root of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". Note that this intrinsic is less efficient than "_mm_sqrt_ps". - Elementary Math Functions - - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := SQRT(a[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := SQRT(a[i+15:i]) +ENDFOR +dst[MAX:256] := 0 +
immintrin.h
AVX512_FP16 - Compute the tangent of packed half-precision (16-bit) floating-point elements - in "a" expressed in radians, and store the results in "dst". + Compute the tangent of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". - Trigonometry - - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := TAN(a[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := TAN(a[i+15:i]) +ENDFOR +dst[MAX:256] := 0 +
immintrin.h
AVX512_FP16 - Compute the tangent of packed half-precision (16-bit) floating-point elements - in "a" expressed in degrees, and store the results in "dst". + Compute the tangent of packed half-precision (16-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst". - Trigonometry - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := TAND(a[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry
+FOR j := 0 to 15
+ i := j*16
+ dst[i+15:i] := TAND(a[i+15:i])
+ENDFOR
+dst[MAX:256] := 0
+
immintrin.h
AVX512_FP16 - Compute the hyperbolic tangent of packed half-precision (16-bit) floating-point - elements in "a" expressed in radians, and store the results in "dst". + Compute the hyperbolic tangent of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". - Trigonometry - - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := TANH(a[i+15:i]) - ENDFOR - dst[MAX:256] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := TANH(a[i+15:i]) +ENDFOR +dst[MAX:256] := 0 +
immintrin.h
AVX512_FP16



- Truncate the packed half-precision (16-bit) floating-point elements in "a", and
- store the results as packed half-precision floating-point elements in "dst"
+ Truncate the packed half-precision (16-bit) floating-point elements in "a", and store the results as packed half-precision floating-point elements in "dst".

- Special Math Functions
- FOR j := 0 to 15
- i := j*16
- dst[i+15:i] := TRUNCATE(a[i+15:i])
- ENDFOR
- dst[MAX:256] := 0
- 
immintrin.h
- AVX512_FP16 -
+ Special Math Functions
+FOR j := 0 to 15
+ i := j*16
+ dst[i+15:i] := TRUNCATE(a[i+15:i])
+ENDFOR
+dst[MAX:256] := 0
+
immintrin.h
AVX512_FP16 - Compute the inverse cosine of packed half-precision (16-bit) floating-point - elements in "a" expressed in radians, and store the results in "dst". + Compute the inverse cosine of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". - Trigonometry - - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := ACOS(a[i+15:i]) - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := ACOS(a[i+15:i]) +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Compute the inverse hyperbolic cosine of packed half-precision (16-bit) - floating-point elements in "a" expressed in radians, and store the results in "dst". + Compute the inverse hyperbolic cosine of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". - Trigonometry - - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := ACOSH(a[i+15:i]) - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := ACOSH(a[i+15:i]) +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Compute the inverse sine of packed half-precision (16-bit) floating-point - elements in "a" expressed in radians, and store the results in "dst". + Compute the inverse sine of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". - Trigonometry - - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := ASIN(a[i+15:i]) - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := ASIN(a[i+15:i]) +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Compute the inverse hyperbolic sine of packed half-precision (16-bit) - floating-point elements in "a" expressed in radians, and store the results in "dst". + Compute the inverse hyperbolic sine of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". - Trigonometry - - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := ASINH(a[i+15:i]) - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := ASINH(a[i+15:i]) +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Compute the inverse tangent of packed half-precision (16-bit) floating-point - elements in "a" divided by packed elements in "b", and store the results in "dst" - expressed in radians. + Compute the inverse tangent of packed half-precision (16-bit) floating-point elements in "a" divided by packed elements in "b", and store the results in "dst" expressed in radians. - Trigonometry - - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := ATAN2(a[i+15:i], b[i+15:i]) - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := ATAN2(a[i+15:i], b[i+15:i]) +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Compute the inverse tangent of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst" expressed in radians. + Compute the inverse tangent of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" expressed in radians. - Trigonometry - - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := ATAN(a[i+15:i]) - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := ATAN(a[i+15:i]) +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16



- Compute the inverse hyperblic tangent of packed half-precision (16-bit)
- floating-point elements in "a", and store the results in "dst" expressed in radians.
+ Compute the inverse hyperbolic tangent of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" expressed in radians.

- Trigonometry
- 
- FOR j := 0 to 31
- i := j*16
- dst[i+15:i] := ATANH(a[i+15:i])
- ENDFOR
- dst[MAX:512] := 0
- 
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := ATANH(a[i+15:i]) +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Compute the cube root of packed half-precision (16-bit) floating-point elements - in "a", and store the results in "dst". + Compute the cube root of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". - Elementary Math Functions - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := CubeRoot(a[i+15:i]) - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions
+FOR j := 0 to 31
+ i := j*16
+ dst[i+15:i] := CubeRoot(a[i+15:i])
+ENDFOR
+dst[MAX:512] := 0
+
immintrin.h
AVX512_FP16 - Compute the cumulative distribution function of packed half-precision (16-bit) - floating-point elements in "a" using the normal distribution, and store the results in - "dst". + Compute the cumulative distribution function of packed half-precision (16-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst". - Probability/Statistics - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := CDFNormal(a[i+15:i]) - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Probability/Statistics
+FOR j := 0 to 31
+ i := j*16
+ dst[i+15:i] := CDFNormal(a[i+15:i])
+ENDFOR
+dst[MAX:512] := 0
+
immintrin.h
AVX512_FP16 - Compute the inverse cumulative distribution function of packed half-precision - (16-bit) floating-point elements in "a" using the normal distribution, and store the - results in "dst". + Compute the inverse cumulative distribution function of packed half-precision (16-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst". - Probability/Statistics - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := InverseCDFNormal(a[i+15:i]) - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Probability/Statistics
+FOR j := 0 to 31
+ i := j*16
+ dst[i+15:i] := InverseCDFNormal(a[i+15:i])
+ENDFOR
+dst[MAX:512] := 0
+
immintrin.h
AVX512_FP16 - Round the packed half-precision (16-bit) floating-point elements in "a" up to - an integer value, and store the results as packed half-precision floating-point elements - in "dst". + Round the packed half-precision (16-bit) floating-point elements in "a" up to an integer value, and store the results as packed half-precision floating-point elements in "dst". - Special Math Functions - - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := CEIL(a[i+15:i]) - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Special Math Functions +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := CEIL(a[i+15:i]) +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Compute the cosine of packed half-precision (16-bit) floating-point elements in - "a" expressed in radians, and store the results in "dst". + Compute the cosine of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". - Trigonometry - - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := COS(a[i+15:i]) - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := COS(a[i+15:i]) +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Compute the cosine of packed half-precision (16-bit) floating-point elements in - "a" expressed in degrees, and store the results in "dst". + Compute the cosine of packed half-precision (16-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst". - Trigonometry - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := COSD(a[i+15:i]) - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry
+FOR j := 0 to 31
+ i := j*16
+ dst[i+15:i] := COSD(a[i+15:i])
+ENDFOR
+dst[MAX:512] := 0
+
immintrin.h
AVX512_FP16 - Compute the hyperbolic cosine of packed half-precision (16-bit) floating-point - elements in "a" expressed in radians, and store the results in "dst". + Compute the hyperbolic cosine of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". - Trigonometry - - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := COSH(a[i+15:i]) - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := COSH(a[i+15:i]) +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Compute the error function of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst". + Compute the error function of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". - Probability/Statistics - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := ERF(a[i+15:i]) - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Probability/Statistics
+FOR j := 0 to 31
+ i := j*16
+ dst[i+15:i] := ERF(a[i+15:i])
+ENDFOR
+dst[MAX:512] := 0
+
immintrin.h
AVX512_FP16 - Compute the complementary error function of packed half-precision (16-bit) - floating-point elements in "a", and store the results in "dst". + Compute the complementary error function of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". - Probability/Statistics - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := 1.0 - ERF(a[i+15:i]) - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Probability/Statistics
+FOR j := 0 to 31
+ i := j*16
+ dst[i+15:i] := 1.0 - ERF(a[i+15:i])
+ENDFOR
+dst[MAX:512] := 0
+
immintrin.h
AVX512_FP16 - Compute the inverse complementary error function of packed half-precision - (16-bit) floating-point elements in "a", and store the results in "dst". + Compute the inverse complementary error function of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". - Probability/Statistics - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := 1.0 / (1.0 - ERF(a[i+15:i])) - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Probability/Statistics
+FOR j := 0 to 31
+ i := j*16
+ dst[i+15:i] := 1.0 / (1.0 - ERF(a[i+15:i]))
+ENDFOR
+dst[MAX:512] := 0
+
immintrin.h
AVX512_FP16 - Compute the inverse error function of packed half-precision (16-bit) - floating-point elements in "a", and store the results in "dst". + Compute the inverse error function of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". - Probability/Statistics - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := 1.0 / ERF(a[i+15:i]) - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Probability/Statistics
+FOR j := 0 to 31
+ i := j*16
+ dst[i+15:i] := 1.0 / ERF(a[i+15:i])
+ENDFOR
+dst[MAX:512] := 0
+
immintrin.h
AVX512_FP16 - Compute the exponential value of 10 raised to the power of packed - half-precision (16-bit) floating-point elements in "a", and store the results in "dst". + Compute the exponential value of 10 raised to the power of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". - Elementary Math Functions - - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := POW(FP16(10.0), a[i+15:i]) - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := POW(FP16(10.0), a[i+15:i]) +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Compute the exponential value of 2 raised to the power of packed half-precision - (16-bit) floating-point elements in "a", and store the results in "dst". + Compute the exponential value of 2 raised to the power of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". - Elementary Math Functions - - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := POW(FP16(2.0), a[i+15:i]) - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := POW(FP16(2.0), a[i+15:i]) +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Compute the exponential value of "e" raised to the power of packed - half-precision (16-bit) floating-point elements in "a", and store the results in "dst". + Compute the exponential value of "e" raised to the power of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". - Elementary Math Functions - - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := POW(FP16(e), a[i+15:i]) - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := POW(FP16(e), a[i+15:i]) +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Compute the exponential value of "e" raised to the power of packed - half-precision (16-bit) floating-point elements in "a", subtract one from each element, - and store the results in "dst". + Compute the exponential value of "e" raised to the power of packed half-precision (16-bit) floating-point elements in "a", subtract one from each element, and store the results in "dst". - Elementary Math Functions - - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := POW(FP16(e), a[i+15:i]) - 1.0 - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := POW(FP16(e), a[i+15:i]) - 1.0 +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Round the packed half-precision (16-bit) floating-point elements in "a" down to - an integer value, and store the results as packed half-precision floating-point elements - in "dst". + Round the packed half-precision (16-bit) floating-point elements in "a" down to an integer value, and store the results as packed half-precision floating-point elements in "dst". - Special Math Functions - - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := FLOOR(a[i+15:i]) - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Special Math Functions +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := FLOOR(a[i+15:i]) +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16




- Compute the length of the hypotenous of a right triangle, with the lengths of
- the other two sides of the triangle stored as packed half-precision (16-bit)
- floating-point elements in "a" and "b", and store the results in "dst".
+ Compute the length of the hypotenuse of a right triangle, with the lengths of the other two sides of the triangle stored as packed half-precision (16-bit) floating-point elements in "a" and "b", and store the results in "dst".

- Elementary Math Functions
- 
- FOR j := 0 to 31
- i := j*16
- dst[i+15:i] := SQRT(POW(a[i+15:i], 2.0) + POW(b[i+15:i], 2.0))
- ENDFOR
- dst[MAX:512] := 0
- 
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := SQRT(POW(a[i+15:i], 2.0) + POW(b[i+15:i], 2.0)) +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Compute the inverse square root of packed half-precision (16-bit) - floating-point elements in "a", and store the results in "dst". + Compute the inverse square root of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". - Elementary Math Functions - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := InvSQRT(a[i+15:i]) - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions
+FOR j := 0 to 31
+ i := j*16
+ dst[i+15:i] := InvSQRT(a[i+15:i])
+ENDFOR
+dst[MAX:512] := 0
+
immintrin.h
AVX512_FP16 - Compute the base-10 logarithm of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst". + Compute the base-10 logarithm of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". - Elementary Math Functions - - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := LOG(a[i+15:i]) / LOG(10.0) - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := LOG(a[i+15:i]) / LOG(10.0) +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Compute the natural logarithm of one plus packed half-precision (16-bit) - floating-point elements in "a", and store the results in "dst". + Compute the natural logarithm of one plus packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". - Elementary Math Functions - - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := LOG(1.0 + a[i+15:i]) - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := LOG(1.0 + a[i+15:i]) +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Compute the base-2 logarithm of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst". + Compute the base-2 logarithm of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". - Elementary Math Functions - - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := LOG(a[i+15:i]) / LOG(2.0) - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := LOG(a[i+15:i]) / LOG(2.0) +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Compute the natural logarithm of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst". + Compute the natural logarithm of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". - Elementary Math Functions - - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := LOG(a[i+15:i]) - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := LOG(a[i+15:i]) +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Convert the exponent of each packed half-precision (16-bit) floating-point - element in "a" to a half-precision floating-point number representing the integer - exponent, and store the results in "dst". This intrinsic essentially calculates - "floor(log2(x))" for each element. + Convert the exponent of each packed half-precision (16-bit) floating-point element in "a" to a half-precision floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element. - Elementary Math Functions - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := ConvertExpFP16(a[i+15:i]) - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions
+FOR j := 0 to 31
+ i := j*16
+ dst[i+15:i] := ConvertExpFP16(a[i+15:i])
+ENDFOR
+dst[MAX:512] := 0
+
immintrin.h
AVX512_FP16 - Compute the inverse cosine of packed half-precision (16-bit) floating-point - elements in "a" expressed in radians, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). + Compute the inverse cosine of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). - Trigonometry - - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := ACOS(a[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := ACOS(a[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 +
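From here to the end of this hunk, the masked entries all share one merge pattern: lane j becomes op(a[j]) when k[j] is set and keeps src[j] otherwise. A generic sketch (hypothetical helper, f32 standing in for f16):

    fn mask_map_lanes(src: [f32; 32], k: u32, a: [f32; 32], op: impl Fn(f32) -> f32) -> [f32; 32] {
        let mut dst = src; // unselected lanes keep their src values
        for j in 0..32 {
            if (k >> j) & 1 == 1 {
                dst[j] = op(a[j]);
            }
        }
        dst
    }

For example, mask_map_lanes(src, k, a, |x| x.acos()) models the masked ACOS entry above.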
immintrin.h
AVX512_FP16 - Compute the inverse hyperbolic cosine of packed half-precision (16-bit) - floating-point elements in "a" expressed in radians, and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). + Compute the inverse hyperbolic cosine of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). - Trigonometry - - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := ACOSH(a[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := ACOSH(a[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Compute the inverse sine of packed half-precision (16-bit) floating-point - elements in "a" expressed in radians, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). + Compute the inverse sine of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). - Trigonometry - - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := ASIN(a[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := ASIN(a[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Compute the inverse hyperbolic sine of packed half-precision (16-bit) - floating-point elements in "a" expressed in radians, and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). + Compute the inverse hyperbolic sine of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). - Trigonometry - - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := ASINH(a[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := ASINH(a[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Compute the inverse tangent of packed half-precision (16-bit) floating-point - elements in "a" expressed in radians, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). + Compute the inverse tangent of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). - Trigonometry - - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := ATAN(a[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := ATAN(a[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Compute the inverse hyperbolic tangent of packed half-precision (16-bit) - floating-point elements in "a" expressed in radians, and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). + Compute the inverse hyperbolic tangent of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). - Trigonometry - - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := ATANH(a[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := ATANH(a[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Compute the cube root of packed half-precision (16-bit) floating-point elements - in "a", and store the results in "dst" using writemask "k" (elements are copied from - "src" when the corresponding mask bit is not set). + Compute the cube root of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). - Elementary Math Functions - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := CubeRoot(a[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := CubeRoot(a[i+15:i])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+
immintrin.h
AVX512_FP16 - Compute the cumulative distribution function of packed half-precision (16-bit) - floating-point elements in "a" using the normal distribution, and store the results in - "dst" using writemask "k" (elements are copied from "src" when the corresponding mask - bit is not set). + Compute the cumulative distribution function of packed half-precision (16-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). - Probability/Statistics - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := CDFNormal(a[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Probability/Statistics
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := CDFNormal(a[i+15:i])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+
immintrin.h
AVX512_FP16 - Compute the inverse cumulative distribution function of packed half-precision - (16-bit) floating-point elements in "a" using the normal distribution, and store the - results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). + Compute the inverse cumulative distribution function of packed half-precision (16-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). - Probability/Statistics - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := InverseCDFNormal(a[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Probability/Statistics
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := InverseCDFNormal(a[i+15:i])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+
immintrin.h
AVX512_FP16 - Round the packed half-precision (16-bit) floating-point elements in "a" up to - an integer value, and store the results as packed half-precision floating-point elements - in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask - bit is not set). + Round the packed half-precision (16-bit) floating-point elements in "a" up to an integer value, and store the results as packed half-precision floating-point elements in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). - Special Math Functions - - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := CEIL(a[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Special Math Functions +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := CEIL(a[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Compute the cosine of packed half-precision (16-bit) floating-point elements in - "a" expressed in radians, and store the results in "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). + Compute the cosine of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). - Trigonometry - - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := COS(a[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := COS(a[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Compute the cosine of packed half-precision (16-bit) floating-point elements in - "a" expressed in degrees, and store the results in "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). + Compute the cosine of packed half-precision (16-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). - Trigonometry - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := COSD(a[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := COSD(a[i+15:i])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+
immintrin.h
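The `COSD` entry (and the `SIND`/`TAND` entries later in this hunk) differs from its radian counterpart only in that the input is interpreted in degrees. A sketch, assuming `f32` as a stand-in for `f16`:

```rust
// Scalar model of the degree-based entries: identical to the radian
// versions after a degrees-to-radians conversion. f32 stands in for
// f16; cosd is a hypothetical helper, not part of this patch.
fn cosd(x: f32) -> f32 {
    x.to_radians().cos()
}

fn main() {
    assert!((cosd(60.0) - 0.5).abs() < 1e-6); // cos(60 degrees) = 0.5
}
```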
AVX512_FP16 - Compute the hyperbolic cosine of packed half-precision (16-bit) floating-point - elements in "a" expressed in radians, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). + Compute the hyperbolic cosine of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). - Trigonometry - - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := COSH(a[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := COSH(a[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Compute the error function of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). + Compute the error function of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). - Probability/Statistics - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := ERF(a[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Probability/Statistics
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := ERF(a[i+15:i])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+
immintrin.h
AVX512_FP16 - Compute the complementary error function of packed half-precision (16-bit) - floating-point elements in "a", and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). + Compute the complementary error function of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). - Probability/Statistics - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := 1.0 - ERF(a[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Probability/Statistics
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := 1.0 - ERF(a[i+15:i])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+
immintrin.h
AVX512_FP16 - Compute the inverse complementary error function of packed half-precision - (16-bit) floating-point elements in "a", and store the results in "dst" using writemask - "k" (elements are copied from "src" when the corresponding mask bit is not set). + Compute the inverse complementary error function of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). - Probability/Statistics - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := 1.0 / (1.0 - ERF(a[i+15:i])) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Probability/Statistics
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := 1.0 / (1.0 - ERF(a[i+15:i]))
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+
immintrin.h
AVX512_FP16 - Compute the inverse error function of packed half-precision (16-bit) - floating-point elements in "a", and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). + Compute the inverse error function of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). - Probability/Statistics - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := 1.0 / ERF(a[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Probability/Statistics
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := 1.0 / ERF(a[i+15:i])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+
immintrin.h
AVX512_FP16 - Compute the exponential value of 10 raised to the power of packed - half-precision (16-bit) floating-point elements in "a", and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). + Compute the exponential value of 10 raised to the power of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). - Elementary Math Functions - - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := POW(FP16(10.0), a[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := POW(FP16(10.0), a[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Compute the exponential value of 2 raised to the power of packed half-precision - (16-bit) floating-point elements in "a", and store the results in "dst" using writemask - "k" (elements are copied from "src" when the corresponding mask bit is not set). + Compute the exponential value of 2 raised to the power of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). - Elementary Math Functions - - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := POW(FP16(2.0), a[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := POW(FP16(2.0), a[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Compute the exponential value of "e" raised to the power of packed - half-precision (16-bit) floating-point elements in "a", and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). + Compute the exponential value of "e" raised to the power of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). - Elementary Math Functions - - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := POW(FP16(e), a[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := POW(FP16(e), a[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Compute the exponential value of "e" raised to the power of packed - half-precision (16-bit) floating-point elements in "a", subtract one from each element, - and store the results in "dst" using writemask "k" (elements are copied from "src" when - the corresponding mask bit is not set). + Compute the exponential value of "e" raised to the power of packed half-precision (16-bit) floating-point elements in "a", subtract one from each element, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). - Elementary Math Functions - - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := POW(FP16(e), a[i+15:i]) - 1.0 - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := POW(FP16(e), a[i+15:i]) - 1.0 + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
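The four exponential entries above map directly onto Rust float methods, except that std has no `exp10`, so the `POW(FP16(10.0), a)` form is spelled with `powf` below. A scalar sketch, with `f32` standing in for `f16`:

```rust
// Scalar mapping of the EXP10/EXP2/EXP/EXPM1 pseudocode onto Rust
// float methods. f32 stands in for the unstable f16.
fn main() {
    let x = 1.5_f32;
    assert!((10.0_f32.powf(x) - 31.622776).abs() < 1e-3); // EXP10
    assert!((x.exp2() - 2.828427).abs() < 1e-5);          // EXP2
    assert!((x.exp() - 4.481689).abs() < 1e-5);           // EXP
    // EXPM1: same value as exp(x) - 1.0, but exp_m1 avoids
    // cancellation for x near zero.
    assert!((x.exp_m1() - (x.exp() - 1.0)).abs() < 1e-5);
}
```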
AVX512_FP16 - Round the packed half-precision (16-bit) floating-point elements in "a" down to - an integer value, and store the results as packed half-precision floating-point elements - in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask - bit is not set). + Round the packed half-precision (16-bit) floating-point elements in "a" down to an integer value, and store the results as packed half-precision floating-point elements in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). - Special Math Functions - - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := FLOOR(a[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Special Math Functions +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := FLOOR(a[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Compute the inverse square root of packed half-precision (16-bit) - floating-point elements in "a", and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). + Compute the inverse square root of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). - Elementary Math Functions - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := InvSQRT(a[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := InvSQRT(a[i+15:i])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+
immintrin.h
AVX512_FP16 - Compute the base-10 logarithm of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). + Compute the base-10 logarithm of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). - Elementary Math Functions - - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := LOG(a[i+15:i]) / LOG(10.0) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := LOG(a[i+15:i]) / LOG(10.0) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Compute the natural logarithm of one plus packed half-precision (16-bit) - floating-point elements in "a", and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). + Compute the natural logarithm of one plus packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). - Elementary Math Functions - - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := LOG(1.0 + a[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := LOG(1.0 + a[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Compute the base-2 logarithm of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). + Compute the base-2 logarithm of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). - Elementary Math Functions - - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := LOG(a[i+15:i]) / LOG(2.0) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := LOG(a[i+15:i]) / LOG(2.0) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Compute the natural logarithm of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). + Compute the natural logarithm of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). - Elementary Math Functions - - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := LOG(a[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := LOG(a[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
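The logarithm entries above write the base-10 and base-2 forms as `LOG(a)/LOG(base)`; Rust's dedicated methods compute the same values, as this scalar sketch (with `f32` standing in for `f16`) checks:

```rust
// Scalar mapping of the LOG10/LOG1P/LOG2/LOG pseudocode onto Rust
// float methods. f32 stands in for the unstable f16.
fn main() {
    let x = 0.5_f32;
    assert!((x.log10() - x.ln() / 10.0_f32.ln()).abs() < 1e-6); // LOG(a)/LOG(10.0)
    assert_eq!(x.log2(), -1.0);                       // LOG(a)/LOG(2.0)
    assert!((x.ln_1p() - 1.5_f32.ln()).abs() < 1e-6); // LOG(1.0 + a)
}
```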
AVX512_FP16 - Convert the exponent of each packed half-precision (16-bit) floating-point - element in "a" to a half-precision floating-point number representing the integer - exponent, and store the results in "dst" using writemask "k" (elements are copied from - "src" when the corresponding mask bit is not set). This intrinsic essentially calculates - "floor(log2(x))" for each element. + Convert the exponent of each packed half-precision (16-bit) floating-point element in "a" to a half-precision floating-point number representing the integer exponent, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element. - Elementary Math Functions - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := ConvertExpFP16(a[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := ConvertExpFP16(a[i+15:i])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+
immintrin.h
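The `ConvertExpFP16` entry documents itself as computing `floor(log2(x))` per lane; a quick scalar check of that identity, again with `f32` standing in for `f16`:

```rust
// ConvertExpFP16 is documented above as floor(log2(x)) per lane;
// convert_exp is a hypothetical scalar helper checking that reading.
fn convert_exp(x: f32) -> f32 {
    x.log2().floor()
}

fn main() {
    assert_eq!(convert_exp(1.0), 0.0);  // 2^0 <= 1.0 < 2^1
    assert_eq!(convert_exp(8.0), 3.0);  // exactly 2^3
    assert_eq!(convert_exp(0.3), -2.0); // 0.25 <= 0.3 < 0.5
}
```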
AVX512_FP16 - Rounds each packed half-precision (16-bit) floating-point element in "a" to the - nearest integer value and stores the results as packed half-precision floating-point - elements in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). + Rounds each packed half-precision (16-bit) floating-point element in "a" to the nearest integer value and stores the results as packed half-precision floating-point elements in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). - Special Math Functions - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := NearbyInt(a[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Special Math Functions
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := NearbyInt(a[i+15:i])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+
immintrin.h
AVX512_FP16 - Computes the reciprocal of packed half-precision (16-bit) floating-point - elements in "a", storing the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). + Computes the reciprocal of packed half-precision (16-bit) floating-point elements in "a", storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). - Elementary Math Functions - - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := (1.0 / a[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := (1.0 / a[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Rounds the packed half-precision (16-bit) floating-point elements in "a" to the - nearest even integer value and stores the results in "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). + Rounds the packed half-precision (16-bit) floating-point elements in "a" to the nearest even integer value and stores the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). - Special Math Functions - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := RoundToNearestEven(a[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Special Math Functions
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := RoundToNearestEven(a[i+15:i])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+
immintrin.h
AVX512_FP16 - Compute the sine of packed half-precision (16-bit) floating-point elements in - "a" expressed in radians, and store the results in "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). + Compute the sine of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). - Trigonometry - - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := SIN(a[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := SIN(a[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Compute the sine and cosine of packed half-precision (16-bit) floating-point - elements in "a" expressed in radians, store the sine in "dst", store the cosine into - memory at "mem_addr". Elements are written to their respective locations using writemask - "k" (elements are copied from "sin_src" or "cos_src" when the corresponding mask bit is - not set). + Compute the sine and cosine of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, store the sine in "dst", store the cosine into memory at "mem_addr". Elements are written to their respective locations using writemask "k" (elements are copied from "sin_src" or "cos_src" when the corresponding mask bit is not set). - Trigonometry - - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := SIN(a[i+15:i]) - MEM[mem_addr+i+15:mem_addr+i] := COS(a[i+15:i]) - ELSE - dst[i+15:i] := sin_src[i+15:i] - MEM[mem_addr+i+15:mem_addr+i] := cos_src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - cos_res[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := SIN(a[i+15:i]) + MEM[mem_addr+i+15:mem_addr+i] := COS(a[i+15:i]) + ELSE + dst[i+15:i] := sin_src[i+15:i] + MEM[mem_addr+i+15:mem_addr+i] := cos_src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 +cos_res[MAX:512] := 0 +
immintrin.h
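The masked `sincos` entry is the one dual-output operation in this hunk: the sine goes to `dst`, the cosine to memory at `mem_addr`, and each output has its own pass-through source. A scalar sketch, assuming `f32` for `f16` and a caller-provided buffer in place of `mem_addr` (the helper is hypothetical):

```rust
// Hypothetical scalar model of the masked sincos pattern: sine to the
// return value, cosine to a caller-provided buffer standing in for
// mem_addr; masked-off lanes come from sin_src and cos_src.
fn mask_sincos<const N: usize>(
    sin_src: [f32; N],
    cos_src: [f32; N],
    k: u32,
    a: [f32; N],
    cos_out: &mut [f32; N],
) -> [f32; N] {
    let mut dst = sin_src;
    *cos_out = cos_src;
    for j in 0..N {
        if (k >> j) & 1 == 1 {
            let (s, c) = a[j].sin_cos();
            dst[j] = s;
            cos_out[j] = c;
        }
    }
    dst
}

fn main() {
    let a = [std::f32::consts::FRAC_PI_2; 8];
    let mut cos_out = [0.0_f32; 8];
    let dst = mask_sincos([0.0; 8], [9.0; 8], 0b01, a, &mut cos_out);
    assert!((dst[0] - 1.0).abs() < 1e-6); // sin(pi/2)
    assert!(cos_out[0].abs() < 1e-6);     // cos(pi/2)
    assert_eq!(cos_out[1], 9.0);          // masked lane from cos_src
}
```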
AVX512_FP16 - Compute the sine of packed half-precision (16-bit) floating-point elements in - "a" expressed in degrees, and store the results in "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). + Compute the sine of packed half-precision (16-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). - Trigonometry - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := SIND(a[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := SIND(a[i+15:i])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+
immintrin.h
AVX512_FP16 - Compute the hyperbolic sine of packed half-precision (16-bit) floating-point - elements in "a" expressed in radians, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). + Compute the hyperbolic sine of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). - Trigonometry - - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := SINH(a[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := SINH(a[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Round the packed half-precision (16-bit) floating-point elements in "a" to the - nearest integer value, and store the results as packed half-precision floating-point - elements in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). + Round the packed half-precision (16-bit) floating-point elements in "a" to the nearest integer value, and store the results as packed half-precision floating-point elements in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). - Special Math Functions - - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := ROUND(a[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Special Math Functions +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := ROUND(a[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Compute the tangent of packed half-precision (16-bit) floating-point elements - in "a" expressed in radians, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). + Compute the tangent of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). - Trigonometry - - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := TAN(a[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := TAN(a[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Compute the tangent of packed half-precision (16-bit) floating-point elements - in "a" expressed in degrees, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). + Compute the tangent of packed half-precision (16-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). - Trigonometry - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := TAND(a[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := TAND(a[i+15:i])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+
immintrin.h
AVX512_FP16 - Compute the hyperbolic tangent of packed half-precision (16-bit) floating-point - elements in "a" expressed in radians, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). + Compute the hyperbolic tangent of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). - Trigonometry - - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := TANH(a[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := TANH(a[i+15:i]) + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Truncate the packed half-precision (16-bit) floating-point elements in "a", and - store the results as packed half-precision floating-point elements in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). + Truncate the packed half-precision (16-bit) floating-point elements in "a", and store the results as packed half-precision floating-point elements in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). - Special Math Functions - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := TRUNCATE(a[i+15:i]) - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Special Math Functions
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := TRUNCATE(a[i+15:i])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+
immintrin.h
AVX512_FP16 - Rounds each packed half-precision (16-bit) floating-point element in "a" to the - nearest integer value and stores the results as packed half-precision floating-point - elements in "dst". + Rounds each packed half-precision (16-bit) floating-point element in "a" to the nearest integer value and stores the results as packed half-precision floating-point elements in "dst". - Special Math Functions - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := NearbyInt(a[i+15:i]) - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Special Math Functions
+FOR j := 0 to 31
+ i := j*16
+ dst[i+15:i] := NearbyInt(a[i+15:i])
+ENDFOR
+dst[MAX:512] := 0
+
immintrin.h
AVX512_FP16 - Compute the exponential value of packed half-precision (16-bit) floating-point - elements in "a" raised by packed elements in "b", and store the results in "dst". + Compute the exponential value of packed half-precision (16-bit) floating-point elements in "a" raised by packed elements in "b", and store the results in "dst". - Elementary Math Functions - - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := POW(a[i+15:i], b[i+15:i]) - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := POW(a[i+15:i], b[i+15:i]) +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Computes the reciprocal of packed half-precision (16-bit) floating-point - elements in "a", storing the results in "dst". + Computes the reciprocal of packed half-precision (16-bit) floating-point elements in "a", storing the results in "dst". - Elementary Math Functions - - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := (1.0 / a[i+15:i]) - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := (1.0 / a[i+15:i]) +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Rounds the packed half-precision (16-bit) floating-point elements in "a" to the - nearest even integer value and stores the results in "dst". + Rounds the packed half-precision (16-bit) floating-point elements in "a" to the nearest even integer value and stores the results in "dst". - Special Math Functions - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := RoundToNearestEven(a[i+15:i]) - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Special Math Functions
+FOR j := 0 to 31
+ i := j*16
+ dst[i+15:i] := RoundToNearestEven(a[i+15:i])
+ENDFOR
+dst[MAX:512] := 0
+
immintrin.h
AVX512_FP16 - Compute the sine of packed half-precision (16-bit) floating-point elements in - "a" expressed in radians, and store the results in "dst". + Compute the sine of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". - Trigonometry - - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := SIN(a[i+15:i]) - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := SIN(a[i+15:i]) +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Compute the sine and cosine of packed half-precision (16-bit) floating-point - elements in "a" expressed in radians, store the sine in "dst", and store the cosine into - memory at "mem_addr". + Compute the sine and cosine of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, store the sine in "dst", and store the cosine into memory at "mem_addr". - Trigonometry - - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := SIN(a[i+15:i]) - MEM[mem_addr+i+15:mem_addr+i] := COS(a[i+15:i]) - ENDFOR - dst[MAX:512] := 0 - cos_res[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := SIN(a[i+15:i]) + MEM[mem_addr+i+15:mem_addr+i] := COS(a[i+15:i]) +ENDFOR +dst[MAX:512] := 0 +cos_res[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Compute the sine of packed half-precision (16-bit) floating-point elements in - "a" expressed in degrees, and store the results in "dst". + Compute the sine of packed half-precision (16-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst". - Trigonometry - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := SIND(a[i+15:i]) - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry
+FOR j := 0 to 31
+ i := j*16
+ dst[i+15:i] := SIND(a[i+15:i])
+ENDFOR
+dst[MAX:512] := 0
+
immintrin.h
AVX512_FP16 - Compute the hyperbolic sine of packed half-precision (16-bit) floating-point - elements in "a" expressed in radians, and store the results in "dst". + Compute the hyperbolic sine of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". - Trigonometry - - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := SINH(a[i+15:i]) - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := SINH(a[i+15:i]) +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Round the packed half-precision (16-bit) floating-point elements in "a" to the - nearest integer value, and store the results as packed half-precision floating-point - elements in "dst". + Round the packed half-precision (16-bit) floating-point elements in "a" to the nearest integer value, and store the results as packed half-precision floating-point elements in "dst". - Special Math Functions - - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := ROUND(a[i+15:i]) - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Special Math Functions +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := ROUND(a[i+15:i]) +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Compute the tangent of packed half-precision (16-bit) floating-point elements - in "a" expressed in radians, and store the results in "dst". + Compute the tangent of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". - Trigonometry - - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := TAN(a[i+15:i]) - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := TAN(a[i+15:i]) +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Compute the tangent of packed half-precision (16-bit) floating-point elements - in "a" expressed in degrees, and store the results in "dst". + Compute the tangent of packed half-precision (16-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst". - Trigonometry - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := TAND(a[i+15:i]) - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry
+FOR j := 0 to 31
+ i := j*16
+ dst[i+15:i] := TAND(a[i+15:i])
+ENDFOR
+dst[MAX:512] := 0
+
immintrin.h
AVX512_FP16 - Compute the hyperbolic tangent of packed half-precision (16-bit) floating-point - elements in "a" expressed in radians, and store the results in "dst". + Compute the hyperbolic tangent of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". - Trigonometry - - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := TANH(a[i+15:i]) - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := TANH(a[i+15:i]) +ENDFOR +dst[MAX:512] := 0 +
immintrin.h
AVX512_FP16 - Truncate the packed half-precision (16-bit) floating-point elements in "a", and - store the results as packed half-precision floating-point elements in "dst". + Truncate the packed half-precision (16-bit) floating-point elements in "a", and store the results as packed half-precision floating-point elements in "dst". - Special Math Functions - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := TRUNCATE(a[i+15:i]) - ENDFOR - dst[MAX:512] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Special Math Functions
+FOR j := 0 to 31
+ i := j*16
+ dst[i+15:i] := TRUNCATE(a[i+15:i])
+ENDFOR
+dst[MAX:512] := 0
+
immintrin.h
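The rounding entries in this hunk use five pseudocode operations (`CEIL`, `FLOOR`, `TRUNCATE`, `ROUND`, `RoundToNearestEven`) plus `NearbyInt`, which honours the current rounding mode; std Rust does not expose that mode, and `round_ties_even` matches its default (round-to-nearest-even) behaviour. A scalar mapping onto Rust float methods, with `f32` standing in for `f16`:

```rust
// Scalar mapping of the rounding pseudocode onto Rust float methods.
// f32 stands in for the unstable f16.
fn main() {
    let x = 2.5_f32;
    assert_eq!(x.ceil(), 3.0);            // CEIL
    assert_eq!(x.floor(), 2.0);           // FLOOR
    assert_eq!(x.trunc(), 2.0);           // TRUNCATE
    assert_eq!(x.round(), 3.0);           // ROUND (ties away from zero)
    assert_eq!(x.round_ties_even(), 2.0); // RoundToNearestEven
}
```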
AVX512_FP16 - Compute the inverse cosine of packed half-precision (16-bit) floating-point - elements in "a" expressed in radians, and store the results in "dst". + Compute the inverse cosine of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". - Trigonometry - - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := ACOS(a[i+15:i]) - ENDFOR - dst[MAX:128] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := ACOS(a[i+15:i]) +ENDFOR +dst[MAX:128] := 0 +
immintrin.h
AVX512_FP16 - Compute the inverse hyperbolic cosine of packed half-precision (16-bit) - floating-point elements in "a" expressed in radians, and store the results in "dst". + Compute the inverse hyperbolic cosine of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". - Trigonometry - - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := ACOSH(a[i+15:i]) - ENDFOR - dst[MAX:128] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := ACOSH(a[i+15:i]) +ENDFOR +dst[MAX:128] := 0 +
immintrin.h
AVX512_FP16 - Compute the inverse sine of packed half-precision (16-bit) floating-point - elements in "a" expressed in radians, and store the results in "dst". + Compute the inverse sine of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". - Trigonometry - - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := ASIN(a[i+15:i]) - ENDFOR - dst[MAX:128] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := ASIN(a[i+15:i]) +ENDFOR +dst[MAX:128] := 0 +
immintrin.h
AVX512_FP16 - Compute the inverse hyperbolic sine of packed half-precision (16-bit) - floating-point elements in "a" expressed in radians, and store the results in "dst". + Compute the inverse hyperbolic sine of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". - Trigonometry - - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := ASINH(a[i+15:i]) - ENDFOR - dst[MAX:128] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := ASINH(a[i+15:i]) +ENDFOR +dst[MAX:128] := 0 +
immintrin.h
AVX512_FP16 - Compute the inverse tangent of packed half-precision (16-bit) floating-point - elements in "a" divided by packed elements in "b", and store the results in "dst" - expressed in radians. + Compute the inverse tangent of packed half-precision (16-bit) floating-point elements in "a" divided by packed elements in "b", and store the results in "dst" expressed in radians. - Trigonometry - - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := ATAN2(a[i+15:i], b[i+15:i]) - ENDFOR - dst[MAX:128] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := ATAN2(a[i+15:i], b[i+15:i]) +ENDFOR +dst[MAX:128] := 0 +
immintrin.h
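The `ATAN2` entry above computes the inverse tangent of `a / b` with the quadrant chosen from the signs of both inputs, which is exactly what `f32::atan2` provides. A scalar check, with `f32` standing in for `f16`:

```rust
// Scalar model of the ATAN2 lanes. f32 stands in for the unstable f16.
fn main() {
    let (a, b) = (1.0_f32, -1.0_f32);
    // atan2(1, -1) = 3*pi/4; a plain atan(a / b) would give -pi/4.
    let expected = 3.0 * std::f32::consts::FRAC_PI_4;
    assert!((a.atan2(b) - expected).abs() < 1e-6);
}
```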
AVX512_FP16 - Compute the inverse tangent of packed half-precision (16-bit) floating-point - elements in "a" expressed in radians, and store the results in "dst". + Compute the inverse tangent of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". - Trigonometry - - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := ATAN(a[i+15:i]) - ENDFOR - dst[MAX:128] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := ATAN(a[i+15:i]) +ENDFOR +dst[MAX:128] := 0 +
immintrin.h
AVX512_FP16 - Compute the inverse hyperbolic tangent of packed half-precision (16-bit) - floating-point elements in "a" expressed in radians, and store the results in "dst". + Compute the inverse hyperbolic tangent of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". - Trigonometry - - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := ATANH(a[i+15:i]) - ENDFOR - dst[MAX:128] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := ATANH(a[i+15:i]) +ENDFOR +dst[MAX:128] := 0 +
immintrin.h
AVX512_FP16 - Compute the cube root of packed half-precision (16-bit) floating-point elements - in "a", and store the results in "dst". + Compute the cube root of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". - Elementary Math Functions - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := CubeRoot(a[i+15:i]) - ENDFOR - dst[MAX:128] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions
+FOR j := 0 to 7
+ i := j*16
+ dst[i+15:i] := CubeRoot(a[i+15:i])
+ENDFOR
+dst[MAX:128] := 0
+
immintrin.h
AVX512_FP16 - Compute the cumulative distribution function of packed half-precision (16-bit) - floating-point elements in "a" using the normal distribution, and store the results in - "dst". + Compute the cumulative distribution function of packed half-precision (16-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst". - Probability/Statistics - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := CDFNormal(a[i+15:i]) - ENDFOR - dst[MAX:128] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Probability/Statistics
+FOR j := 0 to 7
+ i := j*16
+ dst[i+15:i] := CDFNormal(a[i+15:i])
+ENDFOR
+dst[MAX:128] := 0
+
immintrin.h
AVX512_FP16 - Compute the inverse cumulative distribution function of packed half-precision - (16-bit) floating-point elements in "a" using the normal distribution, and store the - results in "dst". + Compute the inverse cumulative distribution function of packed half-precision (16-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst". - Probability/Statistics - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := InverseCDFNormal(a[i+15:i]) - ENDFOR - dst[MAX:128] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Probability/Statistics
+FOR j := 0 to 7
+ i := j*16
+ dst[i+15:i] := InverseCDFNormal(a[i+15:i])
+ENDFOR
+dst[MAX:128] := 0
+
immintrin.h
AVX512_FP16 - Compute the cosine of packed half-precision (16-bit) floating-point elements in - "a" expressed in radians, and store the results in "dst". + Compute the cosine of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". - Trigonometry - - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := COS(a[i+15:i]) - ENDFOR - dst[MAX:128] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := COS(a[i+15:i]) +ENDFOR +dst[MAX:128] := 0 +
immintrin.h
AVX512_FP16 - Compute the cosine of packed half-precision (16-bit) floating-point elements in - "a" expressed in degrees, and store the results in "dst". + Compute the cosine of packed half-precision (16-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst". - Trigonometry - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := COSD(a[i+15:i]) - ENDFOR - dst[MAX:128] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry
+FOR j := 0 to 7
+ i := j*16
+ dst[i+15:i] := COSD(a[i+15:i])
+ENDFOR
+dst[MAX:128] := 0
+
immintrin.h
AVX512_FP16 - Compute the hyperbolic cosine of packed half-precision (16-bit) floating-point - elements in "a" expressed in radians, and store the results in "dst". + Compute the hyperbolic cosine of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". - Trigonometry - - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := COSH(a[i+15:i]) - ENDFOR - dst[MAX:128] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := COSH(a[i+15:i]) +ENDFOR +dst[MAX:128] := 0 +
immintrin.h
AVX512_FP16 - Compute the error function of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst". + Compute the error function of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". - Probability/Statistics - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := ERF(a[i+15:i]) - ENDFOR - dst[MAX:128] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Probability/Statistics
+FOR j := 0 to 7
+ i := j*16
+ dst[i+15:i] := ERF(a[i+15:i])
+ENDFOR
+dst[MAX:128] := 0
+
immintrin.h
AVX512_FP16 - Compute the complementary error function of packed half-precision (16-bit) - floating-point elements in "a", and store the results in "dst". + Compute the complementary error function of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". - Probability/Statistics - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := 1.0 - ERF(a[i+15:i]) - ENDFOR - dst[MAX:128] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Probability/Statistics
+FOR j := 0 to 7
+ i := j*16
+ dst[i+15:i] := 1.0 - ERF(a[i+15:i])
+ENDFOR
+dst[MAX:128] := 0
+
immintrin.h
AVX512_FP16 - Compute the inverse complementary error function of packed half-precision - (16-bit) floating-point elements in "a", and store the results in "dst". + Compute the inverse complementary error function of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". - Probability/Statistics - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := 1.0 / (1.0 - ERF(a[i+15:i])) - ENDFOR - dst[MAX:128] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Probability/Statistics
+FOR j := 0 to 7
+ i := j*16
+ dst[i+15:i] := 1.0 / (1.0 - ERF(a[i+15:i]))
+ENDFOR
+dst[MAX:128] := 0
+
immintrin.h
AVX512_FP16 - Compute the inverse error function of packed half-precision (16-bit) - floating-point elements in "a", and store the results in "dst". + Compute the inverse error function of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". - Probability/Statistics - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := 1.0 / ERF(a[i+15:i]) - ENDFOR - dst[MAX:128] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Probability/Statistics
+FOR j := 0 to 7
+ i := j*16
+ dst[i+15:i] := 1.0 / ERF(a[i+15:i])
+ENDFOR
+dst[MAX:128] := 0
+
immintrin.h
AVX512_FP16 - Compute the exponential value of 10 raised to the power of packed - half-precision (16-bit) floating-point elements in "a", and store the results in "dst". + Compute the exponential value of 10 raised to the power of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". - Elementary Math Functions - - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := POW(FP16(10.0), a[i+15:i]) - ENDFOR - dst[MAX:128] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := POW(FP16(10.0), a[i+15:i]) +ENDFOR +dst[MAX:128] := 0 +
immintrin.h
AVX512_FP16 - Compute the exponential value of 2 raised to the power of packed half-precision - (16-bit) floating-point elements in "a", and store the results in "dst". + Compute the exponential value of 2 raised to the power of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". - Elementary Math Functions - - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := POW(FP16(2.0), a[i+15:i]) - ENDFOR - dst[MAX:128] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := POW(FP16(2.0), a[i+15:i]) +ENDFOR +dst[MAX:128] := 0 +
immintrin.h
AVX512_FP16 - Compute the exponential value of "e" raised to the power of packed - half-precision (16-bit) floating-point elements in "a", and store the results in "dst". + Compute the exponential value of "e" raised to the power of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". - Elementary Math Functions - - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := POW(FP16(e), a[i+15:i]) - ENDFOR - dst[MAX:128] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := POW(FP16(e), a[i+15:i]) +ENDFOR +dst[MAX:128] := 0 +
immintrin.h
AVX512_FP16 - Compute the exponential value of "e" raised to the power of packed - half-precision (16-bit) floating-point elements in "a", subtract one from each element, - and store the results in "dst". + Compute the exponential value of "e" raised to the power of packed half-precision (16-bit) floating-point elements in "a", subtract one from each element, and store the results in "dst". - Elementary Math Functions - - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := POW(FP16(e), a[i+15:i]) - 1.0 - ENDFOR - dst[MAX:128] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := POW(FP16(e), a[i+15:i]) - 1.0 +ENDFOR +dst[MAX:128] := 0 +
immintrin.h
AVX512_FP16
- Compute the length of the hypotenous of a right triangle, with the lengths of
- the other two sides of the triangle stored as packed half-precision (16-bit)
- floating-point elements in "a" and "b", and store the results in "dst".
+ Compute the length of the hypotenuse of a right triangle, with the lengths of the other two sides of the triangle stored as packed half-precision (16-bit) floating-point elements in "a" and "b", and store the results in "dst".
- Elementary Math Functions
-
- FOR j := 0 to 7
- i := j*16
- dst[i+15:i] := SQRT(POW(a[i+15:i], 2.0) + POW(b[i+15:i], 2.0))
- ENDFOR
- dst[MAX:128] := 0
-
-
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := SQRT(POW(a[i+15:i], 2.0) + POW(b[i+15:i], 2.0)) +ENDFOR +dst[MAX:128] := 0 +
immintrin.h
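The `hypot` entry above computes `sqrt(a^2 + b^2)` per lane. A scalar sketch with `f32` standing in for `f16`; `f32::hypot` matches the pseudocode and also avoids overflow in the squared intermediates, which the literal `POW`/`SQRT` form does not:

```rust
// Scalar model of the hypot lanes. f32 stands in for the unstable f16.
fn main() {
    let (a, b) = (3.0_f32, 4.0_f32);
    assert_eq!(a.hypot(b), 5.0);
    assert_eq!((a * a + b * b).sqrt(), 5.0); // literal pseudocode form
}
```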
AVX512_FP16 - Compute the inverse cube root of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst". + Compute the inverse cube root of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". - Elementary Math Functions - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := InvCubeRoot(a[i+15:i]) - ENDFOR - dst[MAX:128] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions
+FOR j := 0 to 7
+ i := j*16
+ dst[i+15:i] := InvCubeRoot(a[i+15:i])
+ENDFOR
+dst[MAX:128] := 0
+
immintrin.h
AVX512_FP16 - Compute the inverse square root of packed half-precision (16-bit) - floating-point elements in "a", and store the results in "dst". + Compute the inverse square root of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". - Elementary Math Functions - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := InvSQRT(a[i+15:i]) - ENDFOR - dst[MAX:128] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions
+FOR j := 0 to 7
+ i := j*16
+ dst[i+15:i] := InvSQRT(a[i+15:i])
+ENDFOR
+dst[MAX:128] := 0
+
immintrin.h
AVX512_FP16 - Compute the base-10 logarithm of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst". + Compute the base-10 logarithm of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". - Elementary Math Functions - - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := LOG(a[i+15:i]) / LOG(10.0) - ENDFOR - dst[MAX:128] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := LOG(a[i+15:i]) / LOG(10.0) +ENDFOR +dst[MAX:128] := 0 +
immintrin.h
AVX512_FP16 - Compute the natural logarithm of one plus packed half-precision (16-bit) - floating-point elements in "a", and store the results in "dst". + Compute the natural logarithm of one plus packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". - Elementary Math Functions - - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := LOG(1.0 + a[i+15:i]) - ENDFOR - dst[MAX:128] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := LOG(1.0 + a[i+15:i]) +ENDFOR +dst[MAX:128] := 0 +
immintrin.h
AVX512_FP16 - Compute the base-2 logarithm of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst". + Compute the base-2 logarithm of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". - Elementary Math Functions - - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := LOG(a[i+15:i]) / LOG(2.0) - ENDFOR - dst[MAX:128] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := LOG(a[i+15:i]) / LOG(2.0) +ENDFOR +dst[MAX:128] := 0 +
immintrin.h
AVX512_FP16 - Compute the natural logarithm of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst". + Compute the natural logarithm of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". - Elementary Math Functions - - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := LOG(a[i+15:i]) - ENDFOR - dst[MAX:128] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := LOG(a[i+15:i]) +ENDFOR +dst[MAX:128] := 0 +
immintrin.h
AVX512_FP16 - Convert the exponent of each packed half-precision (16-bit) floating-point - element in "a" to a half-precision floating-point number representing the integer - exponent, and store the results in "dst". This intrinsic essentially calculates - "floor(log2(x))" for each element. + Convert the exponent of each packed half-precision (16-bit) floating-point element in "a" to a half-precision floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element. - Elementary Math Functions - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := ConvertExpFP16(a[i+15:i]) - ENDFOR - dst[MAX:128] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := ConvertExpFP16(a[i+15:i]) +ENDFOR +dst[MAX:128] := 0 +
immintrin.h
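
The ConvertExpFP16 step above is, per the description, just floor(log2(x)) per lane. A minimal scalar sketch of that semantic (f32 lanes stand in for f16, since the f16 type is still unstable; the function name is illustrative, not part of any API):

    // Reference model for ConvertExpFP16: the integer exponent of x,
    // i.e. floor(log2(x)), returned as a float.
    fn convert_exp_ref(x: f32) -> f32 {
        x.log2().floor()
    }

    fn main() {
        assert_eq!(convert_exp_ref(8.0), 3.0);
        assert_eq!(convert_exp_ref(0.75), -1.0); // log2(0.75) ~ -0.415
    }
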
AVX512_FP16 - Compute the exponential value of packed half-precision (16-bit) floating-point - elements in "a" raised by packed elements in "b", and store the results in "dst". + Compute the exponential value of packed half-precision (16-bit) floating-point elements in "a" raised by packed elements in "b", and store the results in "dst". - Elementary Math Functions - - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := POW(a[i+15:i], b[i+15:i]) - ENDFOR - dst[MAX:128] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := POW(a[i+15:i], b[i+15:i]) +ENDFOR +dst[MAX:128] := 0 +
immintrin.h
AVX512_FP16 - Compute the sine of packed half-precision (16-bit) floating-point elements in - "a" expressed in radians, and store the results in "dst". + Compute the sine of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". - Trigonometry - - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := SIN(a[i+15:i]) - ENDFOR - dst[MAX:128] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := SIN(a[i+15:i]) +ENDFOR +dst[MAX:128] := 0 +
immintrin.h
AVX512_FP16 - Compute the sine and cosine of packed half-precision (16-bit) floating-point - elements in "a" expressed in radians, store the sine in "dst", and store the cosine into - memory at "mem_addr". + Compute the sine and cosine of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, store the sine in "dst", and store the cosine into memory at "mem_addr". - Trigonometry - - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := SIN(a[i+15:i]) - MEM[mem_addr+i+15:mem_addr+i] := COS(a[i+15:i]) - ENDFOR - dst[MAX:128] := 0 - cos_res[MAX:128] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := SIN(a[i+15:i]) + MEM[mem_addr+i+15:mem_addr+i] := COS(a[i+15:i]) +ENDFOR +dst[MAX:128] := 0 +cos_res[MAX:128] := 0 +
immintrin.h
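
This sincos entry is the only one in the block with two outputs: the sine lanes are the return value, while the cosine lanes are written to memory at mem_addr. A rough scalar model of that contract (f32 lanes for f16, a mutable array standing in for the raw pointer; names illustrative):

    // Models the sincos contract: returns the sines, writes the cosines
    // to the caller-provided buffer (standing in for `mem_addr`).
    fn sincos_lanes_ref(a: &[f32; 8], cos_out: &mut [f32; 8]) -> [f32; 8] {
        let mut sin_out = [0.0f32; 8];
        for j in 0..8 {
            let (s, c) = a[j].sin_cos();
            sin_out[j] = s;
            cos_out[j] = c;
        }
        sin_out
    }
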
AVX512_FP16 - Compute the sine of packed half-precision (16-bit) floating-point elements in - "a" expressed in degrees, and store the results in "dst". + Compute the sine of packed half-precision (16-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst". - Trigonometry - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := SIND(a[i+15:i]) - ENDFOR - dst[MAX:128] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := SIND(a[i+15:i]) +ENDFOR +dst[MAX:128] := 0 +
immintrin.h
AVX512_FP16 - Compute the hyperbolic sine of packed half-precision (16-bit) floating-point - elements in "a" expressed in radians, and store the results in "dst". + Compute the hyperbolic sine of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". - Trigonometry - - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := SINH(a[i+15:i]) - ENDFOR - dst[MAX:128] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := SINH(a[i+15:i]) +ENDFOR +dst[MAX:128] := 0 +
immintrin.h
AVX512_FP16 - Round the packed half-precision (16-bit) floating-point elements in "a" up to - an integer value, and store the results as packed half-precision floating-point elements - in "dst". + Round the packed half-precision (16-bit) floating-point elements in "a" up to an integer value, and store the results as packed half-precision floating-point elements in "dst". - Special Math Functions - - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := CEIL(a[i+15:i]) - ENDFOR - dst[MAX:128] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Special Math Functions +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := CEIL(a[i+15:i]) +ENDFOR +dst[MAX:128] := 0 +
immintrin.h
AVX512_FP16 - Round the packed half-precision (16-bit) floating-point elements in "a" down to - an integer value, and store the results as packed half-precision floating-point elements - in "dst". + Round the packed half-precision (16-bit) floating-point elements in "a" down to an integer value, and store the results as packed half-precision floating-point elements in "dst". - Special Math Functions - - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := FLOOR(a[i+15:i]) - ENDFOR - dst[MAX:128] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Special Math Functions +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := FLOOR(a[i+15:i]) +ENDFOR +dst[MAX:128] := 0 +
immintrin.h
AVX512_FP16 - Round the packed half-precision (16-bit) floating-point elements in "a" to the - nearest integer value, and store the results as packed half-precision floating-point - elements in "dst". + Round the packed half-precision (16-bit) floating-point elements in "a" to the nearest integer value, and store the results as packed half-precision floating-point elements in "dst". - Special Math Functions - - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := ROUND(a[i+15:i]) - ENDFOR - dst[MAX:128] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Special Math Functions +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := ROUND(a[i+15:i]) +ENDFOR +dst[MAX:128] := 0 +
immintrin.h
AVX512_FP16 - Compute the square root of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst". Note that this intrinsic is less - efficient than "_mm_sqrt_ps". + Compute the square root of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". Note that this intrinsic is less efficient than "_mm_sqrt_ps". - Elementary Math Functions - - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := SQRT(a[i+15:i]) - ENDFOR - dst[MAX:128] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Elementary Math Functions +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := SQRT(a[i+15:i]) +ENDFOR +dst[MAX:128] := 0 +
immintrin.h
AVX512_FP16 - Compute the tangent of packed half-precision (16-bit) floating-point elements - in "a" expressed in radians, and store the results in "dst". + Compute the tangent of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". - Trigonometry - - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := TAN(a[i+15:i]) - ENDFOR - dst[MAX:128] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := TAN(a[i+15:i]) +ENDFOR +dst[MAX:128] := 0 +
immintrin.h
AVX512_FP16 - Compute the tangent of packed half-precision (16-bit) floating-point elements - in "a" expressed in degrees, and store the results in "dst". + Compute the tangent of packed half-precision (16-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst". - Trigonometry - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := TAND(a[i+15:i]) - ENDFOR - dst[MAX:128] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := TAND(a[i+15:i]) +ENDFOR +dst[MAX:128] := 0 +
immintrin.h
AVX512_FP16 - Compute the hyperbolic tangent of packed half-precision (16-bit) floating-point - elements in "a" expressed in radians, and store the results in "dst". + Compute the hyperbolic tangent of packed half-precision (16-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". - Trigonometry - - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := TANH(a[i+15:i]) - ENDFOR - dst[MAX:128] := 0 - -
immintrin.h
- AVX512_FP16 -
+ Trigonometry +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := TANH(a[i+15:i]) +ENDFOR +dst[MAX:128] := 0 +
immintrin.h
AVX512_FP16 - Truncate the packed half-precision (16-bit) floating-point elements in "a", and - store the results as packed half-precision floating-point elements in "dst". - - Special Math Functions - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := TRUNCATE(a[i+15:i]) - ENDFOR - dst[MAX:128] := 0 - -
immintrin.h
- AVX512_FP16 -
- - + Truncate the packed half-precision (16-bit) floating-point elements in "a", and store the results as packed half-precision floating-point elements in "dst". - - Add packed half-precision (16-bit) floating-point elements in "a" and "b", and - store the results in "dst". - - FOR j := 0 TO 7 - dst.fp16[j] := a.fp16[j] + b.fp16[j] - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + Special Math Functions +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := TRUNCATE(a[i+15:i]) +ENDFOR +dst[MAX:128] := 0 +
immintrin.h
AVX512_FP16
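
One detail shared by every operation block here: the trailing dst[MAX:128] := 0 (or dst[MAX:256] := 0) zeroes the destination register bits above the 128-bit (256-bit) result, the usual VEX/EVEX upper-zeroing behaviour. Modelled on a 512-bit register viewed as 32 half-word lanes (a sketch only, with u16 standing in for raw f16 bit patterns):

    // Models `dst[MAX:128] := 0`: a 128-bit result occupies lanes 0..8 of a
    // 512-bit register; lanes 8..32 (bits 128..512) are cleared.
    fn zero_upper_128(dst: &mut [u16; 32]) {
        for lane in dst.iter_mut().skip(8) {
            *lane = 0;
        }
    }
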
+ + + + + Add packed half-precision (16-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 TO 7 + dst.fp16[j] := a.fp16[j] + b.fp16[j] +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
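
As a reference for the pseudocode above, the unmasked add is a plain lane-wise loop; a scalar Rust model (f32 lanes for f16, name illustrative and not the stdarch implementation):

    // dst.fp16[j] := a.fp16[j] + b.fp16[j] for j in 0..8
    fn add_ph_ref(a: [f32; 8], b: [f32; 8]) -> [f32; 8] {
        core::array::from_fn(|j| a[j] + b[j])
    }
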
- - - - - - Add packed half-precision (16-bit) floating-point elements in "a" and "b", and - store the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 TO 7 - IF k[j] - dst.fp16[j] := a.fp16[j] + b.fp16[j] - ELSE - dst.fp16[j] := src.fp16[j] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Add packed half-precision (16-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 TO 7 + IF k[j] + dst.fp16[j] := a.fp16[j] + b.fp16[j] + ELSE + dst.fp16[j] := src.fp16[j] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
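
The writemask variant differs only in the fallback: lane j takes a+b when bit j of k is set, and src otherwise. Sketch under the same f32-for-f16 assumption:

    // Writemask semantics: k selects between the computed lane and src.
    fn mask_add_ph_ref(src: [f32; 8], k: u8, a: [f32; 8], b: [f32; 8]) -> [f32; 8] {
        core::array::from_fn(|j| if (k >> j) & 1 != 0 { a[j] + b[j] } else { src[j] })
    }
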
- - - - - Add packed half-precision (16-bit) floating-point elements in "a" and "b", and - store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 TO 7 - IF k[j] - dst.fp16[j] := a.fp16[j] + b.fp16[j] - ELSE - dst.fp16[j] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + Add packed half-precision (16-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 TO 7 + IF k[j] + dst.fp16[j] := a.fp16[j] + b.fp16[j] + ELSE + dst.fp16[j] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
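
And the zeromask variant replaces the src fallback with literal zero:

    // Zeromask semantics: masked-off lanes become 0.0.
    fn maskz_add_ph_ref(k: u8, a: [f32; 8], b: [f32; 8]) -> [f32; 8] {
        core::array::from_fn(|j| if (k >> j) & 1 != 0 { a[j] + b[j] } else { 0.0 })
    }
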
- - - - Add packed half-precision (16-bit) floating-point elements in "a" and "b", and - store the results in "dst". - - FOR j := 0 TO 15 - dst.fp16[j] := a.fp16[j] + b.fp16[j] - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + Add packed half-precision (16-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 TO 15 + dst.fp16[j] := a.fp16[j] + b.fp16[j] +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Add packed half-precision (16-bit) floating-point elements in "a" and "b", and - store the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 TO 15 - IF k[j] - dst.fp16[j] := a.fp16[j] + b.fp16[j] - ELSE - dst.fp16[j] := src.fp16[j] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Add packed half-precision (16-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 TO 15 + IF k[j] + dst.fp16[j] := a.fp16[j] + b.fp16[j] + ELSE + dst.fp16[j] := src.fp16[j] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Add packed half-precision (16-bit) floating-point elements in "a" and "b", and - store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 TO 15 - IF k[j] - dst.fp16[j] := a.fp16[j] + b.fp16[j] - ELSE - dst.fp16[j] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + Add packed half-precision (16-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 TO 15 + IF k[j] + dst.fp16[j] := a.fp16[j] + b.fp16[j] + ELSE + dst.fp16[j] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - Divide packed half-precision (16-bit) floating-point elements in "a" by packed - elements in "b", and store the results in "dst". - - FOR j := 0 to 7 - dst.fp16[j] := a.fp16[j] / b.fp16[j] - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + Divide packed half-precision (16-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst". + +FOR j := 0 to 7 + dst.fp16[j] := a.fp16[j] / b.fp16[j] +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
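
The div entries repeat the same plain/mask/maskz pattern; only the per-lane expression changes (same illustrative f32-for-f16 sketch):

    // dst.fp16[j] := a.fp16[j] / b.fp16[j] for j in 0..8
    fn div_ph_ref(a: [f32; 8], b: [f32; 8]) -> [f32; 8] {
        core::array::from_fn(|j| a[j] / b[j])
    }
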
- - - - - - Divide packed half-precision (16-bit) floating-point elements in "a" by packed - elements in "b", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - IF k[j] - dst.fp16[j] := a.fp16[j] / b.fp16[j] - ELSE - dst.fp16[j] := src.fp16[j] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Divide packed half-precision (16-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + IF k[j] + dst.fp16[j] := a.fp16[j] / b.fp16[j] + ELSE + dst.fp16[j] := src.fp16[j] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Divide packed half-precision (16-bit) floating-point elements in "a" by packed - elements in "b", and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - IF k[j] - dst.fp16[j] := a.fp16[j] / b.fp16[j] - ELSE - dst.fp16[j] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + Divide packed half-precision (16-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + IF k[j] + dst.fp16[j] := a.fp16[j] / b.fp16[j] + ELSE + dst.fp16[j] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - Divide packed half-precision (16-bit) floating-point elements in "a" by packed - elements in "b", and store the results in "dst". - - FOR j := 0 to 15 - dst.fp16[j] := a.fp16[j] / b.fp16[j] - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + Divide packed half-precision (16-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst". + +FOR j := 0 to 15 + dst.fp16[j] := a.fp16[j] / b.fp16[j] +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Divide packed half-precision (16-bit) floating-point elements in "a" by packed - elements in "b", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 15 - IF k[j] - dst.fp16[j] := a.fp16[j] / b.fp16[j] - ELSE - dst.fp16[j] := src.fp16[j] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Divide packed half-precision (16-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + IF k[j] + dst.fp16[j] := a.fp16[j] / b.fp16[j] + ELSE + dst.fp16[j] := src.fp16[j] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Divide packed half-precision (16-bit) floating-point elements in "a" by packed - elements in "b", and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - - FOR j := 0 to 15 - IF k[j] - dst.fp16[j] := a.fp16[j] / b.fp16[j] - ELSE - dst.fp16[j] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + Divide packed half-precision (16-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + IF k[j] + dst.fp16[j] := a.fp16[j] / b.fp16[j] + ELSE + dst.fp16[j] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - add the intermediate result to packed elements in "c", and store the results in "dst". - - FOR j := 0 to 7 - dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] - ENDFOR - dst[MAX:128] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst". + +FOR j := 0 to 7 + dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] +ENDFOR +dst[MAX:128] := 0 + + + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
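
The FMA entries compute (a*b)+c fused, i.e. with a single rounding per lane; in a scalar model that is mul_add rather than a separate multiply and add (f32 lanes for f16, name illustrative):

    // dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j], one rounding per lane.
    fn fmadd_ph_ref(a: [f32; 8], b: [f32; 8], c: [f32; 8]) -> [f32; 8] {
        core::array::from_fn(|j| a[j].mul_add(b[j], c[j]))
    }
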
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - add the intermediate result to packed elements in "c", and store the results in "dst" - using writemask "k" (elements are copied from "a" when the corresponding mask bit is not - set). - - FOR j := 0 to 7 - IF k[j] - dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] - ELSE - dst.fp16[j] := a.fp16[j] - FI - ENDFOR - dst[MAX:128] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + IF k[j] + dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] + ELSE + dst.fp16[j] := a.fp16[j] + FI +ENDFOR +dst[MAX:128] := 0 + + + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - add the intermediate result to packed elements in "c", and store the results in "dst" - using writemask "k" (elements are copied from "c" when the corresponding mask bit is not - set). - - FOR j := 0 to 7 - IF k[j] - dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] - ELSE - dst.fp16[j] := c.fp16[j] - FI - ENDFOR - dst[MAX:128] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + IF k[j] + dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] + ELSE + dst.fp16[j] := c.fp16[j] + FI +ENDFOR +dst[MAX:128] := 0 + + + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - add the intermediate result to packed elements in "c", and store the results in "dst" - using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - IF k[j] - dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] - ELSE - dst.fp16[j] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + IF k[j] + dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] + ELSE + dst.fp16[j] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
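
The three masked fmadd flavours above differ only in the masked-off lane: the mask form keeps a, the mask3 form keeps c, and the maskz form writes zero. One parameterised sketch covers all three (illustrative, not the stdarch API):

    // Fallback source for a masked-off lane in the three FMA mask flavours.
    #[derive(Clone, Copy)]
    enum Fallback { A, C, Zero }

    fn masked_fmadd_ref(k: u8, a: [f32; 8], b: [f32; 8], c: [f32; 8], f: Fallback) -> [f32; 8] {
        core::array::from_fn(|j| {
            if (k >> j) & 1 != 0 {
                a[j].mul_add(b[j], c[j])
            } else {
                match f {
                    Fallback::A => a[j],
                    Fallback::C => c[j],
                    Fallback::Zero => 0.0,
                }
            }
        })
    }
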
- - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - add the intermediate result to packed elements in "c", and store the results in "dst". - - FOR j := 0 to 15 - dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] - ENDFOR - dst[MAX:256] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst". + +FOR j := 0 to 15 + dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] +ENDFOR +dst[MAX:256] := 0 + + + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - add the intermediate result to packed elements in "c", and store the results in "dst" - using writemask "k" (elements are copied from "a" when the corresponding mask bit is not - set). - - FOR j := 0 to 15 - IF k[j] - dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] - ELSE - dst.fp16[j] := a.fp16[j] - FI - ENDFOR - dst[MAX:256] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + IF k[j] + dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] + ELSE + dst.fp16[j] := a.fp16[j] + FI +ENDFOR +dst[MAX:256] := 0 + + + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - add the intermediate result to packed elements in "c", and store the results in "dst" - using writemask "k" (elements are copied from "c" when the corresponding mask bit is not - set). - - FOR j := 0 to 15 - IF k[j] - dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] - ELSE - dst.fp16[j] := c.fp16[j] - FI - ENDFOR - dst[MAX:256] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + IF k[j] + dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] + ELSE + dst.fp16[j] := c.fp16[j] + FI +ENDFOR +dst[MAX:256] := 0 + + + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - add the intermediate result to packed elements in "c", and store the results in "dst" - using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 15 - IF k[j] - dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] - ELSE - dst.fp16[j] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + IF k[j] + dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] + ELSE + dst.fp16[j] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - add the negated intermediate result to packed elements in "c", and store the results in - "dst". - - FOR j := 0 to 7 - dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) + c.fp16[j] - ENDFOR - dst[MAX:128] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst". + +FOR j := 0 to 7 + dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) + c.fp16[j] +ENDFOR +dst[MAX:128] := 0 + + + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
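
fnmadd negates the product before the add, -(a*b)+c, which in the scalar model is just a sign flip on the first operand:

    // dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) + c.fp16[j]
    fn fnmadd_ph_ref(a: [f32; 8], b: [f32; 8], c: [f32; 8]) -> [f32; 8] {
        core::array::from_fn(|j| (-a[j]).mul_add(b[j], c[j]))
    }
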
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - add the negated intermediate result to packed elements in "c", and store the results in - "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit - is not set). - - FOR j := 0 to 7 - IF k[j] - dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) + c.fp16[j] - ELSE - dst.fp16[j] := a.fp16[j] - FI - ENDFOR - dst[MAX:128] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + IF k[j] + dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) + c.fp16[j] + ELSE + dst.fp16[j] := a.fp16[j] + FI +ENDFOR +dst[MAX:128] := 0 + + + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - add the negated intermediate result to packed elements in "c", and store the results in - "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit - is not set). - - FOR j := 0 to 7 - IF k[j] - dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) + c.fp16[j] - ELSE - dst.fp16[j] := c.fp16[j] - FI - ENDFOR - dst[MAX:128] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + IF k[j] + dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) + c.fp16[j] + ELSE + dst.fp16[j] := c.fp16[j] + FI +ENDFOR +dst[MAX:128] := 0 + + + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - add the negated intermediate result to packed elements in "c", and store the results in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 7 - IF k[j] - dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) + c.fp16[j] - ELSE - dst.fp16[j] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + IF k[j] + dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) + c.fp16[j] + ELSE + dst.fp16[j] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - add the negated intermediate result to packed elements in "c", and store the results in - "dst". - - FOR j := 0 to 15 - dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) + c.fp16[j] - ENDFOR - dst[MAX:256] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst". + +FOR j := 0 to 15 + dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) + c.fp16[j] +ENDFOR +dst[MAX:256] := 0 + + + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - add the negated intermediate result to packed elements in "c", and store the results in - "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit - is not set). - - FOR j := 0 to 15 - IF k[j] - dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) + c.fp16[j] - ELSE - dst.fp16[j] := a.fp16[j] - FI - ENDFOR - dst[MAX:256] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + IF k[j] + dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) + c.fp16[j] + ELSE + dst.fp16[j] := a.fp16[j] + FI +ENDFOR +dst[MAX:256] := 0 + + + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - add the negated intermediate result to packed elements in "c", and store the results in - "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit - is not set). - - FOR j := 0 to 15 - IF k[j] - dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) + c.fp16[j] - ELSE - dst.fp16[j] := c.fp16[j] - FI - ENDFOR - dst[MAX:256] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + IF k[j] + dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) + c.fp16[j] + ELSE + dst.fp16[j] := c.fp16[j] + FI +ENDFOR +dst[MAX:256] := 0 + + + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - add the negated intermediate result to packed elements in "c", and store the results in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 15 - IF k[j] - dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) + c.fp16[j] - ELSE - dst.fp16[j] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + IF k[j] + dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) + c.fp16[j] + ELSE + dst.fp16[j] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - subtract packed elements in "c" from the intermediate result, and store the results in - "dst". - - FOR j := 0 to 7 - dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j] - ENDFOR - dst[MAX:128] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst". + +FOR j := 0 to 7 + dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j] +ENDFOR +dst[MAX:128] := 0 + + + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
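
Together with fnmsub below, the four FMA sign combinations are (a*b)+c, -(a*b)+c, (a*b)-c and -(a*b)-c; fmsub is the third of these:

    // dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j]
    fn fmsub_ph_ref(a: [f32; 8], b: [f32; 8], c: [f32; 8]) -> [f32; 8] {
        core::array::from_fn(|j| a[j].mul_add(b[j], -c[j]))
    }
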
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - subtract packed elements in "c" from the intermediate result, and store the results in - "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit - is not set). - - FOR j := 0 to 7 - IF k[j] - dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j] - ELSE - dst.fp16[j] := a.fp16[j] - FI - ENDFOR - dst[MAX:128] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + IF k[j] + dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j] + ELSE + dst.fp16[j] := a.fp16[j] + FI +ENDFOR +dst[MAX:128] := 0 + + + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - subtract packed elements in "c" from the intermediate result, and store the results in - "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit - is not set). - - FOR j := 0 to 7 - IF k[j] - dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j] - ELSE - dst.fp16[j] := c.fp16[j] - FI - ENDFOR - dst[MAX:128] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + IF k[j] + dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j] + ELSE + dst.fp16[j] := c.fp16[j] + FI +ENDFOR +dst[MAX:128] := 0 + + + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - subtract packed elements in "c" from the intermediate result, and store the results in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 7 - IF k[j] - dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j] - ELSE - dst.fp16[j] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + IF k[j] + dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j] + ELSE + dst.fp16[j] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - subtract packed elements in "c" from the intermediate result, and store the results in - "dst". - - FOR j := 0 to 15 - dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j] - ENDFOR - dst[MAX:256] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst". + +FOR j := 0 to 15 + dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j] +ENDFOR +dst[MAX:256] := 0 + + + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - subtract packed elements in "c" from the intermediate result, and store the results in - "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit - is not set). - - FOR j := 0 to 15 - IF k[j] - dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j] - ELSE - dst.fp16[j] := a.fp16[j] - FI - ENDFOR - dst[MAX:256] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + IF k[j] + dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j] + ELSE + dst.fp16[j] := a.fp16[j] + FI +ENDFOR +dst[MAX:256] := 0 + + + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - subtract packed elements in "c" from the intermediate result, and store the results in - "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit - is not set). - - FOR j := 0 to 15 - IF k[j] - dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j] - ELSE - dst.fp16[j] := c.fp16[j] - FI - ENDFOR - dst[MAX:256] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + IF k[j] + dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j] + ELSE + dst.fp16[j] := c.fp16[j] + FI +ENDFOR +dst[MAX:256] := 0 + + + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - subtract packed elements in "c" from the intermediate result, and store the results in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 15 - IF k[j] - dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j] - ELSE - dst.fp16[j] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + IF k[j] + dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j] + ELSE + dst.fp16[j] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - subtract packed elements in "c" from the negated intermediate result, and store the - results in "dst". - - FOR j := 0 to 7 - dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) - c.fp16[j] - ENDFOR - dst[MAX:128] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst". + +FOR j := 0 to 7 + dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) - c.fp16[j] +ENDFOR +dst[MAX:128] := 0 + + + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - subtract packed elements in "c" from the negated intermediate result, and store the - results in "dst" using writemask "k" (elements are copied from "a" when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - IF k[j] - dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) - c.fp16[j] - ELSE - dst.fp16[j] := a.fp16[j] - FI - ENDFOR - dst[MAX:128] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + IF k[j] + dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) - c.fp16[j] + ELSE + dst.fp16[j] := a.fp16[j] + FI +ENDFOR +dst[MAX:128] := 0 + + + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - subtract packed elements in "c" from the negated intermediate result, and store the - results in "dst" using writemask "k" (elements are copied from "c" when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - IF k[j] - dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) - c.fp16[j] - ELSE - dst.fp16[j] := c.fp16[j] - FI - ENDFOR - dst[MAX:128] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + IF k[j] + dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) - c.fp16[j] + ELSE + dst.fp16[j] := c.fp16[j] + FI +ENDFOR +dst[MAX:128] := 0 + + + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - subtract packed elements in "c" from the negated intermediate result, and store the - results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask - bit is not set). - - FOR j := 0 to 7 - IF k[j] - dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) - c.fp16[j] - ELSE - dst.fp16[j] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + IF k[j] + dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) - c.fp16[j] + ELSE + dst.fp16[j] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - subtract packed elements in "c" from the negated intermediate result, and store the - results in "dst". - - FOR j := 0 to 15 - dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) - c.fp16[j] - ENDFOR - dst[MAX:256] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst". + +FOR j := 0 to 15 + dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) - c.fp16[j] +ENDFOR +dst[MAX:256] := 0 + + + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - subtract packed elements in "c" from the negated intermediate result, and store the - results in "dst" using writemask "k" (elements are copied from "a" when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - IF k[j] - dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) - c.fp16[j] - ELSE - dst.fp16[j] := a.fp16[j] - FI - ENDFOR - dst[MAX:256] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + IF k[j] + dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) - c.fp16[j] + ELSE + dst.fp16[j] := a.fp16[j] + FI +ENDFOR +dst[MAX:256] := 0 + + + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - subtract packed elements in "c" from the negated intermediate result, and store the - results in "dst" using writemask "k" (elements are copied from "c" when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - IF k[j] - dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) - c.fp16[j] - ELSE - dst.fp16[j] := c.fp16[j] - FI - ENDFOR - dst[MAX:256] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + IF k[j] + dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) - c.fp16[j] + ELSE + dst.fp16[j] := c.fp16[j] + FI +ENDFOR +dst[MAX:256] := 0 + + + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - subtract packed elements in "c" from the negated intermediate result, and store the - results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask - bit is not set). - - FOR j := 0 to 15 - IF k[j] - dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) - c.fp16[j] - ELSE - dst.fp16[j] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + IF k[j] + dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) - c.fp16[j] + ELSE + dst.fp16[j] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - alternatively add and subtract packed elements in "c" to/from the intermediate result, - and store the results in "dst". - - FOR j := 0 to 7 - IF ((j & 1) == 0) - dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j] - ELSE - dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] - FI - ENDFOR - dst[MAX:128] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst". + +FOR j := 0 to 7 + IF ((j & 1) == 0) + dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j] + ELSE + dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] + FI +ENDFOR +dst[MAX:128] := 0 + + + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
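
fmaddsub alternates per lane on j & 1: even lanes get (a*b)-c, odd lanes (a*b)+c. A direct transcription of the pseudocode (same f32-for-f16 sketch):

    // Even lanes: (a*b) - c; odd lanes: (a*b) + c.
    fn fmaddsub_ph_ref(a: [f32; 8], b: [f32; 8], c: [f32; 8]) -> [f32; 8] {
        core::array::from_fn(|j| {
            let s = if j & 1 == 0 { -c[j] } else { c[j] };
            a[j].mul_add(b[j], s)
        })
    }
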
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - alternatively add and subtract packed elements in "c" to/from the intermediate result, - and store the results in "dst" using writemask "k" (elements are copied from "a" when - the corresponding mask bit is not set). - - FOR j := 0 to 7 - IF k[j] - IF ((j & 1) == 0) + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + IF k[j] + IF ((j & 1) == 0) dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j] - ELSE + ELSE dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] - FI - ELSE - dst.fp16[j] := a.fp16[j] - FI - ENDFOR - dst[MAX:128] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + FI + ELSE + dst.fp16[j] := a.fp16[j] + FI +ENDFOR +dst[MAX:128] := 0 +
+ + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - alternatively add and subtract packed elements in "c" to/from the intermediate result, - and store the results in "dst" using writemask "k" (elements are copied from "c" when - the corresponding mask bit is not set). - - FOR j := 0 to 7 - IF k[j] - IF ((j & 1) == 0) + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + IF k[j] + IF ((j & 1) == 0) dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j] - ELSE + ELSE dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] - FI - ELSE - dst.fp16[j] := c.fp16[j] - FI - ENDFOR - dst[MAX:128] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + FI + ELSE + dst.fp16[j] := c.fp16[j] + FI +ENDFOR +dst[MAX:128] := 0 +
+ + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - alternatively add and subtract packed elements in "c" to/from the intermediate result, - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - IF k[j] - IF ((j & 1) == 0) + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + IF k[j] + IF ((j & 1) == 0) dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j] - ELSE + ELSE dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] - FI - ELSE - dst.fp16[j] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + FI + ELSE + dst.fp16[j] := 0 + FI +ENDFOR +dst[MAX:128] := 0 +
+ + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - alternatively add and subtract packed elements in "c" to/from the intermediate result, - and store the results in "dst". - - FOR j := 0 to 15 - IF ((j & 1) == 0) - dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j] - ELSE - dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] - FI - ENDFOR - dst[MAX:256] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst". + +FOR j := 0 to 15 + IF ((j & 1) == 0) + dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j] + ELSE + dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] + FI +ENDFOR +dst[MAX:256] := 0 + + + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - alternatively add and subtract packed elements in "c" to/from the intermediate result, - and store the results in "dst" using writemask "k" (elements are copied from "a" when - the corresponding mask bit is not set). - - FOR j := 0 to 15 - IF k[j] - IF ((j & 1) == 0) + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + IF k[j] + IF ((j & 1) == 0) dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j] - ELSE + ELSE dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] - FI - ELSE - dst.fp16[j] := a.fp16[j] - FI - ENDFOR - dst[MAX:256] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + FI + ELSE + dst.fp16[j] := a.fp16[j] + FI +ENDFOR +dst[MAX:256] := 0 +
+ + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - alternatively add and subtract packed elements in "c" to/from the intermediate result, - and store the results in "dst" using writemask "k" (elements are copied from "c" when - the corresponding mask bit is not set). - - FOR j := 0 to 15 - IF k[j] - IF ((j & 1) == 0) + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + IF k[j] + IF ((j & 1) == 0) dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j] - ELSE + ELSE dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] - FI - ELSE - dst.fp16[j] := c.fp16[j] - FI - ENDFOR - dst[MAX:256] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + FI + ELSE + dst.fp16[j] := c.fp16[j] + FI +ENDFOR +dst[MAX:256] := 0 +
+ + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - alternatively add and subtract packed elements in "c" to/from the intermediate result, - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - IF k[j] - IF ((j & 1) == 0) + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + IF k[j] + IF ((j & 1) == 0) dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j] - ELSE + ELSE dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] - FI - ELSE - dst.fp16[j] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + FI + ELSE + dst.fp16[j] := 0 + FI +ENDFOR +dst[MAX:256] := 0 +
+ + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - alternatively subtract and add packed elements in "c" to/from the intermediate result, - and store the results in "dst". - - FOR j := 0 to 7 - IF ((j & 1) == 0) - dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] - ELSE - dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j] - FI - ENDFOR - dst[MAX:128] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" to/from the intermediate result, and store the results in "dst". + +FOR j := 0 to 7 + IF ((j & 1) == 0) + dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] + ELSE + dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j] + FI +ENDFOR +dst[MAX:128] := 0 + + + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
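
fmsubadd is the mirror image of fmaddsub: even lanes add c, odd lanes subtract it.

    // Even lanes: (a*b) + c; odd lanes: (a*b) - c.
    fn fmsubadd_ph_ref(a: [f32; 8], b: [f32; 8], c: [f32; 8]) -> [f32; 8] {
        core::array::from_fn(|j| {
            let s = if j & 1 == 0 { c[j] } else { -c[j] };
            a[j].mul_add(b[j], s)
        })
    }
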
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - alternatively subtract and add packed elements in "c" to/from the intermediate result, - and store the results in "dst" using writemask "k" (elements are copied from "a" when - the corresponding mask bit is not set). - - FOR j := 0 to 7 - IF k[j] - IF ((j & 1) == 0) + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + IF k[j] + IF ((j & 1) == 0) dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] - ELSE + ELSE dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j] - FI - ELSE - dst.fp16[j] := a.fp16[j] - FI - ENDFOR - dst[MAX:128] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + FI + ELSE + dst.fp16[j] := a.fp16[j] + FI +ENDFOR +dst[MAX:128] := 0 +
+ + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - alternatively subtract and add packed elements in "c" to/from the intermediate result, - and store the results in "dst" using writemask "k" (elements are copied from "c" when - the corresponding mask bit is not set). - - FOR j := 0 to 7 - IF k[j] - IF ((j & 1) == 0) + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + IF k[j] + IF ((j & 1) == 0) dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] - ELSE + ELSE dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j] - FI - ELSE - dst.fp16[j] := c.fp16[j] - FI - ENDFOR - dst[MAX:128] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + FI + ELSE + dst.fp16[j] := c.fp16[j] + FI +ENDFOR +dst[MAX:128] := 0 +
+ + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - alternatively subtract and add packed elements in "c" to/from the intermediate result, - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - IF k[j] - IF ((j & 1) == 0) + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" to/from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + IF k[j] + IF ((j & 1) == 0) dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] - ELSE + ELSE dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j] - FI - ELSE - dst.fp16[j] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + FI + ELSE + dst.fp16[j] := 0 + FI +ENDFOR +dst[MAX:128] := 0 +
+ + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
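The four 128-bit fmsubadd entries above share one arithmetic body and differ only in what an unselected lane receives. A sketch of that rule, with names of our own choosing (f32 again standing in for f16):

```rust
/// The three masked forms differ only in the fallback for unselected lanes.
#[derive(Clone, Copy)]
enum MaskFallback {
    KeepA, // "mask" forms copy from "a"
    KeepC, // "mask3" forms copy from "c"
    Zero,  // "maskz" forms zero the lane
}

fn masked_lane(computed: f32, a: f32, c: f32, selected: bool, fb: MaskFallback) -> f32 {
    if selected {
        computed
    } else {
        match fb {
            MaskFallback::KeepA => a,
            MaskFallback::KeepC => c,
            MaskFallback::Zero => 0.0,
        }
    }
}
```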
+    Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" to/from the intermediate result, and store the results in "dst".
+FOR j := 0 to 15
+    IF ((j & 1) == 0)
+        dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j]
+    ELSE
+        dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j]
+    FI
+ENDFOR
+dst[MAX:256] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic

+    Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+FOR j := 0 to 15
+    IF k[j]
+        IF ((j & 1) == 0)
+            dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j]
+        ELSE
+            dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j]
+        FI
+    ELSE
+        dst.fp16[j] := a.fp16[j]
+    FI
+ENDFOR
+dst[MAX:256] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic

+    Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).
+FOR j := 0 to 15
+    IF k[j]
+        IF ((j & 1) == 0)
+            dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j]
+        ELSE
+            dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j]
+        FI
+    ELSE
+        dst.fp16[j] := c.fp16[j]
+    FI
+ENDFOR
+dst[MAX:256] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic

+    Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" to/from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 to 15
+    IF k[j]
+        IF ((j & 1) == 0)
+            dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j]
+        ELSE
+            dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j]
+        FI
+    ELSE
+        dst.fp16[j] := 0
+    FI
+ENDFOR
+dst[MAX:256] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic
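For readers comparing the two families: fmsubadd is fmaddsub with the even/odd roles swapped, as this sketch shows (illustrative only, f32 for f16):

```rust
/// fmsubadd: even lanes add "c", odd lanes subtract it (the mirror image of
/// fmaddsub above).
fn fmsubadd_model(a: [f32; 16], b: [f32; 16], c: [f32; 16]) -> [f32; 16] {
    core::array::from_fn(|j| {
        if j % 2 == 0 { a[j] * b[j] + c[j] } else { a[j] * b[j] - c[j] }
    })
}
```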
+    Subtract packed half-precision (16-bit) floating-point elements in "b" from packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst".
+FOR j := 0 TO 7
+    dst.fp16[j] := a.fp16[j] - b.fp16[j]
+ENDFOR
+dst[MAX:128] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic

+    Subtract packed half-precision (16-bit) floating-point elements in "b" from packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+FOR j := 0 TO 7
+    IF k[j]
+        dst.fp16[j] := a.fp16[j] - b.fp16[j]
+    ELSE
+        dst.fp16[j] := src.fp16[j]
+    FI
+ENDFOR
+dst[MAX:128] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic

+    Subtract packed half-precision (16-bit) floating-point elements in "b" from packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 TO 7
+    IF k[j]
+        dst.fp16[j] := a.fp16[j] - b.fp16[j]
+    ELSE
+        dst.fp16[j] := 0
+    FI
+ENDFOR
+dst[MAX:128] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic
+    Subtract packed half-precision (16-bit) floating-point elements in "b" from packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst".
+FOR j := 0 TO 15
+    dst.fp16[j] := a.fp16[j] - b.fp16[j]
+ENDFOR
+dst[MAX:256] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic

+    Subtract packed half-precision (16-bit) floating-point elements in "b" from packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+FOR j := 0 TO 15
+    IF k[j]
+        dst.fp16[j] := a.fp16[j] - b.fp16[j]
+    ELSE
+        dst.fp16[j] := src.fp16[j]
+    FI
+ENDFOR
+dst[MAX:256] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic

+    Subtract packed half-precision (16-bit) floating-point elements in "b" from packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+FOR j := 0 TO 15
+    IF k[j]
+        dst.fp16[j] := a.fp16[j] - b.fp16[j]
+    ELSE
+        dst.fp16[j] := 0
+    FI
+ENDFOR
+dst[MAX:256] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic
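A write-masked subtract in plain Rust, mirroring the "src" fallback of the masked entries above (a sketch with names of our own; f32 stands in for f16):

```rust
/// Selected lanes get a - b; unselected lanes keep the old value from `src`.
fn mask_sub_model(src: [f32; 8], k: u8, a: [f32; 8], b: [f32; 8]) -> [f32; 8] {
    core::array::from_fn(|j| if (k >> j) & 1 != 0 { a[j] - b[j] } else { src[j] })
}
```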
+    Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", and store the results in "dst".
+FOR i := 0 TO 7
+    dst.fp16[i] := a.fp16[i] * b.fp16[i]
+ENDFOR
+dst[MAX:128] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic

+    Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+FOR i := 0 TO 7
+    IF k[i]
+        dst.fp16[i] := a.fp16[i] * b.fp16[i]
+    ELSE
+        dst.fp16[i] := src.fp16[i]
+    FI
+ENDFOR
+dst[MAX:128] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic

+    Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+FOR i := 0 TO 7
+    IF k[i]
+        dst.fp16[i] := a.fp16[i] * b.fp16[i]
+    ELSE
+        dst.fp16[i] := 0
+    FI
+ENDFOR
+dst[MAX:128] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic
+    Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", and store the results in "dst".
+FOR i := 0 TO 15
+    dst.fp16[i] := a.fp16[i] * b.fp16[i]
+ENDFOR
+dst[MAX:256] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic

+    Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+FOR i := 0 TO 15
+    IF k[i]
+        dst.fp16[i] := a.fp16[i] * b.fp16[i]
+    ELSE
+        dst.fp16[i] := src.fp16[i]
+    FI
+ENDFOR
+dst[MAX:256] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic

+    Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+FOR i := 0 TO 15
+    IF k[i]
+        dst.fp16[i] := a.fp16[i] * b.fp16[i]
+    ELSE
+        dst.fp16[i] := 0
+    FI
+ENDFOR
+dst[MAX:256] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic
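The zero-masked variant differs from the write-masked one only in replacing the "src" fallback with 0. A one-line sketch (f32 for f16, name is ours):

```rust
/// Zero-masked multiply: unselected lanes become 0.0 instead of keeping a value.
fn maskz_mul_model(k: u16, a: [f32; 16], b: [f32; 16]) -> [f32; 16] {
    core::array::from_fn(|i| if (k >> i) & 1 != 0 { a[i] * b[i] } else { 0.0 })
}
```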
+    Multiply packed complex numbers in "a" and "b", and store the results in "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]".
+FOR i := 0 to 3
+    dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1])
+    dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1])
+ENDFOR
+dst[MAX:128] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic

+    Multiply packed complex numbers in "a" and "b", and store the results in "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]".
+FOR i := 0 to 3
+    dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1])
+    dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1])
+ENDFOR
+dst[MAX:128] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic

+    Multiply packed complex numbers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]".
+FOR i := 0 to 3
+    IF k[i]
+        dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1])
+        dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1])
+    ELSE
+        dst.fp16[2*i+0] := src.fp16[2*i+0]
+        dst.fp16[2*i+1] := src.fp16[2*i+1]
+    FI
+ENDFOR
+dst[MAX:128] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic

+    Multiply packed complex numbers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]".
+FOR i := 0 to 3
+    IF k[i]
+        dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1])
+        dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1])
+    ELSE
+        dst.fp16[2*i+0] := src.fp16[2*i+0]
+        dst.fp16[2*i+1] := src.fp16[2*i+1]
+    FI
+ENDFOR
+dst[MAX:128] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic

+    Multiply packed complex numbers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]".
+FOR i := 0 to 3
+    IF k[i]
+        dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1])
+        dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1])
+    ELSE
+        dst.fp16[2*i+0] := 0
+        dst.fp16[2*i+1] := 0
+    FI
+ENDFOR
+dst[MAX:128] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic

+    Multiply packed complex numbers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]".
+FOR i := 0 to 3
+    IF k[i]
+        dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1])
+        dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1])
+    ELSE
+        dst.fp16[2*i+0] := 0
+        dst.fp16[2*i+1] := 0
+    FI
+ENDFOR
+dst[MAX:128] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic
+    Multiply packed complex numbers in "a" and "b", and store the results in "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]".
+FOR i := 0 to 7
+    dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1])
+    dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1])
+ENDFOR
+dst[MAX:256] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic

+    Multiply packed complex numbers in "a" and "b", and store the results in "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]".
+FOR i := 0 to 7
+    dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1])
+    dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1])
+ENDFOR
+dst[MAX:256] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic

+    Multiply packed complex numbers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]".
+FOR i := 0 to 7
+    IF k[i]
+        dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1])
+        dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1])
+    ELSE
+        dst.fp16[2*i+0] := src.fp16[2*i+0]
+        dst.fp16[2*i+1] := src.fp16[2*i+1]
+    FI
+ENDFOR
+dst[MAX:256] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic

+    Multiply packed complex numbers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]".
+FOR i := 0 to 7
+    IF k[i]
+        dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1])
+        dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1])
+    ELSE
+        dst.fp16[2*i+0] := src.fp16[2*i+0]
+        dst.fp16[2*i+1] := src.fp16[2*i+1]
+    FI
+ENDFOR
+dst[MAX:256] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic

+    Multiply packed complex numbers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]".
+FOR i := 0 to 7
+    IF k[i]
+        dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1])
+        dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1])
+    ELSE
+        dst.fp16[2*i+0] := 0
+        dst.fp16[2*i+1] := 0
+    FI
+ENDFOR
+dst[MAX:256] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic

+    Multiply packed complex numbers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]".
+FOR i := 0 to 7
+    IF k[i]
+        dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1])
+        dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1])
+    ELSE
+        dst.fp16[2*i+0] := 0
+        dst.fp16[2*i+1] := 0
+    FI
+ENDFOR
+dst[MAX:256] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic
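For the complex-multiply entries above, each register packs (re, im) pairs into adjacent lanes. A minimal scalar sketch of the 128-bit case (four complex numbers); f32 stands in for f16 and intermediate f16 rounding is ignored:

```rust
/// Complex multiply over (re, im) lane pairs, mirroring the pseudocode.
fn cmul_model(a: [f32; 8], b: [f32; 8]) -> [f32; 8] {
    let mut dst = [0.0f32; 8];
    for i in 0..4 {
        let (ar, ai) = (a[2 * i], a[2 * i + 1]);
        let (br, bi) = (b[2 * i], b[2 * i + 1]);
        dst[2 * i] = ar * br - ai * bi;     // real part
        dst[2 * i + 1] = ai * br + ar * bi; // imaginary part
    }
    dst
}
```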
+    Multiply packed complex numbers in "a" by the complex conjugates of packed complex numbers in "b", and store the results in "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]".
+FOR i := 0 to 3
+    dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1])
+    dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1])
+ENDFOR
+dst[MAX:128] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic

+    Multiply packed complex numbers in "a" by the complex conjugates of packed complex numbers in "b", and store the results in "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]".
+FOR i := 0 to 3
+    dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1])
+    dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1])
+ENDFOR
+dst[MAX:128] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic

+    Multiply packed complex numbers in "a" by the complex conjugates of packed complex numbers in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]".
+FOR i := 0 to 3
+    IF k[i]
+        dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1])
+        dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1])
+    ELSE
+        dst.fp16[2*i+0] := src.fp16[2*i+0]
+        dst.fp16[2*i+1] := src.fp16[2*i+1]
+    FI
+ENDFOR
+dst[MAX:128] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic

+    Multiply packed complex numbers in "a" by the complex conjugates of packed complex numbers in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]".
+FOR i := 0 to 3
+    IF k[i]
+        dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1])
+        dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1])
+    ELSE
+        dst.fp16[2*i+0] := src.fp16[2*i+0]
+        dst.fp16[2*i+1] := src.fp16[2*i+1]
+    FI
+ENDFOR
+dst[MAX:128] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic

+    Multiply packed complex numbers in "a" by the complex conjugates of packed complex numbers in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]".
+FOR i := 0 to 3
+    IF k[i]
+        dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1])
+        dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1])
+    ELSE
+        dst.fp16[2*i+0] := 0
+        dst.fp16[2*i+1] := 0
+    FI
+ENDFOR
+dst[MAX:128] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic

+    Multiply packed complex numbers in "a" by the complex conjugates of packed complex numbers in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]".
+FOR i := 0 to 3
+    IF k[i]
+        dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1])
+        dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1])
+    ELSE
+        dst.fp16[2*i+0] := 0
+        dst.fp16[2*i+1] := 0
+    FI
+ENDFOR
+dst[MAX:128] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic
+    Multiply packed complex numbers in "a" by the complex conjugates of packed complex numbers in "b", and store the results in "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]".
+FOR i := 0 to 7
+    dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1])
+    dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1])
+ENDFOR
+dst[MAX:256] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic

+    Multiply packed complex numbers in "a" by the complex conjugates of packed complex numbers in "b", and store the results in "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]".
+FOR i := 0 to 7
+    dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1])
+    dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1])
+ENDFOR
+dst[MAX:256] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic

+    Multiply packed complex numbers in "a" by the complex conjugates of packed complex numbers in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]".
+FOR i := 0 to 7
+    IF k[i]
+        dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1])
+        dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1])
+    ELSE
+        dst.fp16[2*i+0] := src.fp16[2*i+0]
+        dst.fp16[2*i+1] := src.fp16[2*i+1]
+    FI
+ENDFOR
+dst[MAX:256] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic

+    Multiply packed complex numbers in "a" by the complex conjugates of packed complex numbers in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]".
+FOR i := 0 to 7
+    IF k[i]
+        dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1])
+        dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1])
+    ELSE
+        dst.fp16[2*i+0] := src.fp16[2*i+0]
+        dst.fp16[2*i+1] := src.fp16[2*i+1]
+    FI
+ENDFOR
+dst[MAX:256] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic

+    Multiply packed complex numbers in "a" by the complex conjugates of packed complex numbers in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]".
+FOR i := 0 to 7
+    IF k[i]
+        dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1])
+        dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1])
+    ELSE
+        dst.fp16[2*i+0] := 0
+        dst.fp16[2*i+1] := 0
+    FI
+ENDFOR
+dst[MAX:256] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic

+    Multiply packed complex numbers in "a" by the complex conjugates of packed complex numbers in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]".
+FOR i := 0 to 7
+    IF k[i]
+        dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1])
+        dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1])
+    ELSE
+        dst.fp16[2*i+0] := 0
+        dst.fp16[2*i+1] := 0
+    FI
+ENDFOR
+dst[MAX:256] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic
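Multiplying by the conjugate of "b" is the plain complex multiply with two signs flipped, i.e. a * conj(b). A sketch under the same assumptions as before (f32 for f16, our own naming):

```rust
/// Complex multiply by the conjugate of `b`: a * conj(b).
fn fcmul_model(a: [f32; 8], b: [f32; 8]) -> [f32; 8] {
    let mut dst = [0.0f32; 8];
    for i in 0..4 {
        let (ar, ai) = (a[2 * i], a[2 * i + 1]);
        let (br, bi) = (b[2 * i], b[2 * i + 1]);
        dst[2 * i] = ar * br + ai * bi;     // "-" in the plain multiply
        dst[2 * i + 1] = ai * br - ar * bi; // "+" in the plain multiply
    }
    dst
}
```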
+    Multiply packed complex numbers in "a" and "b", accumulate to the corresponding complex numbers in "c", and store the results in "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]".
+FOR i := 0 to 3
+    dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1]) + c.fp16[2*i+0]
+    dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1]) + c.fp16[2*i+1]
+ENDFOR
+dst[MAX:128] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic

+    Multiply packed complex numbers in "a" and "b", accumulate to the corresponding complex numbers in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]".
+FOR i := 0 to 3
+    IF k[i]
+        dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1]) + c.fp16[2*i+0]
+        dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1]) + c.fp16[2*i+1]
+    ELSE
+        dst.fp16[2*i+0] := a.fp16[2*i+0]
+        dst.fp16[2*i+1] := a.fp16[2*i+1]
+    FI
+ENDFOR
+dst[MAX:128] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic

+    Multiply packed complex numbers in "a" and "b", accumulate to the corresponding complex numbers in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]".
+FOR i := 0 to 3
+    IF k[i]
+        dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1]) + c.fp16[2*i+0]
+        dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1]) + c.fp16[2*i+1]
+    ELSE
+        dst.fp16[2*i+0] := c.fp16[2*i+0]
+        dst.fp16[2*i+1] := c.fp16[2*i+1]
+    FI
+ENDFOR
+dst[MAX:128] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic

+    Multiply packed complex numbers in "a" and "b", accumulate to the corresponding complex numbers in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]".
+FOR i := 0 to 3
+    IF k[i]
+        dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1]) + c.fp16[2*i+0]
+        dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1]) + c.fp16[2*i+1]
+    ELSE
+        dst.fp16[2*i+0] := 0
+        dst.fp16[2*i+1] := 0
+    FI
+ENDFOR
+dst[MAX:128] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic
+    Multiply packed complex numbers in "a" and "b", accumulate to the corresponding complex numbers in "c", and store the results in "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]".
+FOR i := 0 to 7
+    dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1]) + c.fp16[2*i+0]
+    dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1]) + c.fp16[2*i+1]
+ENDFOR
+dst[MAX:256] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic

+    Multiply packed complex numbers in "a" and "b", accumulate to the corresponding complex numbers in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]".
+FOR i := 0 to 7
+    IF k[i]
+        dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1]) + c.fp16[2*i+0]
+        dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1]) + c.fp16[2*i+1]
+    ELSE
+        dst.fp16[2*i+0] := a.fp16[2*i+0]
+        dst.fp16[2*i+1] := a.fp16[2*i+1]
+    FI
+ENDFOR
+dst[MAX:256] := 0
+    AVX512_FP16
+    AVX512VL
+    immintrin.h
+    Arithmetic
- - - - - - Multiply packed complex numbers in "a" and "b", accumulate to the corresponding - complex numbers in "c", and store the results in "dst" using writemask "k" (elements are - copied from "c" when the corresponding mask bit is not set). Each complex number is - composed of two adjacent half-precision (16-bit) floating-point elements, which defines - the complex number "complex = vec.fp16[0] + i * vec.fp16[1]". - - FOR i := 0 to 7 - IF k[i] - dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1]) + - c.fp16[2*i+0] - dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1]) + - c.fp16[2*i+1] - ELSE - dst.fp16[2*i+0] := c.fp16[2*i+0] - dst.fp16[2*i+1] := c.fp16[2*i+1] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed complex numbers in "a" and "b", accumulate to the corresponding complex numbers in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]". + +FOR i := 0 to 7 + IF k[i] + dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1]) + c.fp16[2*i+0] + dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1]) + c.fp16[2*i+1] + ELSE + dst.fp16[2*i+0] := c.fp16[2*i+0] + dst.fp16[2*i+1] := c.fp16[2*i+1] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed complex numbers in "a" and "b", accumulate to the corresponding - complex numbers in "c", and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). Each complex number is composed - of two adjacent half-precision (16-bit) floating-point elements, which defines the - complex number "complex = vec.fp16[0] + i * vec.fp16[1]". - - FOR i := 0 to 7 - IF k[i] - dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1]) + - c.fp16[2*i+0] - dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1]) + - c.fp16[2*i+1] - ELSE - dst.fp16[2*i+0] := 0 - dst.fp16[2*i+1] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed complex numbers in "a" and "b", accumulate to the corresponding complex numbers in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]". + +FOR i := 0 to 7 + IF k[i] + dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1]) + c.fp16[2*i+0] + dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1]) + c.fp16[2*i+1] + ELSE + dst.fp16[2*i+0] := 0 + dst.fp16[2*i+1] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
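All of the fmadd_pch pseudocode above evaluates the same complex multiply-accumulate, dst = a*b + c, over interleaved (real, imaginary) fp16 pairs. A one-lane scalar sketch (f32 stand-in for f16; illustrative only):

    // One complex lane of fmadd_pch: each tuple holds (fp16[2i], fp16[2i+1]),
    // i.e. (real, imaginary) per "complex = vec.fp16[0] + i * vec.fp16[1]".
    fn fmadd_pch_lane(a: (f32, f32), b: (f32, f32), c: (f32, f32)) -> (f32, f32) {
        let re = a.0 * b.0 - a.1 * b.1 + c.0;
        let im = a.1 * b.0 + a.0 * b.1 + c.1;
        (re, im)
    }

    // e.g. (1+2i)*(3+4i) + (0.5-0.5i) = -4.5 + 9.5i:
    // fmadd_pch_lane((1.0, 2.0), (3.0, 4.0), (0.5, -0.5)) == (-4.5, 9.5)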
- - - - - Multiply packed complex numbers in "a" by the complex conjugates of packed - complex numbers in "b", accumulate to the corresponding complex numbers in "c", and - store the results in "dst". Each complex number is composed of two adjacent - half-precision (16-bit) floating-point elements, which defines the complex number - "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = - vec.fp16[0] - i * vec.fp16[1]". - - FOR i := 0 to 3 - dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1]) + - c.fp16[2*i+0] - dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1]) + - c.fp16[2*i+1] - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + Multiply packed complex numbers in "a" by the complex conjugates of packed complex numbers in "b", accumulate to the corresponding complex numbers in "c", and store the results in "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]". + +FOR i := 0 to 3 + dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1]) + c.fp16[2*i+0] + dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1]) + c.fp16[2*i+1] +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed complex numbers in "a" by the complex conjugates of packed - complex numbers in "b", accumulate to the corresponding complex numbers in "c", and - store the results in "dst" using writemask "k" (elements are copied from "a" when the - corresponding mask bit is not set). Each complex number is composed of two adjacent - half-precision (16-bit) floating-point elements, which defines the complex number - "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = - vec.fp16[0] - i * vec.fp16[1]". - - FOR i := 0 to 3 - IF k[i] - dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1]) + - c.fp16[2*i+0] - dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1]) + - c.fp16[2*i+1] - ELSE - dst.fp16[2*i+0] := a.fp16[2*i+0] - dst.fp16[2*i+1] := a.fp16[2*i+1] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed complex numbers in "a" by the complex conjugates of packed complex numbers in "b", accumulate to the corresponding complex numbers in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]". + +FOR i := 0 to 3 + IF k[i] + dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1]) + c.fp16[2*i+0] + dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1]) + c.fp16[2*i+1] + ELSE + dst.fp16[2*i+0] := a.fp16[2*i+0] + dst.fp16[2*i+1] := a.fp16[2*i+1] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed complex numbers in "a" by the complex conjugates of packed - complex numbers in "b", accumulate to the corresponding complex numbers in "c", and - store the results in "dst" using writemask "k" (elements are copied from "c" when the - corresponding mask bit is not set). Each complex number is composed of two adjacent - half-precision (16-bit) floating-point elements, which defines the complex number - "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = - vec.fp16[0] - i * vec.fp16[1]". - - FOR i := 0 to 3 - IF k[i] - dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1]) + - c.fp16[2*i+0] - dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1]) + - c.fp16[2*i+1] - ELSE - dst.fp16[2*i+0] := c.fp16[2*i+0] - dst.fp16[2*i+1] := c.fp16[2*i+1] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed complex numbers in "a" by the complex conjugates of packed complex numbers in "b", accumulate to the corresponding complex numbers in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]". + +FOR i := 0 to 3 + IF k[i] + dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1]) + c.fp16[2*i+0] + dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1]) + c.fp16[2*i+1] + ELSE + dst.fp16[2*i+0] := c.fp16[2*i+0] + dst.fp16[2*i+1] := c.fp16[2*i+1] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed complex numbers in "a" by the complex conjugates of packed - complex numbers in "b", accumulate to the corresponding complex numbers in "c", and - store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). Each complex number is composed of two adjacent - half-precision (16-bit) floating-point elements, which defines the complex number - "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = - vec.fp16[0] - i * vec.fp16[1]". - - FOR i := 0 to 3 - IF k[i] - dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1]) + - c.fp16[2*i+0] - dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1]) + - c.fp16[2*i+1] - ELSE - dst.fp16[2*i+0] := 0 - dst.fp16[2*i+1] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed complex numbers in "a" by the complex conjugates of packed complex numbers in "b", accumulate to the corresponding complex numbers in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]". + +FOR i := 0 to 3 + IF k[i] + dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1]) + c.fp16[2*i+0] + dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1]) + c.fp16[2*i+1] + ELSE + dst.fp16[2*i+0] := 0 + dst.fp16[2*i+1] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Multiply packed complex numbers in "a" by the complex conjugates of packed - complex numbers in "b", accumulate to the corresponding complex numbers in "c", and - store the results in "dst". Each complex number is composed of two adjacent - half-precision (16-bit) floating-point elements, which defines the complex number - "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = - vec.fp16[0] - i * vec.fp16[1]". - - FOR i := 0 to 7 - dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1]) + - c.fp16[2*i+0] - dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1]) + - c.fp16[2*i+1] - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + Multiply packed complex numbers in "a" by the complex conjugates of packed complex numbers in "b", accumulate to the corresponding complex numbers in "c", and store the results in "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]". + +FOR i := 0 to 7 + dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1]) + c.fp16[2*i+0] + dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1]) + c.fp16[2*i+1] +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed complex numbers in "a" by the complex conjugates of packed - complex numbers in "b", accumulate to the corresponding complex numbers in "c", and - store the results in "dst" using writemask "k" (elements are copied from "a" when the - corresponding mask bit is not set). Each complex number is composed of two adjacent - half-precision (16-bit) floating-point elements, which defines the complex number - "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = - vec.fp16[0] - i * vec.fp16[1]". - - FOR i := 0 to 7 - IF k[i] - dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1]) + - c.fp16[2*i+0] - dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1]) + - c.fp16[2*i+1] - ELSE - dst.fp16[2*i+0] := a.fp16[2*i+0] - dst.fp16[2*i+1] := a.fp16[2*i+1] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed complex numbers in "a" by the complex conjugates of packed complex numbers in "b", accumulate to the corresponding complex numbers in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]". + +FOR i := 0 to 7 + IF k[i] + dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1]) + c.fp16[2*i+0] + dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1]) + c.fp16[2*i+1] + ELSE + dst.fp16[2*i+0] := a.fp16[2*i+0] + dst.fp16[2*i+1] := a.fp16[2*i+1] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed complex numbers in "a" by the complex conjugates of packed - complex numbers in "b", accumulate to the corresponding complex numbers in "c", and - store the results in "dst" using writemask "k" (elements are copied from "c" when the - corresponding mask bit is not set). Each complex number is composed of two adjacent - half-precision (16-bit) floating-point elements, which defines the complex number - "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = - vec.fp16[0] - i * vec.fp16[1]". - - FOR i := 0 to 7 - IF k[i] - dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1]) + - c.fp16[2*i+0] - dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1]) + - c.fp16[2*i+1] - ELSE - dst.fp16[2*i+0] := c.fp16[2*i+0] - dst.fp16[2*i+1] := c.fp16[2*i+1] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed complex numbers in "a" by the complex conjugates of packed complex numbers in "b", accumulate to the corresponding complex numbers in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]". + +FOR i := 0 to 7 + IF k[i] + dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1]) + c.fp16[2*i+0] + dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1]) + c.fp16[2*i+1] + ELSE + dst.fp16[2*i+0] := c.fp16[2*i+0] + dst.fp16[2*i+1] := c.fp16[2*i+1] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed complex numbers in "a" by the complex conjugates of packed - complex numbers in "b", accumulate to the corresponding complex numbers in "c", and - store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). Each complex number is composed of two adjacent - half-precision (16-bit) floating-point elements, which defines the complex number - "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = - vec.fp16[0] - i * vec.fp16[1]". - - FOR i := 0 to 7 - IF k[i] - dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1]) + - c.fp16[2*i+0] - dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1]) + - c.fp16[2*i+1] - ELSE - dst.fp16[2*i+0] := 0 - dst.fp16[2*i+1] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply packed complex numbers in "a" by the complex conjugates of packed complex numbers in "b", accumulate to the corresponding complex numbers in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]". + +FOR i := 0 to 7 + IF k[i] + dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1]) + c.fp16[2*i+0] + dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1]) + c.fp16[2*i+1] + ELSE + dst.fp16[2*i+0] := 0 + dst.fp16[2*i+1] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
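The fcmadd_pch family is identical except that "b" is conjugated first, which flips exactly two signs relative to the lane above. A sketch under the same f32 stand-in:

    // One complex lane of fcmadd_pch: dst = a * conj(b) + c, where
    // conj(b) = (b.0, -b.1). Compare the two sign flips against
    // fmadd_pch_lane above.
    fn fcmadd_pch_lane(a: (f32, f32), b: (f32, f32), c: (f32, f32)) -> (f32, f32) {
        let re = a.0 * b.0 + a.1 * b.1 + c.0;
        let im = a.1 * b.0 - a.0 * b.1 + c.1;
        (re, im)
    }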
- - - Reduce the packed half-precision (16-bit) floating-point elements in "a" by - addition. Returns the sum of all elements in "a". - - tmp := a - FOR i := 0 to 7 - tmp.fp16[i] := tmp.fp16[i] + tmp.fp16[i+8] - ENDFOR - FOR i := 0 to 3 - tmp.fp16[i] := tmp.fp16[i] + tmp.fp16[i+4] - ENDFOR - FOR i := 0 to 1 - tmp.fp16[i] := tmp.fp16[i] + tmp.fp16[i+2] - ENDFOR - dst.fp16[0] := tmp.fp16[0] + tmp.fp16[1] - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + Reduce the packed half-precision (16-bit) floating-point elements in "a" by addition. Returns the sum of all elements in "a". + +tmp := a +FOR i := 0 to 7 + tmp.fp16[i] := tmp.fp16[i] + tmp.fp16[i+8] +ENDFOR +FOR i := 0 to 3 + tmp.fp16[i] := tmp.fp16[i] + tmp.fp16[i+4] +ENDFOR +FOR i := 0 to 1 + tmp.fp16[i] := tmp.fp16[i] + tmp.fp16[i+2] +ENDFOR +dst.fp16[0] := tmp.fp16[0] + tmp.fp16[1] + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - Reduce the packed half-precision (316-bit) floating-point elements in "a" by - multiplication. Returns the product of all elements in "a". - - tmp := a - FOR i := 0 to 7 - tmp.fp16[i] := tmp.fp16[i] * tmp.fp16[i+8] - ENDFOR - FOR i := 0 to 3 - tmp.fp16[i] := tmp.fp16[i] * tmp.fp16[i+4] - ENDFOR - FOR i := 0 to 1 - tmp.fp16[i] := tmp.fp16[i] * tmp.fp16[i+2] - ENDFOR - dst.fp16[0] := tmp.fp16[0] * tmp.fp16[1] - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + Reduce the packed half-precision (16-bit) floating-point elements in "a" by multiplication. Returns the product of all elements in "a". + +tmp := a +FOR i := 0 to 7 + tmp.fp16[i] := tmp.fp16[i] * tmp.fp16[i+8] +ENDFOR +FOR i := 0 to 3 + tmp.fp16[i] := tmp.fp16[i] * tmp.fp16[i+4] +ENDFOR +FOR i := 0 to 1 + tmp.fp16[i] := tmp.fp16[i] * tmp.fp16[i+2] +ENDFOR +dst.fp16[0] := tmp.fp16[0] * tmp.fp16[1] + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - Reduce the packed half-precision (16-bit) floating-point elements in "a" by - maximum. Returns the maximum of all elements in "a". - - tmp := a - FOR i := 0 to 7 - tmp.fp16[i] := (tmp.fp16[i] > tmp.fp16[i+8] ? tmp.fp16[i] : tmp.fp16[i+8]) - ENDFOR - FOR i := 0 to 3 - tmp.fp16[i] := (tmp.fp16[i] > tmp.fp16[i+4] ? tmp.fp16[i] : tmp.fp16[i+4]) - ENDFOR - FOR i := 0 to 1 - tmp.fp16[i] := (tmp.fp16[i] > tmp.fp16[i+2] ? tmp.fp16[i] : tmp.fp16[i+2]) - ENDFOR - dst.fp16[0] := (tmp.fp16[0] > tmp.fp16[1] ? tmp.fp16[0] : tmp.fp16[1]) - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + Reduce the packed half-precision (16-bit) floating-point elements in "a" by maximum. Returns the maximum of all elements in "a". + +tmp := a +FOR i := 0 to 7 + tmp.fp16[i] := (tmp.fp16[i] > tmp.fp16[i+8] ? tmp.fp16[i] : tmp.fp16[i+8]) +ENDFOR +FOR i := 0 to 3 + tmp.fp16[i] := (tmp.fp16[i] > tmp.fp16[i+4] ? tmp.fp16[i] : tmp.fp16[i+4]) +ENDFOR +FOR i := 0 to 1 + tmp.fp16[i] := (tmp.fp16[i] > tmp.fp16[i+2] ? tmp.fp16[i] : tmp.fp16[i+2]) +ENDFOR +dst.fp16[0] := (tmp.fp16[0] > tmp.fp16[1] ? tmp.fp16[0] : tmp.fp16[1]) + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - Reduce the packed half-precision (16-bit) floating-point elements in "a" by - minimum. Returns the minimum of all elements in "a". - - tmp := a - FOR i := 0 to 7 - tmp.fp16[i] := (tmp.fp16[i] < tmp.fp16[i+8] ? tmp.fp16[i] : tmp.fp16[i+8]) - ENDFOR - FOR i := 0 to 3 - tmp.fp16[i] := (tmp.fp16[i] < tmp.fp16[i+4] ? tmp.fp16[i] : tmp.fp16[i+4]) - ENDFOR - FOR i := 0 to 1 - tmp.fp16[i] := (tmp.fp16[i] < tmp.fp16[i+2] ? tmp.fp16[i] : tmp.fp16[i+2]) - ENDFOR - dst.fp16[0] := (tmp.fp16[0] < tmp.fp16[1] ? tmp.fp16[0] : tmp.fp16[1]) - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + Reduce the packed half-precision (16-bit) floating-point elements in "a" by minimum. Returns the minimum of all elements in "a". + +tmp := a +FOR i := 0 to 7 + tmp.fp16[i] := (tmp.fp16[i] < tmp.fp16[i+8] ? tmp.fp16[i] : tmp.fp16[i+8]) +ENDFOR +FOR i := 0 to 3 + tmp.fp16[i] := (tmp.fp16[i] < tmp.fp16[i+4] ? tmp.fp16[i] : tmp.fp16[i+4]) +ENDFOR +FOR i := 0 to 1 + tmp.fp16[i] := (tmp.fp16[i] < tmp.fp16[i+2] ? tmp.fp16[i] : tmp.fp16[i+2]) +ENDFOR +dst.fp16[0] := (tmp.fp16[0] < tmp.fp16[1] ? tmp.fp16[0] : tmp.fp16[1]) + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - Reduce the packed half-precision (16-bit) floating-point elements in "a" by - addition. Returns the sum of all elements in "a". - - tmp := a - FOR i := 0 to 3 - tmp.fp16[i] := tmp.fp16[i] + tmp.fp16[i+4] - ENDFOR - FOR i := 0 to 1 - tmp.fp16[i] := tmp.fp16[i] + tmp.fp16[i+2] - ENDFOR - dst.fp16[0] := tmp.fp16[0] + tmp.fp16[1] - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + Reduce the packed half-precision (16-bit) floating-point elements in "a" by addition. Returns the sum of all elements in "a". + +tmp := a +FOR i := 0 to 3 + tmp.fp16[i] := tmp.fp16[i] + tmp.fp16[i+4] +ENDFOR +FOR i := 0 to 1 + tmp.fp16[i] := tmp.fp16[i] + tmp.fp16[i+2] +ENDFOR +dst.fp16[0] := tmp.fp16[0] + tmp.fp16[1] + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - Reduce the packed half-precision (16-bit) floating-point elements in "a" by - multiplication. Returns the product of all elements in "a". - - tmp := a - FOR i := 0 to 3 - tmp.fp16[i] := tmp.fp16[i] * tmp.fp16[i+4] - ENDFOR - FOR i := 0 to 1 - tmp.fp16[i] := tmp.fp16[i] * tmp.fp16[i+2] - ENDFOR - dst.fp16[0] := tmp.fp16[0] * tmp.fp16[1] - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + Reduce the packed half-precision (16-bit) floating-point elements in "a" by multiplication. Returns the product of all elements in "a". + +tmp := a +FOR i := 0 to 3 + tmp.fp16[i] := tmp.fp16[i] * tmp.fp16[i+4] +ENDFOR +FOR i := 0 to 1 + tmp.fp16[i] := tmp.fp16[i] * tmp.fp16[i+2] +ENDFOR +dst.fp16[0] := tmp.fp16[0] * tmp.fp16[1] + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - Reduce the packed half-precision (16-bit) floating-point elements in "a" by - maximum. Returns the maximum of all elements in "a". - - tmp := a - FOR i := 0 to 3 - tmp.fp16[i] := (tmp.fp16[i] > tmp.fp16[i+4] ? tmp.fp16[i] : tmp.fp16[i+4]) - ENDFOR - FOR i := 0 to 1 - tmp.fp16[i] := (tmp.fp16[i] > tmp.fp16[i+2] ? tmp.fp16[i] : tmp.fp16[i+2]) - ENDFOR - dst.fp16[0] := (tmp.fp16[0] > tmp.fp16[1] ? tmp.fp16[0] : tmp.fp16[1]) - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + Reduce the packed half-precision (16-bit) floating-point elements in "a" by maximum. Returns the maximum of all elements in "a". + +tmp := a +FOR i := 0 to 3 + tmp.fp16[i] := (tmp.fp16[i] > tmp.fp16[i+4] ? tmp.fp16[i] : tmp.fp16[i+4]) +ENDFOR +FOR i := 0 to 1 + tmp.fp16[i] := (tmp.fp16[i] > tmp.fp16[i+2] ? tmp.fp16[i] : tmp.fp16[i+2]) +ENDFOR +dst.fp16[0] := (tmp.fp16[0] > tmp.fp16[1] ? tmp.fp16[0] : tmp.fp16[1]) + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - Reduce the packed half-precision (16-bit) floating-point elements in "a" by - minimum. Returns the minimum of all elements in "a". - - tmp := a - FOR i := 0 to 3 - tmp.fp16[i] := (tmp.fp16[i] < tmp.fp16[i+4] ? tmp.fp16[i] : tmp.fp16[i+4]) - ENDFOR - FOR i := 0 to 1 - tmp.fp16[i] := (tmp.fp16[i] < tmp.fp16[i+2] ? tmp.fp16[i] : tmp.fp16[i+2]) - ENDFOR - dst.fp16[0] := (tmp.fp16[0] < tmp.fp16[1] ? tmp.fp16[0] : tmp.fp16[1]) - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + Reduce the packed half-precision (16-bit) floating-point elements in "a" by minimum. Returns the minimum of all elements in "a". + +tmp := a +FOR i := 0 to 3 + tmp.fp16[i] := (tmp.fp16[i] < tmp.fp16[i+4] ? tmp.fp16[i] : tmp.fp16[i+4]) +ENDFOR +FOR i := 0 to 1 + tmp.fp16[i] := (tmp.fp16[i] < tmp.fp16[i+2] ? tmp.fp16[i] : tmp.fp16[i+2]) +ENDFOR +dst.fp16[0] := (tmp.fp16[0] < tmp.fp16[1] ? tmp.fp16[0] : tmp.fp16[1]) + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
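The reduce_* operations above fold the upper half onto the lower half until one element remains, so the combination order is a balanced tree, not a left-to-right scan; for non-associative fp16 addition and multiplication that ordering is observable. A generic sketch of the scheme (f32 stand-in; name illustrative):

    // Pairwise tree reduction matching the pseudocode: each pass halves
    // the active width by combining tmp[i] with tmp[i + n].
    fn reduce_tree(v: &[f32], op: impl Fn(f32, f32) -> f32) -> f32 {
        assert!(v.len().is_power_of_two());
        let mut tmp = v.to_vec();
        let mut n = tmp.len() / 2;
        while n >= 1 {
            for i in 0..n {
                tmp[i] = op(tmp[i], tmp[i + n]);
            }
            n /= 2;
        }
        tmp[0]
    }

    // reduce_tree(&v, |x, y| x + y)                     -> reduce_add analogue
    // reduce_tree(&v, |x, y| if x > y { x } else { y }) -> reduce_max analogue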
- - - Finds the absolute value of each packed half-precision (16-bit) floating-point - element in "v2", storing the results in "dst". - - FOR j := 0 to 15 - dst.fp16[j] := ABS(v2.fp16[j]) - ENDFOR - dst[MAX:256] := 0 - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + Finds the absolute value of each packed half-precision (16-bit) floating-point element in "v2", storing the results in "dst". + +FOR j := 0 to 15 + dst.fp16[j] := ABS(v2.fp16[j]) +ENDFOR +dst[MAX:256] := 0 + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - Finds the absolute value of each packed half-precision (16-bit) floating-point - element in "v2", storing the results in "dst". - - FOR j := 0 to 7 - dst.fp16[j] := ABS(v2.fp16[j]) - ENDFOR - dst[MAX:128] := 0 - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + Finds the absolute value of each packed half-precision (16-bit) floating-point element in "v2", storing the results in "dst". + +FOR j := 0 to 7 + dst.fp16[j] := ABS(v2.fp16[j]) +ENDFOR +dst[MAX:128] := 0 + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
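ABS on an IEEE format only clears the sign bit, so the abs_ph entries touch no other bits and can be lowered to a plain bitwise AND against a broadcast constant. On the raw binary16 lane encoding (sketch; illustrative):

    // Clear bit 15, the binary16 sign bit, of each 16-bit lane; this is
    // all that ABS(v2.fp16[j]) does at the bit level.
    fn abs_ph_lanes(lanes: &mut [u16]) {
        for lane in lanes.iter_mut() {
            *lane &= 0x7FFF;
        }
    }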
- - - Compute the complex conjugates of complex numbers in "a", and store the results - in "dst". Each complex number is composed of two adjacent half-precision (16-bit) - floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * - vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]". - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := a[i+31:i] XOR FP32(-0.0) - ENDFOR - dst[MAX:256] := 0 - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + Compute the complex conjugates of complex numbers in "a", and store the results in "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := a[i+31:i] XOR FP32(-0.0) +ENDFOR +dst[MAX:256] := 0 + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - Compute the complex conjugates of complex numbers in "a", and store the results - in "dst". Each complex number is composed of two adjacent half-precision (16-bit) - floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * - vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]". - - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := a[i+31:i] XOR FP32(-0.0) - ENDFOR - dst[MAX:128] := 0 - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + Compute the complex conjugates of complex numbers in "a", and store the results in "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := a[i+31:i] XOR FP32(-0.0) +ENDFOR +dst[MAX:128] := 0 + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Compute the complex conjugates of complex numbers in "a", and store the results - in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask - bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) - floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * - vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]". - - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] XOR FP32(-0.0) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + Compute the complex conjugates of complex numbers in "a", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]". + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] XOR FP32(-0.0) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Compute the complex conjugates of complex numbers in "a", and store the results - in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask - bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) - floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * - vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]". - - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] XOR FP32(-0.0) - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + + Compute the complex conjugates of complex numbers in "a", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]". + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] XOR FP32(-0.0) + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - Compute the complex conjugates of complex numbers in "a", and store the results - in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is - not set). Each complex number is composed of two adjacent half-precision (16-bit) - floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * - vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]". - - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] XOR FP32(-0.0) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + Compute the complex conjugates of complex numbers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]". + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] XOR FP32(-0.0) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
- - - - Compute the complex conjugates of complex numbers in "a", and store the results - in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is - not set). Each complex number is composed of two adjacent half-precision (16-bit) - floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * - vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]". - - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := a[i+31:i] XOR FP32(-0.0) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - AVX512_FP16 - AVX512VL -
immintrin.h
- Arithmetic + + + + Compute the complex conjugates of complex numbers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]". + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := a[i+31:i] XOR FP32(-0.0) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Arithmetic
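Note that the conj_pch pseudocode above operates on 32-bit lanes: each (real, imaginary) fp16 pair is one dword, and XOR with FP32(-0.0) (bit pattern 0x8000_0000) flips bit 31 only, i.e. the sign of the high fp16 half, which is the imaginary part. A sketch (illustrative):

    // Negate the imaginary half of each fp16 complex pair by XORing the
    // containing 32-bit lane with the bit pattern of -0.0f32 (0x8000_0000).
    fn conj_pch_lanes(lanes: &mut [u32]) {
        for lane in lanes.iter_mut() {
            *lane ^= (-0.0f32).to_bits();
        }
    }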
- - - - - Compare packed half-precision (16-bit) floating-point elements in "a" and "b" - based on the comparison operand specified by "imm8", and store the results in mask - vector "k". - CASE (imm8[4:0]) OF - 0: OP := _CMP_EQ_OQ - 1: OP := _CMP_LT_OS - 2: OP := _CMP_LE_OS - 3: OP := _CMP_UNORD_Q - 4: OP := _CMP_NEQ_UQ - 5: OP := _CMP_NLT_US - 6: OP := _CMP_NLE_US - 7: OP := _CMP_ORD_Q - 8: OP := _CMP_EQ_UQ - 9: OP := _CMP_NGE_US - 10: OP := _CMP_NGT_US - 11: OP := _CMP_FALSE_OQ - 12: OP := _CMP_NEQ_OQ - 13: OP := _CMP_GE_OS - 14: OP := _CMP_GT_OS - 15: OP := _CMP_TRUE_UQ - 16: OP := _CMP_EQ_OS - 17: OP := _CMP_LT_OQ - 18: OP := _CMP_LE_OQ - 19: OP := _CMP_UNORD_S - 20: OP := _CMP_NEQ_US - 21: OP := _CMP_NLT_UQ - 22: OP := _CMP_NLE_UQ - 23: OP := _CMP_ORD_S - 24: OP := _CMP_EQ_US - 25: OP := _CMP_NGE_UQ - 26: OP := _CMP_NGT_UQ - 27: OP := _CMP_FALSE_OS - 28: OP := _CMP_NEQ_OS - 29: OP := _CMP_GE_OQ - 30: OP := _CMP_GT_OQ - 31: OP := _CMP_TRUE_US - ESAC - FOR j := 0 to 7 - k[j] := (a.fp16[j] OP b.fp16[j]) ? 1 : 0 - ENDFOR - k[MAX:8] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed half-precision (16-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + CASE (imm8[4:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC +FOR j := 0 to 7 + k[j] := (a.fp16[j] OP b.fp16[j]) ? 1 : 0 +ENDFOR +k[MAX:8] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Compare
- - - - - - Compare packed half-precision (16-bit) floating-point elements in "a" and "b" - based on the comparison operand specified by "imm8", and store the results in mask - vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit - is not set). - CASE (imm8[4:0]) OF - 0: OP := _CMP_EQ_OQ - 1: OP := _CMP_LT_OS - 2: OP := _CMP_LE_OS - 3: OP := _CMP_UNORD_Q - 4: OP := _CMP_NEQ_UQ - 5: OP := _CMP_NLT_US - 6: OP := _CMP_NLE_US - 7: OP := _CMP_ORD_Q - 8: OP := _CMP_EQ_UQ - 9: OP := _CMP_NGE_US - 10: OP := _CMP_NGT_US - 11: OP := _CMP_FALSE_OQ - 12: OP := _CMP_NEQ_OQ - 13: OP := _CMP_GE_OS - 14: OP := _CMP_GT_OS - 15: OP := _CMP_TRUE_UQ - 16: OP := _CMP_EQ_OS - 17: OP := _CMP_LT_OQ - 18: OP := _CMP_LE_OQ - 19: OP := _CMP_UNORD_S - 20: OP := _CMP_NEQ_US - 21: OP := _CMP_NLT_UQ - 22: OP := _CMP_NLE_UQ - 23: OP := _CMP_ORD_S - 24: OP := _CMP_EQ_US - 25: OP := _CMP_NGE_UQ - 26: OP := _CMP_NGT_UQ - 27: OP := _CMP_FALSE_OS - 28: OP := _CMP_NEQ_OS - 29: OP := _CMP_GE_OQ - 30: OP := _CMP_GT_OQ - 31: OP := _CMP_TRUE_US - ESAC - FOR j := 0 to 7 - IF k1[j] - k[j] := ( a.fp16[j] OP b.fp16[j] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:8] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Compare + + + + + + Compare packed half-precision (16-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + CASE (imm8[4:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC +FOR j := 0 to 7 + IF k1[j] + k[j] := ( a.fp16[j] OP b.fp16[j] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Compare
- - - - - Compare packed half-precision (16-bit) floating-point elements in "a" and "b" - based on the comparison operand specified by "imm8", and store the results in mask - vector "k". - CASE (imm8[4:0]) OF - 0: OP := _CMP_EQ_OQ - 1: OP := _CMP_LT_OS - 2: OP := _CMP_LE_OS - 3: OP := _CMP_UNORD_Q - 4: OP := _CMP_NEQ_UQ - 5: OP := _CMP_NLT_US - 6: OP := _CMP_NLE_US - 7: OP := _CMP_ORD_Q - 8: OP := _CMP_EQ_UQ - 9: OP := _CMP_NGE_US - 10: OP := _CMP_NGT_US - 11: OP := _CMP_FALSE_OQ - 12: OP := _CMP_NEQ_OQ - 13: OP := _CMP_GE_OS - 14: OP := _CMP_GT_OS - 15: OP := _CMP_TRUE_UQ - 16: OP := _CMP_EQ_OS - 17: OP := _CMP_LT_OQ - 18: OP := _CMP_LE_OQ - 19: OP := _CMP_UNORD_S - 20: OP := _CMP_NEQ_US - 21: OP := _CMP_NLT_UQ - 22: OP := _CMP_NLE_UQ - 23: OP := _CMP_ORD_S - 24: OP := _CMP_EQ_US - 25: OP := _CMP_NGE_UQ - 26: OP := _CMP_NGT_UQ - 27: OP := _CMP_FALSE_OS - 28: OP := _CMP_NEQ_OS - 29: OP := _CMP_GE_OQ - 30: OP := _CMP_GT_OQ - 31: OP := _CMP_TRUE_US - ESAC - FOR j := 0 to 15 - k[j] := (a.fp16[j] OP b.fp16[j]) ? 1 : 0 - ENDFOR - k[MAX:16] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Compare + + + + + Compare packed half-precision (16-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". + CASE (imm8[4:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC +FOR j := 0 to 15 + k[j] := (a.fp16[j] OP b.fp16[j]) ? 1 : 0 +ENDFOR +k[MAX:16] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Compare
- - - - - - Compare packed half-precision (16-bit) floating-point elements in "a" and "b" - based on the comparison operand specified by "imm8", and store the results in mask - vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit - is not set). - CASE (imm8[4:0]) OF - 0: OP := _CMP_EQ_OQ - 1: OP := _CMP_LT_OS - 2: OP := _CMP_LE_OS - 3: OP := _CMP_UNORD_Q - 4: OP := _CMP_NEQ_UQ - 5: OP := _CMP_NLT_US - 6: OP := _CMP_NLE_US - 7: OP := _CMP_ORD_Q - 8: OP := _CMP_EQ_UQ - 9: OP := _CMP_NGE_US - 10: OP := _CMP_NGT_US - 11: OP := _CMP_FALSE_OQ - 12: OP := _CMP_NEQ_OQ - 13: OP := _CMP_GE_OS - 14: OP := _CMP_GT_OS - 15: OP := _CMP_TRUE_UQ - 16: OP := _CMP_EQ_OS - 17: OP := _CMP_LT_OQ - 18: OP := _CMP_LE_OQ - 19: OP := _CMP_UNORD_S - 20: OP := _CMP_NEQ_US - 21: OP := _CMP_NLT_UQ - 22: OP := _CMP_NLE_UQ - 23: OP := _CMP_ORD_S - 24: OP := _CMP_EQ_US - 25: OP := _CMP_NGE_UQ - 26: OP := _CMP_NGT_UQ - 27: OP := _CMP_FALSE_OS - 28: OP := _CMP_NEQ_OS - 29: OP := _CMP_GE_OQ - 30: OP := _CMP_GT_OQ - 31: OP := _CMP_TRUE_US - ESAC - FOR j := 0 to 15 - IF k1[j] - k[j] := ( a.fp16[j] OP b.fp16[j] ) ? 1 : 0 - ELSE - k[j] := 0 - FI - ENDFOR - k[MAX:16] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Compare + + + + + + Compare packed half-precision (16-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + CASE (imm8[4:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC +FOR j := 0 to 15 + IF k1[j] + k[j] := ( a.fp16[j] OP b.fp16[j] ) ? 1 : 0 + ELSE + k[j] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Compare
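The imm8[4:0] CASE table is the same 32-predicate scheme used by the existing _mm_cmp_ps family: the O/U suffix picks ordered versus unordered NaN behaviour, Q/S picks quiet versus signalling. A scalar sketch of a few predicates plus the mask packing from the pseudocode (f32 stand-in; only a subset of the 32 predicates shown):

    const _CMP_EQ_OQ: u8 = 0;
    const _CMP_LT_OS: u8 = 1;
    const _CMP_UNORD_Q: u8 = 3;
    const _CMP_NEQ_UQ: u8 = 4;

    // Ordered predicates are false when either input is NaN;
    // unordered predicates are true in that case.
    fn cmp_lane(a: f32, b: f32, imm8: u8) -> bool {
        let unord = a.is_nan() || b.is_nan();
        match imm8 & 0x1F {
            _CMP_EQ_OQ => !unord && a == b,
            _CMP_LT_OS => !unord && a < b,
            _CMP_UNORD_Q => unord,
            _CMP_NEQ_UQ => unord || a != b,
            _ => unimplemented!("remaining predicates omitted from this sketch"),
        }
    }

    // Pack per-lane results the way the pseudocode does:
    // bit j of k holds lane j, and k[MAX:8] is cleared.
    fn cmp_ph_mask128(a: &[f32; 8], b: &[f32; 8], imm8: u8) -> u8 {
        let mut k = 0u8;
        for j in 0..8 {
            k |= (cmp_lane(a[j], b[j], imm8) as u8) << j;
        }
        k
    }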
- - - Convert packed signed 16-bit integers in "a" to packed half-precision (16-bit) - floating-point elements, and store the results in "dst". - - FOR j := 0 TO 7 - dst.fp16[j] := Convert_Int16_To_FP16(a.word[j]) - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + Convert packed signed 16-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 TO 7 + dst.fp16[j] := Convert_Int16_To_FP16(a.word[j]) +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed signed 16-bit integers in "a" to packed half-precision (16-bit) - floating-point elements, and store the results in "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 TO 7 - IF k[j] - dst.fp16[j] := Convert_Int16_To_FP16(a.word[j]) - ELSE - dst.fp16[j] := src.fp16[j] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed signed 16-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 TO 7 + IF k[j] + dst.fp16[j] := Convert_Int16_To_FP16(a.word[j]) + ELSE + dst.fp16[j] := src.fp16[j] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed signed 16-bit integers in "a" to packed half-precision (16-bit) - floating-point elements, and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 TO 7 - IF k[j] - dst.fp16[j] := Convert_Int16_To_FP16(a.word[j]) - ELSE - dst.fp16[j] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + Convert packed signed 16-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 TO 7 + IF k[j] + dst.fp16[j] := Convert_Int16_To_FP16(a.word[j]) + ELSE + dst.fp16[j] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed signed 16-bit integers in "a" to packed half-precision (16-bit) - floating-point elements, and store the results in "dst". - - FOR j := 0 TO 15 - dst.fp16[j] := Convert_Int16_To_FP16(a.word[j]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + Convert packed signed 16-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 TO 15 + dst.fp16[j] := Convert_Int16_To_FP16(a.word[j]) +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed signed 16-bit integers in "a" to packed half-precision (16-bit) - floating-point elements, and store the results in "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 TO 15 - IF k[j] - dst.fp16[j] := Convert_Int16_To_FP16(a.word[j]) - ELSE - dst.fp16[j] := src.fp16[j] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed signed 16-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 TO 15 + IF k[j] + dst.fp16[j] := Convert_Int16_To_FP16(a.word[j]) + ELSE + dst.fp16[j] := src.fp16[j] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed signed 16-bit integers in "a" to packed half-precision (16-bit) - floating-point elements, and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 TO 15 - IF k[j] - dst.fp16[j] := Convert_Int16_To_FP16(a.word[j]) - ELSE - dst.fp16[j] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + Convert packed signed 16-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 TO 15 + IF k[j] + dst.fp16[j] := Convert_Int16_To_FP16(a.word[j]) + ELSE + dst.fp16[j] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
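Every i16 fits exactly in f32, but binary16 carries only 11 significand bits, so Convert_Int16_To_FP16 can round: magnitudes needing more than 11 significant bits (e.g. 2049 or 32767) land on the nearest representable half-precision value under the current rounding mode. A small exactness check capturing that budget (illustrative model, not part of the patch):

    // True iff an i16 survives the trip through binary16 exactly: the
    // value's significant-bit width (top set bit down to bottom set bit)
    // must fit in the 11-bit binary16 significand.
    fn i16_to_fp16_is_exact(x: i16) -> bool {
        let m = (x as i32).unsigned_abs();
        m == 0 || (32 - m.leading_zeros()) - m.trailing_zeros() <= 11
    }

    // i16_to_fp16_is_exact(2048) == true   (2^11: one significant bit)
    // i16_to_fp16_is_exact(2049) == false  (needs 12 significant bits)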
- - - Convert packed unsigned 16-bit integers in "a" to packed half-precision - (16-bit) floating-point elements, and store the results in "dst". - - FOR j := 0 TO 7 - dst.fp16[j] := Convert_Int16_To_FP16(a.word[j]) - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + Convert packed unsigned 16-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 TO 7 + dst.fp16[j] := Convert_Int16_To_FP16(a.word[j]) +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed unsigned 16-bit integers in "a" to packed half-precision - (16-bit) floating-point elements, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 TO 7 - IF k[j] - dst.fp16[j] := Convert_Int16_To_FP16(a.word[j]) - ELSE - dst.fp16[j] := src.fp16[j] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed unsigned 16-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 TO 7 + IF k[j] + dst.fp16[j] := Convert_Int16_To_FP16(a.word[j]) + ELSE + dst.fp16[j] := src.fp16[j] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed unsigned 16-bit integers in "a" to packed half-precision - (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 TO 7 - IF k[j] - dst.fp16[j] := Convert_Int16_To_FP16(a.word[j]) - ELSE - dst.fp16[j] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + Convert packed unsigned 16-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 TO 7 + IF k[j] + dst.fp16[j] := Convert_Int16_To_FP16(a.word[j]) + ELSE + dst.fp16[j] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed unsigned 16-bit integers in "a" to packed half-precision - (16-bit) floating-point elements, and store the results in "dst". - - FOR j := 0 TO 15 - dst.fp16[j] := Convert_Int16_To_FP16(a.word[j]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + Convert packed unsigned 16-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 TO 15 + dst.fp16[j] := Convert_Int16_To_FP16(a.word[j]) +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed unsigned 16-bit integers in "a" to packed half-precision - (16-bit) floating-point elements, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 TO 15 - IF k[j] - dst.fp16[j] := Convert_Int16_To_FP16(a.word[j]) - ELSE - dst.fp16[j] := src.fp16[j] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed unsigned 16-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 TO 15 + IF k[j] + dst.fp16[j] := Convert_Int16_To_FP16(a.word[j]) + ELSE + dst.fp16[j] := src.fp16[j] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed unsigned 16-bit integers in "a" to packed half-precision - (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 TO 15 - IF k[j] - dst.fp16[j] := Convert_Int16_To_FP16(a.word[j]) - ELSE - dst.fp16[j] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + Convert packed unsigned 16-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 TO 15 + IF k[j] + dst.fp16[j] := Convert_Int16_To_FP16(a.word[j]) + ELSE + dst.fp16[j] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
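The unsigned entries have the same shape (the vendored pseudocode even reuses the Convert_Int16_To_FP16 helper name for them), but the full 0..=65535 range now matters: the binary16 maximum finite value is 65504, so the top of the u16 range rounds to +infinity under round-to-nearest-even. The exactness check adjusted for u16 (illustrative model):

    // u16 -> binary16 exactness: same 11-significant-bit budget as the
    // signed case, plus a magnitude cap at 65504, the largest finite
    // binary16 value (e.g. 65535 converts to +infinity).
    fn u16_to_fp16_is_exact(x: u16) -> bool {
        let m = x as u32;
        m == 0 || (m <= 65504 && (32 - m.leading_zeros()) - m.trailing_zeros() <= 11)
    }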
- - - Convert packed signed 32-bit integers in "a" to packed half-precision (16-bit) - floating-point elements, and store the results in "dst". The upper 64 bits of "dst" are - zeroed out. - - FOR j := 0 TO 3 - dst.fp16[j] := Convert_Int32_To_FP16(a.dword[j]) - ENDFOR - dst[MAX:64] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + Convert packed signed 32-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst". The upper 64 bits of "dst" are zeroed out. + +FOR j := 0 TO 3 + dst.fp16[j] := Convert_Int32_To_FP16(a.dword[j]) +ENDFOR +dst[MAX:64] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed signed 32-bit integers in "a" to packed half-precision (16-bit) - floating-point elements, and store the results in "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). The upper 64 bits of - "dst" are zeroed out. - - FOR j := 0 TO 3 - IF k[j] - dst.fp16[j] := Convert_Int32_To_FP16(a.dword[j]) - ELSE - dst.fp16[j] := src.fp16[j] - FI - ENDFOR - dst[MAX:64] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed signed 32-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The upper 64 bits of "dst" are zeroed out. + +FOR j := 0 TO 3 + IF k[j] + dst.fp16[j] := Convert_Int32_To_FP16(a.dword[j]) + ELSE + dst.fp16[j] := src.fp16[j] + FI +ENDFOR +dst[MAX:64] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed signed 32-bit integers in "a" to packed half-precision (16-bit) - floating-point elements, and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). The upper 64 bits of "dst" are - zeroed out. - - FOR j := 0 TO 3 - IF k[j] - dst.fp16[j] := Convert_Int32_To_FP16(a.dword[j]) - ELSE - dst.fp16[j] := 0 - FI - ENDFOR - dst[MAX:64] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + Convert packed signed 32-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The upper 64 bits of "dst" are zeroed out. + +FOR j := 0 TO 3 + IF k[j] + dst.fp16[j] := Convert_Int32_To_FP16(a.dword[j]) + ELSE + dst.fp16[j] := 0 + FI +ENDFOR +dst[MAX:64] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed signed 32-bit integers in "a" to packed half-precision (16-bit) - floating-point elements, and store the results in "dst". - - FOR j := 0 TO 7 - dst.fp16[j] := Convert_Int32_To_FP16(a.dword[j]) - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + Convert packed signed 32-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 TO 7 + dst.fp16[j] := Convert_Int32_To_FP16(a.dword[j]) +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed signed 32-bit integers in "a" to packed half-precision (16-bit) - floating-point elements, and store the results in "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 TO 7 - IF k[j] - dst.fp16[j] := Convert_Int32_To_FP16(a.dword[j]) - ELSE - dst.fp16[j] := src.fp16[j] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed signed 32-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 TO 7 + IF k[j] + dst.fp16[j] := Convert_Int32_To_FP16(a.dword[j]) + ELSE + dst.fp16[j] := src.fp16[j] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed signed 32-bit integers in "a" to packed half-precision (16-bit) - floating-point elements, and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 TO 7 - IF k[j] - dst.fp16[j] := Convert_Int32_To_FP16(a.dword[j]) - ELSE - dst.fp16[j] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + Convert packed signed 32-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 TO 7 + IF k[j] + dst.fp16[j] := Convert_Int32_To_FP16(a.dword[j]) + ELSE + dst.fp16[j] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed unsigned 32-bit integers in "a" to packed half-precision - (16-bit) floating-point elements, and store the results in "dst". The upper 64 bits of - "dst" are zeroed out. - - FOR j := 0 TO 3 - dst.fp16[j] := Convert_Int32_To_FP16(a.dword[j]) - ENDFOR - dst[MAX:64] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + Convert packed unsigned 32-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst". The upper 64 bits of "dst" are zeroed out. + +FOR j := 0 TO 3 + dst.fp16[j] := Convert_Int32_To_FP16(a.dword[j]) +ENDFOR +dst[MAX:64] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed unsigned 32-bit integers in "a" to packed half-precision - (16-bit) floating-point elements, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). The upper - 64 bits of "dst" are zeroed out. - - FOR j := 0 TO 3 - IF k[j] - dst.fp16[j] := Convert_Int32_To_FP16(a.dword[j]) - ELSE - dst.fp16[j] := src.fp16[j] - FI - ENDFOR - dst[MAX:64] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed unsigned 32-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The upper 64 bits of "dst" are zeroed out. + +FOR j := 0 TO 3 + IF k[j] + dst.fp16[j] := Convert_Int32_To_FP16(a.dword[j]) + ELSE + dst.fp16[j] := src.fp16[j] + FI +ENDFOR +dst[MAX:64] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed unsigned 32-bit integers in "a" to packed half-precision - (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). The upper 64 bits - of "dst" are zeroed out. - - FOR j := 0 TO 3 - IF k[j] - dst.fp16[j] := Convert_Int32_To_FP16(a.dword[j]) - ELSE - dst.fp16[j] := 0 - FI - ENDFOR - dst[MAX:64] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + Convert packed unsigned 32-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The upper 64 bits of "dst" are zeroed out. + +FOR j := 0 TO 3 + IF k[j] + dst.fp16[j] := Convert_Int32_To_FP16(a.dword[j]) + ELSE + dst.fp16[j] := 0 + FI +ENDFOR +dst[MAX:64] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed unsigned 32-bit integers in "a" to packed half-precision - (16-bit) floating-point elements, and store the results in "dst". - - FOR j := 0 TO 7 - dst.fp16[j] := Convert_Int32_To_FP16(a.dword[j]) - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + Convert packed unsigned 32-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 TO 7 + dst.fp16[j] := Convert_Int32_To_FP16(a.dword[j]) +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed unsigned 32-bit integers in "a" to packed half-precision - (16-bit) floating-point elements, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 TO 7 - IF k[j] - dst.fp16[j] := Convert_Int32_To_FP16(a.dword[j]) - ELSE - dst.fp16[j] := src.fp16[j] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed unsigned 32-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 TO 7 + IF k[j] + dst.fp16[j] := Convert_Int32_To_FP16(a.dword[j]) + ELSE + dst.fp16[j] := src.fp16[j] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed unsigned 32-bit integers in "a" to packed half-precision - (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 TO 7 - IF k[j] - dst.fp16[j] := Convert_Int32_To_FP16(a.dword[j]) - ELSE - dst.fp16[j] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + Convert packed unsigned 32-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 TO 7 + IF k[j] + dst.fp16[j] := Convert_Int32_To_FP16(a.dword[j]) + ELSE + dst.fp16[j] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed signed 64-bit integers in "a" to packed half-precision (16-bit) - floating-point elements, and store the results in "dst". The upper 96 bits of "dst" are - zeroed out. - - FOR j := 0 TO 1 - dst.fp16[j] := Convert_Int64_To_FP16(a.qword[j]) - ENDFOR - dst[MAX:32] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + Convert packed signed 64-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst". The upper 96 bits of "dst" are zeroed out. + +FOR j := 0 TO 1 + dst.fp16[j] := Convert_Int64_To_FP16(a.qword[j]) +ENDFOR +dst[MAX:32] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
-
-
-
-
-
- Convert packed signed 64-bit integers in "a" to packed half-precision (16-bit)
- floating-point elements, and store the results in "dst" using writemask "k" (elements
- are copied from "src" when the corresponding mask bit is not set). The upper 96 bits of
- "dst" are zeroed out.
-
- FOR j := 0 TO 1
- IF k[j]
- dst.fp16[j] := Convert_Int64_To_FP16(a.qword[j])
- ELSE
- dst.fp16[j] := src.fp16[j]
- FI
- ENDFOR
- dst[MAX:32] := 0
-
-
- AVX512_FP16
- AVX512VL
- &#13;
- immintrin.h&#13;
- Convert
+
+
+
+
+
+ Convert packed signed 64-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The upper 96 bits of "dst" are zeroed out.
+
+FOR j := 0 TO 1
+    IF k[j]
+        dst.fp16[j] := Convert_Int64_To_FP16(a.qword[j])
+    ELSE
+        dst.fp16[j] := src.fp16[j]
+    FI
+ENDFOR
+dst[MAX:32] := 0
+
+
+ AVX512_FP16
+ AVX512VL
+ &#13;
+ immintrin.h&#13;
+ Convert
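
The `dst[MAX:32] := 0` tail is worth spelling out: a 128-bit destination holds eight `f16` lanes, and two qword-sourced results fill only the low 32 bits, so the remaining six lanes (96 bits) are zeroed. A hypothetical scalar model, with the same `f32`-for-`f16` stand-in:

    // Two i64 inputs produce two fp16 results; the other six lanes of
    // the 128-bit destination are always zero.
    fn cvtepi64_ph_writemask_model(src: [f32; 8], k: u8, a: [i64; 2]) -> [f32; 8] {
        let mut dst = [0.0f32; 8];
        for j in 0..2 {
            dst[j] = if (k >> j) & 1 == 1 {
                a[j] as f32 // dst.fp16[j] := Convert_Int64_To_FP16(a.qword[j])
            } else {
                src[j]
            };
        }
        dst // lanes 2..8 model dst[MAX:32] := 0
    }
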
- - - - Convert packed signed 64-bit integers in "a" to packed half-precision (16-bit) - floating-point elements, and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). The upper 96 bits of "dst" are - zeroed out. - - FOR j := 0 TO 1 - IF k[j] - dst.fp16[j] := Convert_Int64_To_FP16(a.qword[j]) - ELSE - dst.fp16[j] := 0 - FI - ENDFOR - dst[MAX:32] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + Convert packed signed 64-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The upper 96 bits of "dst" are zeroed out. + +FOR j := 0 TO 1 + IF k[j] + dst.fp16[j] := Convert_Int64_To_FP16(a.qword[j]) + ELSE + dst.fp16[j] := 0 + FI +ENDFOR +dst[MAX:32] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed signed 64-bit integers in "a" to packed half-precision (16-bit) - floating-point elements, and store the results in "dst". The upper 64 bits of "dst" are - zeroed out. - - FOR j := 0 TO 3 - dst.fp16[j] := Convert_Int64_To_FP16(a.qword[j]) - ENDFOR - dst[MAX:64] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + Convert packed signed 64-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst". The upper 64 bits of "dst" are zeroed out. + +FOR j := 0 TO 3 + dst.fp16[j] := Convert_Int64_To_FP16(a.qword[j]) +ENDFOR +dst[MAX:64] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed signed 64-bit integers in "a" to packed half-precision (16-bit) - floating-point elements, and store the results in "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). The upper 64 bits of - "dst" are zeroed out. - - FOR j := 0 TO 3 - IF k[j] - dst.fp16[j] := Convert_Int64_To_FP16(a.qword[j]) - ELSE - dst.fp16[j] := src.fp16[j] - FI - ENDFOR - dst[MAX:64] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed signed 64-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The upper 64 bits of "dst" are zeroed out. + +FOR j := 0 TO 3 + IF k[j] + dst.fp16[j] := Convert_Int64_To_FP16(a.qword[j]) + ELSE + dst.fp16[j] := src.fp16[j] + FI +ENDFOR +dst[MAX:64] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed signed 64-bit integers in "a" to packed half-precision (16-bit) - floating-point elements, and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). The upper 64 bits of "dst" are - zeroed out. - - FOR j := 0 TO 3 - IF k[j] - dst.fp16[j] := Convert_Int64_To_FP16(a.qword[j]) - ELSE - dst.fp16[j] := 0 - FI - ENDFOR - dst[MAX:64] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + Convert packed signed 64-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The upper 64 bits of "dst" are zeroed out. + +FOR j := 0 TO 3 + IF k[j] + dst.fp16[j] := Convert_Int64_To_FP16(a.qword[j]) + ELSE + dst.fp16[j] := 0 + FI +ENDFOR +dst[MAX:64] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed unsigned 64-bit integers in "a" to packed half-precision - (16-bit) floating-point elements, and store the results in "dst". The upper 96 bits of - "dst" are zeroed out. - - FOR j := 0 TO 1 - dst.fp16[j] := Convert_Int64_To_FP16(a.qword[j]) - ENDFOR - dst[MAX:32] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + Convert packed unsigned 64-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst". The upper 96 bits of "dst" are zeroed out. + +FOR j := 0 TO 1 + dst.fp16[j] := Convert_Int64_To_FP16(a.qword[j]) +ENDFOR +dst[MAX:32] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed unsigned 64-bit integers in "a" to packed half-precision - (16-bit) floating-point elements, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). The upper - 96 bits of "dst" are zeroed out. - - FOR j := 0 TO 1 - IF k[j] - dst.fp16[j] := Convert_Int64_To_FP16(a.qword[j]) - ELSE - dst.fp16[j] := src.fp16[j] - FI - ENDFOR - dst[MAX:32] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed unsigned 64-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The upper 96 bits of "dst" are zeroed out. + +FOR j := 0 TO 1 + IF k[j] + dst.fp16[j] := Convert_Int64_To_FP16(a.qword[j]) + ELSE + dst.fp16[j] := src.fp16[j] + FI +ENDFOR +dst[MAX:32] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed unsigned 64-bit integers in "a" to packed half-precision - (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). The upper 96 bits - of "dst" are zeroed out. - - FOR j := 0 TO 1 - IF k[j] - dst.fp16[j] := Convert_Int64_To_FP16(a.qword[j]) - ELSE - dst.fp16[j] := 0 - FI - ENDFOR - dst[MAX:32] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + Convert packed unsigned 64-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The upper 96 bits of "dst" are zeroed out. + +FOR j := 0 TO 1 + IF k[j] + dst.fp16[j] := Convert_Int64_To_FP16(a.qword[j]) + ELSE + dst.fp16[j] := 0 + FI +ENDFOR +dst[MAX:32] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed unsigned 64-bit integers in "a" to packed half-precision - (16-bit) floating-point elements, and store the results in "dst". The upper 64 bits of - "dst" are zeroed out. - - FOR j := 0 TO 3 - dst.fp16[j] := Convert_Int64_To_FP16(a.qword[j]) - ENDFOR - dst[MAX:64] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + Convert packed unsigned 64-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst". The upper 64 bits of "dst" are zeroed out. + +FOR j := 0 TO 3 + dst.fp16[j] := Convert_Int64_To_FP16(a.qword[j]) +ENDFOR +dst[MAX:64] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed unsigned 64-bit integers in "a" to packed half-precision - (16-bit) floating-point elements, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). The upper - 64 bits of "dst" are zeroed out. - - FOR j := 0 TO 3 - IF k[j] - dst.fp16[j] := Convert_Int64_To_FP16(a.qword[j]) - ELSE - dst.fp16[j] := src.fp16[j] - FI - ENDFOR - dst[MAX:64] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed unsigned 64-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The upper 64 bits of "dst" are zeroed out. + +FOR j := 0 TO 3 + IF k[j] + dst.fp16[j] := Convert_Int64_To_FP16(a.qword[j]) + ELSE + dst.fp16[j] := src.fp16[j] + FI +ENDFOR +dst[MAX:64] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed unsigned 64-bit integers in "a" to packed half-precision - (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). The upper 64 bits - of "dst" are zeroed out. - - FOR j := 0 TO 3 - IF k[j] - dst.fp16[j] := Convert_Int64_To_FP16(a.qword[j]) - ELSE - dst.fp16[j] := 0 - FI - ENDFOR - dst[MAX:64] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + Convert packed unsigned 64-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The upper 64 bits of "dst" are zeroed out. + +FOR j := 0 TO 3 + IF k[j] + dst.fp16[j] := Convert_Int64_To_FP16(a.qword[j]) + ELSE + dst.fp16[j] := 0 + FI +ENDFOR +dst[MAX:64] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed double-precision (64-bit) floating-point elements in "a" to - packed half-precision (16-bit) floating-point elements, and store the results in "dst". - The upper 96 bits of "dst" are zeroed out. - - FOR j := 0 TO 1 - dst.fp16[j] := Convert_FP64_To_FP16(a.fp64[j]) - ENDFOR - dst[MAX:32] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst". The upper 96 bits of "dst" are zeroed out. + +FOR j := 0 TO 1 + dst.fp16[j] := Convert_FP64_To_FP16(a.fp64[j]) +ENDFOR +dst[MAX:32] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed double-precision (64-bit) floating-point elements in "a" to - packed half-precision (16-bit) floating-point elements, and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). The upper 96 bits of "dst" are zeroed out. - - FOR j := 0 TO 1 - IF k[j] - dst.fp16[j] := Convert_FP64_To_FP16(a.fp64[j]) - ELSE - dst.fp16[j] := src.fp16[j] - FI - ENDFOR - dst[MAX:32] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The upper 96 bits of "dst" are zeroed out. + +FOR j := 0 TO 1 + IF k[j] + dst.fp16[j] := Convert_FP64_To_FP16(a.fp64[j]) + ELSE + dst.fp16[j] := src.fp16[j] + FI +ENDFOR +dst[MAX:32] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed double-precision (64-bit) floating-point elements in "a" to - packed half-precision (16-bit) floating-point elements, and store the results in "dst" - using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - The upper 96 bits of "dst" are zeroed out. - - FOR j := 0 TO 1 - IF k[j] - dst.fp16[j] := Convert_FP64_To_FP16(a.fp64[j]) - ELSE - dst.fp16[j] := 0 - FI - ENDFOR - dst[MAX:32] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The upper 96 bits of "dst" are zeroed out. + +FOR j := 0 TO 1 + IF k[j] + dst.fp16[j] := Convert_FP64_To_FP16(a.fp64[j]) + ELSE + dst.fp16[j] := 0 + FI +ENDFOR +dst[MAX:32] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed double-precision (64-bit) floating-point elements in "a" to - packed half-precision (16-bit) floating-point elements, and store the results in "dst". - The upper 64 bits of "dst" are zeroed out. - - FOR j := 0 TO 3 - dst.fp16[j] := Convert_FP64_To_FP16(a.fp64[j]) - ENDFOR - dst[MAX:64] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst". The upper 64 bits of "dst" are zeroed out. + +FOR j := 0 TO 3 + dst.fp16[j] := Convert_FP64_To_FP16(a.fp64[j]) +ENDFOR +dst[MAX:64] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed double-precision (64-bit) floating-point elements in "a" to - packed half-precision (16-bit) floating-point elements, and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). The upper 64 bits of "dst" are zeroed out. - - FOR j := 0 TO 3 - IF k[j] - dst.fp16[j] := Convert_FP64_To_FP16(a.fp64[j]) - ELSE - dst.fp16[j] := src.fp16[j] - FI - ENDFOR - dst[MAX:64] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed double-precision (64-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The upper 64 bits of "dst" are zeroed out. + +FOR j := 0 TO 3 + IF k[j] + dst.fp16[j] := Convert_FP64_To_FP16(a.fp64[j]) + ELSE + dst.fp16[j] := src.fp16[j] + FI +ENDFOR +dst[MAX:64] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
-
-
-
-
- Convert packed double-precision (64-bit) floating-point elements in "a" to
- packed half-precision (16-bit) floating-point elements, and store the results in "dst"
- using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
- The upper 64 bits of "dst" are zeroed out.
-
- FOR j := 0 TO 3
- IF k[j]
- dst.fp16[j] := Convert_FP64_To_FP16(a.fp64[j])
- ELSE
- dst.fp16[j] := 0
- FI
- ENDFOR
- dst[MAX:64] := 0
-
-
- AVX512_FP16
- AVX512VL
- &#13;
- immintrin.h&#13;
- Convert
+
+
+
+
+ Convert packed double-precision (64-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The upper 64 bits of "dst" are zeroed out.
+
+FOR j := 0 TO 3
+    IF k[j]
+        dst.fp16[j] := Convert_FP64_To_FP16(a.fp64[j])
+    ELSE
+        dst.fp16[j] := 0
+    FI
+ENDFOR
+dst[MAX:64] := 0
+
+
+ AVX512_FP16
+ AVX512VL
+ &#13;
+ immintrin.h&#13;
+ Convert
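
Narrowing to half precision loses range as well as precision: the largest finite `f16` value is 65504. A minimal nightly-only illustration, assuming the `f16` casts and constants gated behind the `f16` feature this patch series enables (the exact unstable API surface may differ):

    #![feature(f16)]

    fn main() {
        // 1.0e5 exceeds the f16 maximum of 65504, so the narrowing
        // cast overflows to infinity.
        let h = 1.0e5f64 as f16;
        assert!(h == f16::INFINITY);
        // 0.5 is exactly representable and survives the round trip.
        assert_eq!(0.5f64 as f16 as f64, 0.5);
    }
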
- - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed half-precision (16-bit) floating-point elements, and store the results in "dst". - The upper 64 bits of "dst" are zeroed out. - - FOR j := 0 to 3 - dst.fp16[j] := Convert_FP32_To_FP16(a.fp32[j]) - ENDFOR - dst[MAX:64] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst". The upper 64 bits of "dst" are zeroed out. + +FOR j := 0 to 3 + dst.fp16[j] := Convert_FP32_To_FP16(a.fp32[j]) +ENDFOR +dst[MAX:64] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed half-precision (16-bit) floating-point elements, and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). The upper 64 bits of "dst" are zeroed out. - - FOR j := 0 to 3 - IF k[j] - dst.fp16[j] := Convert_FP32_To_FP16(a.fp32[j]) - ELSE - dst.fp16[j] := src.fp16[j] - FI - ENDFOR - dst[MAX:64] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The upper 64 bits of "dst" are zeroed out. + +FOR j := 0 to 3 + IF k[j] + dst.fp16[j] := Convert_FP32_To_FP16(a.fp32[j]) + ELSE + dst.fp16[j] := src.fp16[j] + FI +ENDFOR +dst[MAX:64] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed half-precision (16-bit) floating-point elements, and store the results in "dst" - using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - The upper 64 bits of "dst" are zeroed out. - - FOR j := 0 to 3 - IF k[j] - dst.fp16[j] := Convert_FP32_To_FP16(a.fp32[j]) - ELSE - dst.fp16[j] := 0 - FI - ENDFOR - dst[MAX:64] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The upper 64 bits of "dst" are zeroed out. + +FOR j := 0 to 3 + IF k[j] + dst.fp16[j] := Convert_FP32_To_FP16(a.fp32[j]) + ELSE + dst.fp16[j] := 0 + FI +ENDFOR +dst[MAX:64] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed half-precision (16-bit) floating-point elements, and store the results in "dst". - - FOR j := 0 to 7 - dst.fp16[j] := Convert_FP32_To_FP16(a.fp32[j]) - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 7 + dst.fp16[j] := Convert_FP32_To_FP16(a.fp32[j]) +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed half-precision (16-bit) floating-point elements, and store the results in "dst" - using writemask "k" (elements are copied from "src" when the corresponding mask bit is - not set). - - FOR j := 0 to 7 - IF k[j] - dst.fp16[j] := Convert_FP32_To_FP16(a.fp32[j]) - ELSE - dst.fp16[j] := src.fp16[j] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + IF k[j] + dst.fp16[j] := Convert_FP32_To_FP16(a.fp32[j]) + ELSE + dst.fp16[j] := src.fp16[j] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
-
-
-
-
- Convert packed single-precision (32-bit) floating-point elements in "a" to
- packed half-precision (16-bit) floating-point elements, and store the results in "dst"
- using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
-
- FOR j := 0 to 7
- IF k[j]
- dst.fp16[j] := Convert_FP32_To_FP16(a.fp32[j])
- ELSE
- dst.fp16[j] := 0
- FI
- ENDFOR
- dst[MAX:128] := 0
-
-
- AVX512_FP16
- AVX512VL
- &#13;
- immintrin.h&#13;
- Convert
+
+
+
+
+ Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 7
+    IF k[j]
+        dst.fp16[j] := Convert_FP32_To_FP16(a.fp32[j])
+    ELSE
+        dst.fp16[j] := 0
+    FI
+ENDFOR
+dst[MAX:128] := 0
+
+
+ AVX512_FP16
+ AVX512VL
+ &#13;
+ immintrin.h&#13;
+ Convert
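
Rust's float casts round to nearest with ties to even, which matches the default rounding these conversions use (the intrinsics themselves follow MXCSR, or an embedded rounding override where one exists). A quick nightly-only illustration of `f16` granularity, under the same `feature(f16)` assumption as above:

    #![feature(f16)]

    fn main() {
        // With a 10-bit mantissa, f16 represents every integer up to
        // 2048 but only even integers from 2048 to 4096. 2049.0 lies
        // exactly halfway between 2048.0 and 2050.0 and rounds to even.
        let h = 2049.0f32 as f16;
        assert_eq!(h as f32, 2048.0);
    }
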
- - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - 32-bit integers, and store the results in "dst". - - FOR j := 0 TO 3 - dst.dword[j] := Convert_FP16_To_Int32(a.fp16[j]) - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst". + +FOR j := 0 TO 3 + dst.dword[j] := Convert_FP16_To_Int32(a.fp16[j]) +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - - FOR j := 0 TO 3 - IF k[j] - dst.dword[j] := Convert_FP16_To_Int32(a.fp16[j]) - ELSE - dst.dword[j] := src.dword[j] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 TO 3 + IF k[j] + dst.dword[j] := Convert_FP16_To_Int32(a.fp16[j]) + ELSE + dst.dword[j] := src.dword[j] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - - FOR j := 0 TO 3 - IF k[j] - dst.dword[j] := Convert_FP16_To_Int32(a.fp16[j]) - ELSE - dst.dword[j] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 TO 3 + IF k[j] + dst.dword[j] := Convert_FP16_To_Int32(a.fp16[j]) + ELSE + dst.dword[j] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - 32-bit integers, and store the results in "dst". - - FOR j := 0 TO 7 - dst.dword[j] := Convert_FP16_To_Int32(a.fp16[j]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst". + +FOR j := 0 TO 7 + dst.dword[j] := Convert_FP16_To_Int32(a.fp16[j]) +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - - FOR j := 0 TO 7 - IF k[j] - dst.dword[j] := Convert_FP16_To_Int32(a.fp16[j]) - ELSE - dst.dword[j] := src.dword[j] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 TO 7 + IF k[j] + dst.dword[j] := Convert_FP16_To_Int32(a.fp16[j]) + ELSE + dst.dword[j] := src.dword[j] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - - FOR j := 0 TO 7 - IF k[j] - dst.dword[j] := Convert_FP16_To_Int32(a.fp16[j]) - ELSE - dst.dword[j] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 TO 7 + IF k[j] + dst.dword[j] := Convert_FP16_To_Int32(a.fp16[j]) + ELSE + dst.dword[j] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - 32-bit integers with truncation, and store the results in "dst". - - FOR j := 0 TO 3 - dst.dword[j] := Convert_FP16_To_Int32_Truncate(a.fp16[j]) - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst". + +FOR j := 0 TO 3 + dst.dword[j] := Convert_FP16_To_Int32_Truncate(a.fp16[j]) +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - 32-bit integers with truncation, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 TO 3 - IF k[j] - dst.dword[j] := Convert_FP16_To_Int32_Truncate(a.fp16[j]) - ELSE - dst.dword[j] := src.dword[j] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 TO 3 + IF k[j] + dst.dword[j] := Convert_FP16_To_Int32_Truncate(a.fp16[j]) + ELSE + dst.dword[j] := src.dword[j] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - 32-bit integers with truncation, and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 TO 3 - IF k[j] - dst.dword[j] := Convert_FP16_To_Int32_Truncate(a.fp16[j]) - ELSE - dst.dword[j] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 TO 3 + IF k[j] + dst.dword[j] := Convert_FP16_To_Int32_Truncate(a.fp16[j]) + ELSE + dst.dword[j] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - 32-bit integers with truncation, and store the results in "dst". - - FOR j := 0 TO 7 - dst.dword[j] := Convert_FP16_To_Int32_Truncate(a.fp16[j]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst". + +FOR j := 0 TO 7 + dst.dword[j] := Convert_FP16_To_Int32_Truncate(a.fp16[j]) +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - 32-bit integers with truncation, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 TO 7 - IF k[j] - dst.dword[j] := Convert_FP16_To_Int32_Truncate(a.fp16[j]) - ELSE - dst.dword[j] := src.dword[j] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 TO 7 + IF k[j] + dst.dword[j] := Convert_FP16_To_Int32_Truncate(a.fp16[j]) + ELSE + dst.dword[j] := src.dword[j] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
-
-
-
-
- Convert packed half-precision (16-bit) floating-point elements in "a" to packed
- 32-bit integers with truncation, and store the results in "dst" using zeromask "k"
- (elements are zeroed out when the corresponding mask bit is not set).
-
- FOR j := 0 TO 7
- IF k[j]
- dst.dword[j] := Convert_FP16_To_Int32_Truncate(a.fp16[j])
- ELSE
- dst.dword[j] := 0
- FI
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX512_FP16
- AVX512VL
- &#13;
- immintrin.h&#13;
- Convert
+
+
+
+
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 TO 7
+    IF k[j]
+        dst.dword[j] := Convert_FP16_To_Int32_Truncate(a.fp16[j])
+    ELSE
+        dst.dword[j] := 0
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+
+ AVX512_FP16
+ AVX512VL
+ &#13;
+ immintrin.h&#13;
+ Convert
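
The `_Truncate` variants always round toward zero, whereas the plain conversions above use the current rounding mode (round-to-nearest-even by default). The difference in stable Rust terms:

    fn main() {
        let x = 2.7f32; // stand-in for one fp16 lane value
        let rounded = x.round_ties_even() as i32; // Convert_FP16_To_Int32 -> 3
        let truncated = x.trunc() as i32; // Convert_FP16_To_Int32_Truncate -> 2
        assert_eq!((rounded, truncated), (3, 2));
    }
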
- - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - unsigned 32-bit integers, and store the results in "dst". - - FOR j := 0 TO 3 - dst.dword[j] := Convert_FP16_To_UInt32(a.fp16[j]) - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst". + +FOR j := 0 TO 3 + dst.dword[j] := Convert_FP16_To_UInt32(a.fp16[j]) +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - unsigned 32-bit integers, and store the results in "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 TO 3 - IF k[j] - dst.dword[j] := Convert_FP16_To_UInt32(a.fp16[j]) - ELSE - dst.dword[j] := src.dword[j] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 TO 3 + IF k[j] + dst.dword[j] := Convert_FP16_To_UInt32(a.fp16[j]) + ELSE + dst.dword[j] := src.dword[j] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - unsigned 32-bit integers, and store the results in "dst" using zeromask "k" (elements - are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 TO 3 - IF k[j] - dst.dword[j] := Convert_FP16_To_UInt32(a.fp16[j]) - ELSE - dst.dword[j] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 TO 3 + IF k[j] + dst.dword[j] := Convert_FP16_To_UInt32(a.fp16[j]) + ELSE + dst.dword[j] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - unsigned 32-bit integers, and store the results in "dst". - - FOR j := 0 TO 7 - dst.dword[j] := Convert_FP16_To_UInt32(a.fp16[j]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst". + +FOR j := 0 TO 7 + dst.dword[j] := Convert_FP16_To_UInt32(a.fp16[j]) +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - unsigned 32-bit integers, and store the results in "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 TO 7 - IF k[j] - dst.dword[j] := Convert_FP16_To_UInt32(a.fp16[j]) - ELSE - dst.dword[j] := src.dword[j] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 TO 7 + IF k[j] + dst.dword[j] := Convert_FP16_To_UInt32(a.fp16[j]) + ELSE + dst.dword[j] := src.dword[j] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - unsigned 32-bit integers, and store the results in "dst" using zeromask "k" (elements - are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 TO 7 - IF k[j] - dst.dword[j] := Convert_FP16_To_UInt32(a.fp16[j]) - ELSE - dst.dword[j] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 TO 7 + IF k[j] + dst.dword[j] := Convert_FP16_To_UInt32(a.fp16[j]) + ELSE + dst.dword[j] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - unsigned 32-bit integers with truncation, and store the results in "dst". - - FOR j := 0 TO 3 - dst.dword[j] := Convert_FP16_To_UInt32_Truncate(a.fp16[j]) - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst". + +FOR j := 0 TO 3 + dst.dword[j] := Convert_FP16_To_UInt32_Truncate(a.fp16[j]) +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - unsigned 32-bit integers with truncation, and store the results in "dst" using writemask - "k" (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 TO 3 - IF k[j] - dst.dword[j] := Convert_FP16_To_UInt32_Truncate(a.fp16[j]) - ELSE - dst.dword[j] := src.dword[j] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 TO 3 + IF k[j] + dst.dword[j] := Convert_FP16_To_UInt32_Truncate(a.fp16[j]) + ELSE + dst.dword[j] := src.dword[j] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - unsigned 32-bit integers with truncation, and store the results in "dst" using zeromask - "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 TO 3 - IF k[j] - dst.dword[j] := Convert_FP16_To_UInt32_Truncate(a.fp16[j]) - ELSE - dst.dword[j] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 TO 3 + IF k[j] + dst.dword[j] := Convert_FP16_To_UInt32_Truncate(a.fp16[j]) + ELSE + dst.dword[j] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - unsigned 32-bit integers with truncation, and store the results in "dst". - - FOR j := 0 TO 7 - dst.dword[j] := Convert_FP16_To_UInt32_Truncate(a.fp16[j]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst". + +FOR j := 0 TO 7 + dst.dword[j] := Convert_FP16_To_UInt32_Truncate(a.fp16[j]) +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - unsigned 32-bit integers with truncation, and store the results in "dst" using writemask - "k" (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 TO 7 - IF k[j] - dst.dword[j] := Convert_FP16_To_UInt32_Truncate(a.fp16[j]) - ELSE - dst.dword[j] := src.dword[j] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 TO 7 + IF k[j] + dst.dword[j] := Convert_FP16_To_UInt32_Truncate(a.fp16[j]) + ELSE + dst.dword[j] := src.dword[j] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
-
-
-
-
- Convert packed half-precision (16-bit) floating-point elements in "a" to packed
- unsigned 32-bit integers with truncation, and store the results in "dst" using zeromask
- "k" (elements are zeroed out when the corresponding mask bit is not set).
-
- FOR j := 0 TO 7
- IF k[j]
- dst.dword[j] := Convert_FP16_To_UInt32_Truncate(a.fp16[j])
- ELSE
- dst.dword[j] := 0
- FI
- ENDFOR
- dst[MAX:256] := 0
-
-
- AVX512_FP16
- AVX512VL
- &#13;
- immintrin.h&#13;
- Convert
+
+
+
+
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 TO 7
+    IF k[j]
+        dst.dword[j] := Convert_FP16_To_UInt32_Truncate(a.fp16[j])
+    ELSE
+        dst.dword[j] := 0
+    FI
+ENDFOR
+dst[MAX:256] := 0
+
+
+ AVX512_FP16
+ AVX512VL
+ &#13;
+ immintrin.h&#13;
+ Convert
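
A caveat when modelling the unsigned conversions in Rust: Rust's `as` casts saturate out-of-range values, while the hardware is documented to return an integer-indefinite pattern for inputs it cannot represent, so the sketch below captures Rust semantics only:

    fn main() {
        // Negative inputs are below the u32 range and clamp to 0.
        assert_eq!((-1.5f32) as u32, 0);
        // 5.0e9 is above u32::MAX and clamps to the top.
        assert_eq!(5.0e9f32 as u32, u32::MAX);
    }
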
- - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - 64-bit integers, and store the results in "dst". - - FOR j := 0 TO 1 - dst.qword[j] := Convert_FP16_To_Int64(a.fp16[j]) - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst". + +FOR j := 0 TO 1 + dst.qword[j] := Convert_FP16_To_Int64(a.fp16[j]) +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - - FOR j := 0 TO 1 - IF k[j] - dst.qword[j] := Convert_FP16_To_Int64(a.fp16[j]) - ELSE - dst.qword[j] := src.qword[j] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 TO 1 + IF k[j] + dst.qword[j] := Convert_FP16_To_Int64(a.fp16[j]) + ELSE + dst.qword[j] := src.qword[j] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - - FOR j := 0 TO 1 - IF k[j] - dst.qword[j] := Convert_FP16_To_Int64(a.fp16[j]) - ELSE - dst.qword[j] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 TO 1 + IF k[j] + dst.qword[j] := Convert_FP16_To_Int64(a.fp16[j]) + ELSE + dst.qword[j] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - 64-bit integers, and store the results in "dst". - - FOR j := 0 TO 3 - dst.qword[j] := Convert_FP16_To_Int64(a.fp16[j]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst". + +FOR j := 0 TO 3 + dst.qword[j] := Convert_FP16_To_Int64(a.fp16[j]) +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - - FOR j := 0 TO 3 - IF k[j] - dst.qword[j] := Convert_FP16_To_Int64(a.fp16[j]) - ELSE - dst.qword[j] := src.qword[j] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 TO 3 + IF k[j] + dst.qword[j] := Convert_FP16_To_Int64(a.fp16[j]) + ELSE + dst.qword[j] := src.qword[j] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - - FOR j := 0 TO 3 - IF k[j] - dst.qword[j] := Convert_FP16_To_Int64(a.fp16[j]) - ELSE - dst.qword[j] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 TO 3 + IF k[j] + dst.qword[j] := Convert_FP16_To_Int64(a.fp16[j]) + ELSE + dst.qword[j] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - 64-bit integers with truncation, and store the results in "dst". - - FOR j := 0 TO 1 - dst.qword[j] := Convert_FP16_To_Int64_Truncate(a.fp16[j]) - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst". + +FOR j := 0 TO 1 + dst.qword[j] := Convert_FP16_To_Int64_Truncate(a.fp16[j]) +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - 64-bit integers with truncation, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 TO 1 - IF k[j] - dst.qword[j] := Convert_FP16_To_Int64_Truncate(a.fp16[j]) - ELSE - dst.qword[j] := src.qword[j] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 TO 1 + IF k[j] + dst.qword[j] := Convert_FP16_To_Int64_Truncate(a.fp16[j]) + ELSE + dst.qword[j] := src.qword[j] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - 64-bit integers with truncation, and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 TO 1 - IF k[j] - dst.qword[j] := Convert_FP16_To_Int64_Truncate(a.fp16[j]) - ELSE - dst.qword[j] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 TO 1 + IF k[j] + dst.qword[j] := Convert_FP16_To_Int64_Truncate(a.fp16[j]) + ELSE + dst.qword[j] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - 64-bit integers with truncation, and store the results in "dst". - - FOR j := 0 TO 3 - dst.qword[j] := Convert_FP16_To_Int64_Truncate(a.fp16[j]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst". + +FOR j := 0 TO 3 + dst.qword[j] := Convert_FP16_To_Int64_Truncate(a.fp16[j]) +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - 64-bit integers with truncation, and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 TO 3 - IF k[j] - dst.qword[j] := Convert_FP16_To_Int64_Truncate(a.fp16[j]) - ELSE - dst.qword[j] := src.qword[j] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 TO 3 + IF k[j] + dst.qword[j] := Convert_FP16_To_Int64_Truncate(a.fp16[j]) + ELSE + dst.qword[j] := src.qword[j] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
- - - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - 64-bit integers with truncation, and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 TO 3 - IF k[j] - dst.qword[j] := Convert_FP16_To_Int64_Truncate(a.fp16[j]) - ELSE - dst.qword[j] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Convert + + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 TO 3 + IF k[j] + dst.qword[j] := Convert_FP16_To_Int64_Truncate(a.fp16[j]) + ELSE + dst.qword[j] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Convert
Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst".

FOR j := 0 TO 1
    dst.qword[j] := Convert_FP16_To_UInt64(a.fp16[j])
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 TO 1
    IF k[j]
        dst.qword[j] := Convert_FP16_To_UInt64(a.fp16[j])
    ELSE
        dst.qword[j] := src.qword[j]
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 TO 1
    IF k[j]
        dst.qword[j] := Convert_FP16_To_UInt64(a.fp16[j])
    ELSE
        dst.qword[j] := 0
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst".

FOR j := 0 TO 3
    dst.qword[j] := Convert_FP16_To_UInt64(a.fp16[j])
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 TO 3
    IF k[j]
        dst.qword[j] := Convert_FP16_To_UInt64(a.fp16[j])
    ELSE
        dst.qword[j] := src.qword[j]
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 TO 3
    IF k[j]
        dst.qword[j] := Convert_FP16_To_UInt64(a.fp16[j])
    ELSE
        dst.qword[j] := 0
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert
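Unlike the truncating forms, these plain conversions round according to the current rounding mode, which under the MXCSR default is round-to-nearest-even. A scalar sketch, again with `f32` standing in for `f16` and an illustrative name:

```rust
/// Scalar model of the FP16 -> u64 conversion with default rounding
/// (round-to-nearest-even), per the pseudocode above.
fn cvt_f16_to_u64(a: [f32; 2]) -> [u64; 2] {
    let mut dst = [0u64; 2];
    for j in 0..2 {
        // `round_ties_even` models the default MXCSR rounding mode.
        // Hardware yields an indefinite value for negative/NaN inputs,
        // while this sketch saturates (Rust `as` semantics).
        dst[j] = a[j].round_ties_even() as u64;
    }
    dst
}
```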
Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst".

FOR j := 0 TO 1
    dst.qword[j] := Convert_FP16_To_UInt64_Truncate(a.fp16[j])
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 TO 1
    IF k[j]
        dst.qword[j] := Convert_FP16_To_UInt64_Truncate(a.fp16[j])
    ELSE
        dst.qword[j] := src.qword[j]
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 TO 1
    IF k[j]
        dst.qword[j] := Convert_FP16_To_UInt64_Truncate(a.fp16[j])
    ELSE
        dst.qword[j] := 0
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst".

FOR j := 0 TO 3
    dst.qword[j] := Convert_FP16_To_UInt64_Truncate(a.fp16[j])
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 TO 3
    IF k[j]
        dst.qword[j] := Convert_FP16_To_UInt64_Truncate(a.fp16[j])
    ELSE
        dst.qword[j] := src.qword[j]
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 TO 3
    IF k[j]
        dst.qword[j] := Convert_FP16_To_UInt64_Truncate(a.fp16[j])
    ELSE
        dst.qword[j] := 0
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert
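The writemask variants merge from "src" instead of zeroing. A sketch of that merge behavior, under the same stand-in assumptions as the earlier models:

```rust
/// Scalar model of the write-masked (merging) variant: elements with a
/// clear mask bit are copied from `src` rather than zeroed.
fn mask_cvtt_f16_to_u64(src: [u64; 4], k: u8, a: [f32; 4]) -> [u64; 4] {
    let mut dst = src; // start from src; overwrite only selected lanes
    for j in 0..4 {
        if k & (1 << j) != 0 {
            dst[j] = a[j] as u64; // truncate toward zero
        }
    }
    dst
}
```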
Convert packed half-precision (16-bit) floating-point elements in "a" to packed 16-bit integers, and store the results in "dst".

FOR j := 0 TO 7
    dst.word[j] := Convert_FP16_To_Int16(a.fp16[j])
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed 16-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 TO 7
    IF k[j]
        dst.word[j] := Convert_FP16_To_Int16(a.fp16[j])
    ELSE
        dst.word[j] := src.word[j]
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed 16-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 TO 7
    IF k[j]
        dst.word[j] := Convert_FP16_To_Int16(a.fp16[j])
    ELSE
        dst.word[j] := 0
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed 16-bit integers, and store the results in "dst".

FOR j := 0 TO 15
    dst.word[j] := Convert_FP16_To_Int16(a.fp16[j])
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed 16-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 TO 15
    IF k[j]
        dst.word[j] := Convert_FP16_To_Int16(a.fp16[j])
    ELSE
        dst.word[j] := src.word[j]
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed 16-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 TO 15
    IF k[j]
        dst.word[j] := Convert_FP16_To_Int16(a.fp16[j])
    ELSE
        dst.word[j] := 0
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert
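Because `f16` and `i16` have the same width, these conversions map lanes 1:1 (8 lanes per 128 bits, 16 per 256) rather than widening the element type. A sketch of the 8-lane masked form, with the usual `f32`-for-`f16` stand-in and an illustrative name:

```rust
/// Scalar model of the 8-lane FP16 -> i16 conversion with writemask.
fn mask_cvt_f16_to_i16(src: [i16; 8], k: u8, a: [f32; 8]) -> [i16; 8] {
    let mut dst = src;
    for j in 0..8 {
        if k & (1 << j) != 0 {
            dst[j] = a[j].round_ties_even() as i16; // default rounding mode
        }
    }
    dst
}
```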
Convert packed half-precision (16-bit) floating-point elements in "a" to packed 16-bit integers with truncation, and store the results in "dst".

FOR j := 0 TO 7
    dst.word[j] := Convert_FP16_To_Int16_Truncate(a.fp16[j])
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed 16-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 TO 7
    IF k[j]
        dst.word[j] := Convert_FP16_To_Int16_Truncate(a.fp16[j])
    ELSE
        dst.word[j] := src.word[j]
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed 16-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 TO 7
    IF k[j]
        dst.word[j] := Convert_FP16_To_Int16_Truncate(a.fp16[j])
    ELSE
        dst.word[j] := 0
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed 16-bit integers with truncation, and store the results in "dst".

FOR j := 0 TO 15
    dst.word[j] := Convert_FP16_To_Int16_Truncate(a.fp16[j])
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed 16-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 TO 15
    IF k[j]
        dst.word[j] := Convert_FP16_To_Int16_Truncate(a.fp16[j])
    ELSE
        dst.word[j] := src.word[j]
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed 16-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 TO 15
    IF k[j]
        dst.word[j] := Convert_FP16_To_Int16_Truncate(a.fp16[j])
    ELSE
        dst.word[j] := 0
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert
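The only difference between the `_Truncate` and the plain conversion helpers in these pseudocode blocks is the rounding step, as this small runnable contrast shows:

```rust
fn main() {
    let x = 1.5f32; // stand-in for an f16 value
    assert_eq!(x as i16, 1);                   // truncation: round toward zero
    assert_eq!(x.round_ties_even() as i16, 2); // default mode: nearest even
    let y = 2.5f32;
    assert_eq!(y.round_ties_even() as i16, 2); // ties go to the even integer
}
```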
Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 16-bit integers, and store the results in "dst".

FOR j := 0 TO 7
    dst.word[j] := Convert_FP16_To_UInt16(a.fp16[j])
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 16-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 TO 7
    IF k[j]
        dst.word[j] := Convert_FP16_To_UInt16(a.fp16[j])
    ELSE
        dst.word[j] := src.word[j]
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 16-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 TO 7
    IF k[j]
        dst.word[j] := Convert_FP16_To_UInt16(a.fp16[j])
    ELSE
        dst.word[j] := 0
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 16-bit integers, and store the results in "dst".

FOR j := 0 TO 15
    dst.word[j] := Convert_FP16_To_UInt16(a.fp16[j])
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 16-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 TO 15
    IF k[j]
        dst.word[j] := Convert_FP16_To_UInt16(a.fp16[j])
    ELSE
        dst.word[j] := src.word[j]
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 16-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 TO 15
    IF k[j]
        dst.word[j] := Convert_FP16_To_UInt16(a.fp16[j])
    ELSE
        dst.word[j] := 0
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert
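For the unsigned forms, negative and NaN inputs are out of range. Our reading of the ISA (an assumption, not something this patch states) is that the hardware then produces the all-ones integer-indefinite pattern, while a naive Rust cast saturates to 0; the sketch below makes that difference explicit:

```rust
/// Scalar model of FP16 -> u16 that mimics the hardware's out-of-range
/// behavior (indefinite = all ones) instead of Rust's saturating cast.
fn cvt_f16_to_u16(a: f32) -> u16 {
    let r = a.round_ties_even();
    if r.is_nan() || r < 0.0 || r > u16::MAX as f32 {
        u16::MAX // integer indefinite for unsigned conversions (assumed)
    } else {
        r as u16
    }
}
```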
Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 16-bit integers with truncation, and store the results in "dst".

FOR j := 0 TO 7
    dst.word[j] := Convert_FP16_To_UInt16_Truncate(a.fp16[j])
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 16-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 TO 7
    IF k[j]
        dst.word[j] := Convert_FP16_To_UInt16_Truncate(a.fp16[j])
    ELSE
        dst.word[j] := src.word[j]
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 16-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 TO 7
    IF k[j]
        dst.word[j] := Convert_FP16_To_UInt16_Truncate(a.fp16[j])
    ELSE
        dst.word[j] := 0
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 16-bit integers with truncation, and store the results in "dst".

FOR j := 0 TO 15
    dst.word[j] := Convert_FP16_To_UInt16_Truncate(a.fp16[j])
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 16-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 TO 15
    IF k[j]
        dst.word[j] := Convert_FP16_To_UInt16_Truncate(a.fp16[j])
    ELSE
        dst.word[j] := src.word[j]
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 16-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 TO 15
    IF k[j]
        dst.word[j] := Convert_FP16_To_UInt16_Truncate(a.fp16[j])
    ELSE
        dst.word[j] := 0
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert
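A quick note on the mask operand shared by all of these: bit j of "k" gates element j, so a 16-lane operation takes a 16-bit mask. For example, selecting only the even lanes of a 16-element vector:

```rust
fn main() {
    // Bit j of the mask gates element j: select the even lanes of 16.
    let k: u16 = 0b0101_0101_0101_0101;
    for j in 0..16 {
        let selected = k & (1 << j) != 0;
        assert_eq!(selected, j % 2 == 0);
    }
}
```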
Convert packed half-precision (16-bit) floating-point elements in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst".

FOR j := 0 to 1
    dst.fp64[j] := Convert_FP16_To_FP64(a.fp16[j])
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 1
    IF k[j]
        dst.fp64[j] := Convert_FP16_To_FP64(a.fp16[j])
    ELSE
        dst.fp64[j] := src.fp64[j]
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 1
    IF k[j]
        dst.fp64[j] := Convert_FP16_To_FP64(a.fp16[j])
    ELSE
        dst.fp64[j] := 0
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst".

FOR j := 0 to 3
    dst.fp64[j] := Convert_FP16_To_FP64(a.fp16[j])
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 3
    IF k[j]
        dst.fp64[j] := Convert_FP16_To_FP64(a.fp16[j])
    ELSE
        dst.fp64[j] := src.fp64[j]
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 3
    IF k[j]
        dst.fp64[j] := Convert_FP16_To_FP64(a.fp16[j])
    ELSE
        dst.fp64[j] := 0
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert
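Widening float conversions are exact: every finite `f16` value is representable in `f64` (and in `f32`), so no rounding occurs. A sketch of the zero-masked widening form, same stand-in assumptions as before:

```rust
/// Scalar model of the zero-masked FP16 -> FP64 widening conversion.
/// Widening is exact, so there is nothing to round.
fn maskz_cvt_f16_to_f64(k: u8, a: [f32; 2]) -> [f64; 2] {
    let mut dst = [0.0f64; 2];
    for j in 0..2 {
        if k & (1 << j) != 0 {
            dst[j] = a[j] as f64;
        }
    }
    dst
}
```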
Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".

FOR j := 0 to 3
    dst.fp32[j] := Convert_FP16_To_FP32(a.fp16[j])
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 3
    IF k[j]
        dst.fp32[j] := Convert_FP16_To_FP32(a.fp16[j])
    ELSE
        dst.fp32[j] := src.fp32[j]
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 3
    IF k[j]
        dst.fp32[j] := Convert_FP16_To_FP32(a.fp16[j])
    ELSE
        dst.fp32[j] := 0
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".

FOR j := 0 to 7
    dst.fp32[j] := Convert_FP16_To_FP32(a.fp16[j])
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 to 7
    IF k[j]
        dst.fp32[j] := Convert_FP16_To_FP32(a.fp16[j])
    ELSE
        dst.fp32[j] := src.fp32[j]
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert

Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 to 7
    IF k[j]
        dst.fp32[j] := Convert_FP16_To_FP32(a.fp16[j])
    ELSE
        dst.fp32[j] := 0
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Convert
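For readers who want to see what Convert_FP16_To_FP32 actually does at the bit level, here is a self-contained Rust model that decodes a raw IEEE binary16 pattern into an `f32`, covering normals, subnormals, zeros, infinities and NaNs. It is an illustrative sketch, not code from this patch; for instance, `f16_bits_to_f32(0x3c00)` yields 1.0.

```rust
/// Bit-level model of Convert_FP16_To_FP32 (IEEE binary16 -> binary32),
/// operating on a raw f16 bit pattern.
fn f16_bits_to_f32(h: u16) -> f32 {
    let sign = ((h as u32) & 0x8000) << 16;
    let exp = ((h >> 10) & 0x1f) as u32;
    let frac = (h & 0x3ff) as u32;
    let bits = if exp == 0x1f {
        sign | 0x7f80_0000 | (frac << 13) // infinity, or NaN with payload kept
    } else if exp != 0 {
        sign | ((exp + 112) << 23) | (frac << 13) // normal: rebias 15 -> 127
    } else if frac == 0 {
        sign // signed zero
    } else {
        // subnormal: renormalize; the leading set bit becomes the implicit 1
        let n = 31 - frac.leading_zeros(); // index of the top set bit
        let e = 134 - frac.leading_zeros(); // (n - 24) + 127
        sign | (e << 23) | ((frac ^ (1 << n)) << (23 - n))
    };
    f32::from_bits(bits)
}
```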
Compare packed half-precision (16-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst". [max_float_note]

FOR j := 0 to 7
    dst.fp16[j] := (a.fp16[j] > b.fp16[j] ? a.fp16[j] : b.fp16[j])
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Special Math Functions

Compare packed half-precision (16-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [max_float_note]

FOR j := 0 to 7
    IF k[j]
        dst.fp16[j] := (a.fp16[j] > b.fp16[j] ? a.fp16[j] : b.fp16[j])
    ELSE
        dst.fp16[j] := src.fp16[j]
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Special Math Functions

Compare packed half-precision (16-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [max_float_note]

FOR j := 0 to 7
    IF k[j]
        dst.fp16[j] := (a.fp16[j] > b.fp16[j] ? a.fp16[j] : b.fp16[j])
    ELSE
        dst.fp16[j] := 0
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Special Math Functions

Compare packed half-precision (16-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst". [max_float_note]

FOR j := 0 to 15
    dst.fp16[j] := (a.fp16[j] > b.fp16[j] ? a.fp16[j] : b.fp16[j])
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Special Math Functions

Compare packed half-precision (16-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [max_float_note]

FOR j := 0 to 15
    IF k[j]
        dst.fp16[j] := (a.fp16[j] > b.fp16[j] ? a.fp16[j] : b.fp16[j])
    ELSE
        dst.fp16[j] := src.fp16[j]
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Special Math Functions

Compare packed half-precision (16-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [max_float_note]

FOR j := 0 to 15
    IF k[j]
        dst.fp16[j] := (a.fp16[j] > b.fp16[j] ? a.fp16[j] : b.fp16[j])
    ELSE
        dst.fp16[j] := 0
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Special Math Functions
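The [max_float_note] matters here: the operation is literally `a > b ? a : b`, which does not match an IEEE-754 maximum. Whenever the comparison is false, including when either operand is NaN or when the operands compare equal (such as -0.0 versus +0.0), the second source operand is returned. A runnable model:

```rust
/// Model of one lane of the packed max: strictly `a > b ? a : b`,
/// exactly as in the pseudocode above.
fn max_lane(a: f32, b: f32) -> f32 {
    if a > b { a } else { b }
}

fn main() {
    assert_eq!(max_lane(f32::NAN, 1.0), 1.0);        // NaN > 1.0 is false: b wins
    assert!(max_lane(1.0, f32::NAN).is_nan());       // 1.0 > NaN is false: b (NaN) wins
    assert!(max_lane(-0.0, 0.0).is_sign_positive()); // equal zeros: b (+0.0) wins
}
```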
Compare the lower half-precision (16-bit) floating-point elements in "a" and "b", store the maximum value in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst". [max_float_note]

dst.fp16[0] := (a.fp16[0] > b.fp16[0] ? a.fp16[0] : b.fp16[0])
dst[127:16] := a[127:16]
dst[MAX:128] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Special Math Functions

Compare the lower half-precision (16-bit) floating-point elements in "a" and "b", store the maximum value in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst".

IF k[0]
    dst.fp16[0] := (a.fp16[0] > b.fp16[0] ? a.fp16[0] : b.fp16[0])
ELSE
    dst.fp16[0] := src.fp16[0]
FI
dst[127:16] := a[127:16]
dst[MAX:128] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Special Math Functions

Compare the lower half-precision (16-bit) floating-point elements in "a" and "b", store the maximum value in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst".

IF k[0]
    dst.fp16[0] := (a.fp16[0] > b.fp16[0] ? a.fp16[0] : b.fp16[0])
ELSE
    dst.fp16[0] := 0
FI
dst[127:16] := a[127:16]
dst[MAX:128] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Special Math Functions

Compare the lower half-precision (16-bit) floating-point elements in "a" and "b", store the maximum value in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst". [sae_note][max_float_note]

dst.fp16[0] := (a.fp16[0] > b.fp16[0] ? a.fp16[0] : b.fp16[0])
dst[127:16] := a[127:16]
dst[MAX:128] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Special Math Functions

Compare the lower half-precision (16-bit) floating-point elements in "a" and "b", store the maximum value in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". [sae_note][max_float_note]

IF k[0]
    dst.fp16[0] := (a.fp16[0] > b.fp16[0] ? a.fp16[0] : b.fp16[0])
ELSE
    dst.fp16[0] := src.fp16[0]
FI
dst[127:16] := a[127:16]
dst[MAX:128] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Special Math Functions

Compare the lower half-precision (16-bit) floating-point elements in "a" and "b", store the maximum value in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". [sae_note][max_float_note]

IF k[0]
    dst.fp16[0] := (a.fp16[0] > b.fp16[0] ? a.fp16[0] : b.fp16[0])
ELSE
    dst.fp16[0] := 0
FI
dst[127:16] := a[127:16]
dst[MAX:128] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Special Math Functions
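The scalar forms operate only on lane 0 and pass lanes 1..7 through from "a". The [sae_note] variants additionally let the caller suppress floating-point exception reporting; since max/min never round, suppressing exceptions changes which flags may be raised but not the result bits. A sketch of the write-masked scalar behavior, same stand-in assumptions as the earlier models:

```rust
/// Model of the masked scalar max: lane 0 is computed (or merged from
/// `src` per the mask), lanes 1..7 are copied from `a`.
fn mask_max_sh(src: [f32; 8], k: u8, a: [f32; 8], b: [f32; 8]) -> [f32; 8] {
    let mut dst = a; // upper 7 lanes pass through from `a`
    dst[0] = if k & 1 != 0 {
        if a[0] > b[0] { a[0] } else { b[0] }
    } else {
        src[0] // merge from src when mask bit 0 is clear
    };
    dst
}
```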
Compare packed half-precision (16-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst". [min_float_note]

FOR j := 0 to 7
    dst.fp16[j] := (a.fp16[j] < b.fp16[j] ? a.fp16[j] : b.fp16[j])
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Special Math Functions

Compare packed half-precision (16-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [min_float_note]

FOR j := 0 to 7
    IF k[j]
        dst.fp16[j] := (a.fp16[j] < b.fp16[j] ? a.fp16[j] : b.fp16[j])
    ELSE
        dst.fp16[j] := src.fp16[j]
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Special Math Functions

Compare packed half-precision (16-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [min_float_note]

FOR j := 0 to 7
    IF k[j]
        dst.fp16[j] := (a.fp16[j] < b.fp16[j] ? a.fp16[j] : b.fp16[j])
    ELSE
        dst.fp16[j] := 0
    FI
ENDFOR
dst[MAX:128] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Special Math Functions

Compare packed half-precision (16-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst". [min_float_note]

FOR j := 0 to 15
    dst.fp16[j] := (a.fp16[j] < b.fp16[j] ? a.fp16[j] : b.fp16[j])
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Special Math Functions

Compare packed half-precision (16-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [min_float_note]

FOR j := 0 to 15
    IF k[j]
        dst.fp16[j] := (a.fp16[j] < b.fp16[j] ? a.fp16[j] : b.fp16[j])
    ELSE
        dst.fp16[j] := src.fp16[j]
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Special Math Functions

Compare packed half-precision (16-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [min_float_note]

FOR j := 0 to 15
    IF k[j]
        dst.fp16[j] := (a.fp16[j] < b.fp16[j] ? a.fp16[j] : b.fp16[j])
    ELSE
        dst.fp16[j] := 0
    FI
ENDFOR
dst[MAX:256] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Special Math Functions
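Min mirrors max with `a < b ? a : b`, and the [min_float_note] caveat is the same. In particular, min(-0.0, +0.0) returns +0.0 because the comparison treats the two zeros as equal:

```rust
/// Model of one lane of the packed min: strictly `a < b ? a : b`.
fn min_lane(a: f32, b: f32) -> f32 {
    if a < b { a } else { b }
}

fn main() {
    // -0.0 < 0.0 is false, so the second operand comes back.
    assert!(min_lane(-0.0, 0.0).is_sign_positive());
    // An IEEE-754 minimum would return -0.0 here instead.
}
```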
Compare the lower half-precision (16-bit) floating-point elements in "a" and "b", store the minimum value in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst". [min_float_note]

dst.fp16[0] := (a.fp16[0] < b.fp16[0] ? a.fp16[0] : b.fp16[0])
dst[127:16] := a[127:16]
dst[MAX:128] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Special Math Functions

Compare the lower half-precision (16-bit) floating-point elements in "a" and "b", store the minimum value in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst".

IF k[0]
    dst.fp16[0] := (a.fp16[0] < b.fp16[0] ? a.fp16[0] : b.fp16[0])
ELSE
    dst.fp16[0] := src.fp16[0]
FI
dst[127:16] := a[127:16]
dst[MAX:128] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Special Math Functions

Compare the lower half-precision (16-bit) floating-point elements in "a" and "b", store the minimum value in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst".

IF k[0]
    dst.fp16[0] := (a.fp16[0] < b.fp16[0] ? a.fp16[0] : b.fp16[0])
ELSE
    dst.fp16[0] := 0
FI
dst[127:16] := a[127:16]
dst[MAX:128] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Special Math Functions

Compare the lower half-precision (16-bit) floating-point elements in "a" and "b", store the minimum value in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst". [sae_note][min_float_note]

dst.fp16[0] := (a.fp16[0] < b.fp16[0] ? a.fp16[0] : b.fp16[0])
dst[127:16] := a[127:16]
dst[MAX:128] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Special Math Functions

Compare the lower half-precision (16-bit) floating-point elements in "a" and "b", store the minimum value in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". [sae_note][min_float_note]

IF k[0]
    dst.fp16[0] := (a.fp16[0] < b.fp16[0] ? a.fp16[0] : b.fp16[0])
ELSE
    dst.fp16[0] := src.fp16[0]
FI
dst[127:16] := a[127:16]
dst[MAX:128] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Special Math Functions

Compare the lower half-precision (16-bit) floating-point elements in "a" and "b", store the minimum value in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". [sae_note][min_float_note]

IF k[0]
    dst.fp16[0] := (a.fp16[0] < b.fp16[0] ? a.fp16[0] : b.fp16[0])
ELSE
    dst.fp16[0] := 0
FI
dst[127:16] := a[127:16]
dst[MAX:128] := 0

CPUID: AVX512_FP16, AVX512VL | Header: immintrin.h | Category: Special Math Functions
immintrin.h
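In all of the scalar (sh) forms above only lane 0 is computed; lanes 1 through 7 always come from "a". A sketch of that merge, again with f32 standing in for f16 and a hypothetical helper name:

fn mask_min_sh(src: &[f32; 8], k: u8, a: &[f32; 8], b: &[f32; 8]) -> [f32; 8] {
    let mut dst = *a; // dst[127:16] := a[127:16], copied unconditionally
    dst[0] = if k & 1 != 0 {
        if a[0] < b[0] { a[0] } else { b[0] }
    } else {
        src[0] // the maskz form would use 0.0 here instead
    };
    dst
}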
+ Special Math Functions
- - - - Round packed half-precision (16-bit) floating-point elements in "a" to the - number of fraction bits specified by "imm8", and store the results in "dst". - [round_imm_note] - - DEFINE RoundScaleFP16(src.fp16, imm8[7:0]) { - m.fp16 := FP16(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp.fp16 := POW(FP16(2.0), -m) * ROUND(POW(FP16(2.0), m) * src.fp16, imm8[3:0]) - RETURN tmp.fp16 - } - FOR i := 0 to 7 - dst.fp16[i] := RoundScaleFP16(a.fp16[i], imm8) - ENDFOR - dest[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Miscellaneous + + + + Round packed half-precision (16-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst". [round_imm_note]
+
+DEFINE RoundScaleFP16(src.fp16, imm8[7:0]) {
+ m.fp16 := FP16(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp.fp16 := POW(FP16(2.0), -m) * ROUND(POW(FP16(2.0), m) * src.fp16, imm8[3:0])
+ RETURN tmp.fp16
+}
+FOR i := 0 to 7
+ dst.fp16[i] := RoundScaleFP16(a.fp16[i], imm8)
+ENDFOR
+dst[MAX:128] := 0
+
+ AVX512_FP16
+ AVX512VL
immintrin.h
+ Miscellaneous
- - - - - - Round packed half-precision (16-bit) floating-point elements in "a" to the - number of fraction bits specified by "imm8", and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). [round_imm_note] - - DEFINE RoundScaleFP16(src.fp16, imm8[7:0]) { - m.fp16 := FP16(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp.fp16 := POW(FP16(2.0), -m) * ROUND(POW(FP16(2.0), m) * src.fp16, imm8[3:0]) - RETURN tmp.fp16 - } - FOR i := 0 to 7 - IF k[i] - dst.fp16[i] := RoundScaleFP16(a.fp16[i], imm8) - ELSE - dst.fp16[i] := src.fp16[i] - FI - ENDFOR - dest[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Round packed half-precision (16-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note]
+
+DEFINE RoundScaleFP16(src.fp16, imm8[7:0]) {
+ m.fp16 := FP16(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp.fp16 := POW(FP16(2.0), -m) * ROUND(POW(FP16(2.0), m) * src.fp16, imm8[3:0])
+ RETURN tmp.fp16
+}
+FOR i := 0 to 7
+ IF k[i]
+ dst.fp16[i] := RoundScaleFP16(a.fp16[i], imm8)
+ ELSE
+ dst.fp16[i] := src.fp16[i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+
+ AVX512_FP16
+ AVX512VL
immintrin.h
+ Miscellaneous
- - - - - Round packed half-precision (16-bit) floating-point elements in "a" to the - number of fraction bits specified by "imm8", and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - [round_imm_note] - - DEFINE RoundScaleFP16(src.fp16, imm8[7:0]) { - m.fp16 := FP16(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp.fp16 := POW(FP16(2.0), -m) * ROUND(POW(FP16(2.0), m) * src.fp16, imm8[3:0]) - RETURN tmp.fp16 - } - FOR i := 0 to 7 - IF k[i] - dst.fp16[i] := RoundScaleFP16(a.fp16[i], imm8) - ELSE - dst.fp16[i] := 0 - FI - ENDFOR - dest[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Round packed half-precision (16-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note]
+
+DEFINE RoundScaleFP16(src.fp16, imm8[7:0]) {
+ m.fp16 := FP16(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp.fp16 := POW(FP16(2.0), -m) * ROUND(POW(FP16(2.0), m) * src.fp16, imm8[3:0])
+ RETURN tmp.fp16
+}
+FOR i := 0 to 7
+ IF k[i]
+ dst.fp16[i] := RoundScaleFP16(a.fp16[i], imm8)
+ ELSE
+ dst.fp16[i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+
+ AVX512_FP16
+ AVX512VL
immintrin.h
+ Miscellaneous
- - - - Round packed half-precision (16-bit) floating-point elements in "a" to the - number of fraction bits specified by "imm8", and store the results in "dst". - [round_imm_note] - - DEFINE RoundScaleFP16(src.fp16, imm8[7:0]) { - m.fp16 := FP16(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp.fp16 := POW(FP16(2.0), -m) * ROUND(POW(FP16(2.0), m) * src.fp16, imm8[3:0]) - RETURN tmp.fp16 - } - FOR i := 0 to 15 - dst.fp16[i] := RoundScaleFP16(a.fp16[i], imm8) - ENDFOR - dest[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Miscellaneous + + + + Round packed half-precision (16-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst". [round_imm_note]
+
+DEFINE RoundScaleFP16(src.fp16, imm8[7:0]) {
+ m.fp16 := FP16(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp.fp16 := POW(FP16(2.0), -m) * ROUND(POW(FP16(2.0), m) * src.fp16, imm8[3:0])
+ RETURN tmp.fp16
+}
+FOR i := 0 to 15
+ dst.fp16[i] := RoundScaleFP16(a.fp16[i], imm8)
+ENDFOR
+dst[MAX:256] := 0
+
+ AVX512_FP16
+ AVX512VL
immintrin.h
+ Miscellaneous
- - - - - - Round packed half-precision (16-bit) floating-point elements in "a" to the - number of fraction bits specified by "imm8", and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). [round_imm_note] - - DEFINE RoundScaleFP16(src.fp16, imm8[7:0]) { - m.fp16 := FP16(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp.fp16 := POW(FP16(2.0), -m) * ROUND(POW(FP16(2.0), m) * src.fp16, imm8[3:0]) - RETURN tmp.fp16 - } - FOR i := 0 to 15 - IF k[i] - dst.fp16[i] := RoundScaleFP16(a.fp16[i], imm8) - ELSE - dst.fp16[i] := src.fp16[i] - FI - ENDFOR - dest[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Round packed half-precision (16-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note]
+
+DEFINE RoundScaleFP16(src.fp16, imm8[7:0]) {
+ m.fp16 := FP16(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp.fp16 := POW(FP16(2.0), -m) * ROUND(POW(FP16(2.0), m) * src.fp16, imm8[3:0])
+ RETURN tmp.fp16
+}
+FOR i := 0 to 15
+ IF k[i]
+ dst.fp16[i] := RoundScaleFP16(a.fp16[i], imm8)
+ ELSE
+ dst.fp16[i] := src.fp16[i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+
+ AVX512_FP16
+ AVX512VL
immintrin.h
+ Miscellaneous
- - - - - Round packed half-precision (16-bit) floating-point elements in "a" to the - number of fraction bits specified by "imm8", and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - [round_imm_note] - - DEFINE RoundScaleFP16(src.fp16, imm8[7:0]) { - m.fp16 := FP16(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp.fp16 := POW(FP16(2.0), -m) * ROUND(POW(FP16(2.0), m) * src.fp16, imm8[3:0]) - RETURN tmp.fp16 - } - FOR i := 0 to 15 - IF k[i] - dst.fp16[i] := RoundScaleFP16(a.fp16[i], imm8) - ELSE - dst.fp16[i] := 0 - FI - ENDFOR - dest[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Round packed half-precision (16-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note]
+
+DEFINE RoundScaleFP16(src.fp16, imm8[7:0]) {
+ m.fp16 := FP16(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp.fp16 := POW(FP16(2.0), -m) * ROUND(POW(FP16(2.0), m) * src.fp16, imm8[3:0])
+ RETURN tmp.fp16
+}
+FOR i := 0 to 15
+ IF k[i]
+ dst.fp16[i] := RoundScaleFP16(a.fp16[i], imm8)
+ ELSE
+ dst.fp16[i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+
+ AVX512_FP16
+ AVX512VL
immintrin.h
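RoundScaleFP16 above is just "scale up by 2^M, round to an integer, scale back down", where M = imm8[7:4] and imm8[3:0] selects the rounding behaviour. A scalar sketch in Rust (f64 for readability; only the four fixed rounding modes in imm8[1:0] are modelled, and the exception-suppression bits are ignored):

fn round_scale(x: f64, imm8: u8) -> f64 {
    let m = (imm8 >> 4) as i32; // imm8[7:4]: fraction bits to preserve
    let scaled = x * f64::powi(2.0, m);
    let rounded = match imm8 & 0b11 { // imm8[1:0]: rounding mode
        0 => scaled.round_ties_even(), // to nearest, ties to even
        1 => scaled.floor(),           // toward negative infinity
        2 => scaled.ceil(),            // toward positive infinity
        _ => scaled.trunc(),           // toward zero
    };
    rounded * f64::powi(2.0, -m)
}

For example, round_scale(1.234375, 0x20) keeps two fraction bits and returns 1.25.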
+ Miscellaneous
- - - Convert the exponent of each packed half-precision (16-bit) floating-point - element in "a" to a half-precision (16-bit) floating-point number representing the - integer exponent, and store the results in "dst". This intrinsic essentially calculates - "floor(log2(x))" for each element. - FOR i := 0 to 7 - dst.fp16[i] := ConvertExpFP16(a.fp16[i]) - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Miscellaneous + + + Convert the exponent of each packed half-precision (16-bit) floating-point element in "a" to a half-precision (16-bit) floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element. + FOR i := 0 to 7 + dst.fp16[i] := ConvertExpFP16(a.fp16[i]) +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Convert the exponent of each packed half-precision (16-bit) floating-point - element in "a" to a half-precision (16-bit) floating-point number representing the - integer exponent, and store the results in "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). This intrinsic - essentially calculates "floor(log2(x))" for each element. - FOR i := 0 to 7 - IF k[i] - dst.fp16[i] := ConvertExpFP16(a.fp16[i]) - ELSE - dst.fp16[i] := src.fp16[i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Convert the exponent of each packed half-precision (16-bit) floating-point element in "a" to a half-precision (16-bit) floating-point number representing the integer exponent, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element. + FOR i := 0 to 7 + IF k[i] + dst.fp16[i] := ConvertExpFP16(a.fp16[i]) + ELSE + dst.fp16[i] := src.fp16[i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - Convert the exponent of each packed half-precision (16-bit) floating-point - element in "a" to a half-precision (16-bit) floating-point number representing the - integer exponent, and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). This intrinsic essentially calculates - "floor(log2(x))" for each element. - FOR i := 0 to 7 - IF k[i] - dst.fp16[i] := ConvertExpFP16(a.fp16[i]) - ELSE - dst.fp16[i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Miscellaneous + + + + Convert the exponent of each packed half-precision (16-bit) floating-point element in "a" to a half-precision (16-bit) floating-point number representing the integer exponent, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element. + FOR i := 0 to 7 + IF k[i] + dst.fp16[i] := ConvertExpFP16(a.fp16[i]) + ELSE + dst.fp16[i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Miscellaneous
- - - Convert the exponent of each packed half-precision (16-bit) floating-point - element in "a" to a half-precision (16-bit) floating-point number representing the - integer exponent, and store the results in "dst". This intrinsic essentially calculates - "floor(log2(x))" for each element. - FOR i := 0 to 15 - dst.fp16[i] := ConvertExpFP16(a.fp16[i]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Miscellaneous + + + Convert the exponent of each packed half-precision (16-bit) floating-point element in "a" to a half-precision (16-bit) floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element. + FOR i := 0 to 15 + dst.fp16[i] := ConvertExpFP16(a.fp16[i]) +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Convert the exponent of each packed half-precision (16-bit) floating-point - element in "a" to a half-precision (16-bit) floating-point number representing the - integer exponent, and store the results in "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). This intrinsic - essentially calculates "floor(log2(x))" for each element. - FOR i := 0 to 15 - IF k[i] - dst.fp16[i] := ConvertExpFP16(a.fp16[i]) - ELSE - dst.fp16[i] := src.fp16[i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Convert the exponent of each packed half-precision (16-bit) floating-point element in "a" to a half-precision (16-bit) floating-point number representing the integer exponent, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element. + FOR i := 0 to 15 + IF k[i] + dst.fp16[i] := ConvertExpFP16(a.fp16[i]) + ELSE + dst.fp16[i] := src.fp16[i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - Convert the exponent of each packed half-precision (16-bit) floating-point - element in "a" to a half-precision (16-bit) floating-point number representing the - integer exponent, and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). This intrinsic essentially calculates - "floor(log2(x))" for each element. - FOR i := 0 to 15 - IF k[i] - dst.fp16[i] := ConvertExpFP16(a.fp16[i]) - ELSE - dst.fp16[i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Miscellaneous + + + + Convert the exponent of each packed half-precision (16-bit) floating-point element in "a" to a half-precision (16-bit) floating-point number representing the integer exponent, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element. + FOR i := 0 to 15 + IF k[i] + dst.fp16[i] := ConvertExpFP16(a.fp16[i]) + ELSE + dst.fp16[i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
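ConvertExpFP16 extracts the unbiased exponent as a floating-point value, i.e. floor(log2(|x|)). A one-lane sketch (f32 stand-in; denormal handling elided):

fn get_exp(x: f32) -> f32 {
    if x == 0.0 {
        f32::NEG_INFINITY // log2(0)
    } else if x.is_nan() {
        f32::NAN
    } else if x.is_infinite() {
        f32::INFINITY // the exponent of an infinity is +inf regardless of sign
    } else {
        x.abs().log2().floor()
    }
}

get_exp(8.5) returns 3.0 and get_exp(0.25) returns -2.0.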
+ Miscellaneous
- - - - - Normalize the mantissas of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst". This intrinsic essentially calculates - "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "norm" - and the sign depends on "sign" and the source sign. - [getmant_note] - FOR i := 0 TO 7 - dst.fp16[i] := GetNormalizedMantissaFP16(a.fp16[i], norm, sign) - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Normalize the mantissas of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "norm" and the sign depends on "sign" and the source sign. + [getmant_note] + FOR i := 0 TO 7 + dst.fp16[i] := GetNormalizedMantissaFP16(a.fp16[i], norm, sign) +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - - Normalize the mantissas of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). This intrinsic essentially - calculates "±(2^k)*|x.significand|", where "k" depends on the interval range - defined by "norm" and the sign depends on "sign" and the source sign. - [getmant_note] - FOR i := 0 TO 7 - IF k[i] - dst.fp16[i] := GetNormalizedMantissaFP16(a.fp16[i], norm, sign) - ELSE - dst.fp16[i] := src.fp16[i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + + Normalize the mantissas of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "norm" and the sign depends on "sign" and the source sign. + [getmant_note] + FOR i := 0 TO 7 + IF k[i] + dst.fp16[i] := GetNormalizedMantissaFP16(a.fp16[i], norm, sign) + ELSE + dst.fp16[i] := src.fp16[i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - Normalize the mantissas of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). This intrinsic essentially calculates - "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "norm" - and the sign depends on "sign" and the source sign. - [getmant_note] - FOR i := 0 TO 7 - IF k[i] - dst.fp16[i] := GetNormalizedMantissaFP16(a.fp16[i], norm, sign) - ELSE - dst.fp16[i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Normalize the mantissas of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "norm" and the sign depends on "sign" and the source sign. + [getmant_note] + FOR i := 0 TO 7 + IF k[i] + dst.fp16[i] := GetNormalizedMantissaFP16(a.fp16[i], norm, sign) + ELSE + dst.fp16[i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Normalize the mantissas of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst". This intrinsic essentially calculates - "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "norm" - and the sign depends on "sign" and the source sign. - [getmant_note] - FOR i := 0 TO 15 - dst.fp16[i] := GetNormalizedMantissaFP16(a.fp16[i], norm, sign) - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Normalize the mantissas of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "norm" and the sign depends on "sign" and the source sign. + [getmant_note] + FOR i := 0 TO 15 + dst.fp16[i] := GetNormalizedMantissaFP16(a.fp16[i], norm, sign) +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - - Normalize the mantissas of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). This intrinsic essentially - calculates "±(2^k)*|x.significand|", where "k" depends on the interval range - defined by "norm" and the sign depends on "sign" and the source sign. - [getmant_note] - FOR i := 0 TO 15 - IF k[i] - dst.fp16[i] := GetNormalizedMantissaFP16(a.fp16[i], norm, sign) - ELSE - dst.fp16[i] := src.fp16[i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + + Normalize the mantissas of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "norm" and the sign depends on "sign" and the source sign. + [getmant_note] + FOR i := 0 TO 15 + IF k[i] + dst.fp16[i] := GetNormalizedMantissaFP16(a.fp16[i], norm, sign) + ELSE + dst.fp16[i] := src.fp16[i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - Normalize the mantissas of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). This intrinsic essentially calculates - "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "norm" - and the sign depends on "sign" and the source sign. - [getmant_note] - FOR i := 0 TO 15 - IF k[i] - dst.fp16[i] := GetNormalizedMantissaFP16(a.fp16[i], norm, sign) - ELSE - dst.fp16[i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Normalize the mantissas of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "norm" and the sign depends on "sign" and the source sign. + [getmant_note] + FOR i := 0 TO 15 + IF k[i] + dst.fp16[i] := GetNormalizedMantissaFP16(a.fp16[i], norm, sign) + ELSE + dst.fp16[i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
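GetNormalizedMantissaFP16 decomposes |x| as m * 2^k and returns m mapped into the interval chosen by "norm". A sketch of the default interval [1, 2) only, ignoring the other intervals and the "sign" control described in [getmant_note]:

fn get_mant_1_2(x: f32) -> f32 {
    if x == 0.0 || !x.is_finite() {
        return x.abs(); // zero, infinity and NaN pass through (sign control elided)
    }
    let mut m = x.abs();
    while m >= 2.0 { m /= 2.0; }
    while m < 1.0 { m *= 2.0; }
    m
}

get_mant_1_2(24.0) returns 1.5, since 24 = 1.5 * 2^4.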
+ Miscellaneous
- - - - Extract the reduced argument of packed half-precision (16-bit) floating-point - elements in "a" by the number of bits specified by "imm8", and store the results in - "dst". [round_imm_note] - - DEFINE ReduceArgumentFP16(src[15:0], imm8[7:0]) { - m[15:0] := FP16(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[15:0] := POW(2.0, FP16(-m)) * ROUND(POW(2.0, FP16(m)) * src[15:0], imm8[3:0]) - tmp[15:0] := src[15:0] - tmp[15:0] - IF IsInf(tmp[15:0]) - tmp[15:0] := FP16(0.0) - FI - RETURN tmp[15:0] - } - FOR i := 0 to 7 - dst.fp16[i] := ReduceArgumentFP16(a.fp16[i], imm8) - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Miscellaneous + + + + Extract the reduced argument of packed half-precision (16-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst". [round_imm_note] + +DEFINE ReduceArgumentFP16(src[15:0], imm8[7:0]) { + m[15:0] := FP16(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[15:0] := POW(2.0, FP16(-m)) * ROUND(POW(2.0, FP16(m)) * src[15:0], imm8[3:0]) + tmp[15:0] := src[15:0] - tmp[15:0] + IF IsInf(tmp[15:0]) + tmp[15:0] := FP16(0.0) + FI + RETURN tmp[15:0] +} +FOR i := 0 to 7 + dst.fp16[i] := ReduceArgumentFP16(a.fp16[i], imm8) +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - Extract the reduced argument of packed half-precision (16-bit) floating-point - elements in "a" by the number of bits specified by "imm8", and store the results in - "dst" using writemask "k" (elements are copied from "src" when the corresponding mask - bit is not set). [round_imm_note] - - DEFINE ReduceArgumentFP16(src[15:0], imm8[7:0]) { - m[15:0] := FP16(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[15:0] := POW(2.0, FP16(-m)) * ROUND(POW(2.0, FP16(m)) * src[15:0], imm8[3:0]) - tmp[15:0] := src[15:0] - tmp[15:0] - IF IsInf(tmp[15:0]) - tmp[15:0] := FP16(0.0) - FI - RETURN tmp[15:0] - } - FOR i := 0 to 7 - IF k[i] - dst.fp16[i] := ReduceArgumentFP16(a.fp16[i], imm8) - ELSE - dst.fp16[i] := src.fp16[i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Extract the reduced argument of packed half-precision (16-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note] + +DEFINE ReduceArgumentFP16(src[15:0], imm8[7:0]) { + m[15:0] := FP16(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[15:0] := POW(2.0, FP16(-m)) * ROUND(POW(2.0, FP16(m)) * src[15:0], imm8[3:0]) + tmp[15:0] := src[15:0] - tmp[15:0] + IF IsInf(tmp[15:0]) + tmp[15:0] := FP16(0.0) + FI + RETURN tmp[15:0] +} +FOR i := 0 to 7 + IF k[i] + dst.fp16[i] := ReduceArgumentFP16(a.fp16[i], imm8) + ELSE + dst.fp16[i] := src.fp16[i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Extract the reduced argument of packed half-precision (16-bit) floating-point - elements in "a" by the number of bits specified by "imm8", and store the results in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). [round_imm_note] - - DEFINE ReduceArgumentFP16(src[15:0], imm8[7:0]) { - m[15:0] := FP16(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[15:0] := POW(2.0, FP16(-m)) * ROUND(POW(2.0, FP16(m)) * src[15:0], imm8[3:0]) - tmp[15:0] := src[15:0] - tmp[15:0] - IF IsInf(tmp[15:0]) - tmp[15:0] := FP16(0.0) - FI - RETURN tmp[15:0] - } - FOR i := 0 to 7 - IF k[i] - dst.fp16[i] := ReduceArgumentFP16(a.fp16[i], imm8) - ELSE - dst.fp16[i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Extract the reduced argument of packed half-precision (16-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note] + +DEFINE ReduceArgumentFP16(src[15:0], imm8[7:0]) { + m[15:0] := FP16(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[15:0] := POW(2.0, FP16(-m)) * ROUND(POW(2.0, FP16(m)) * src[15:0], imm8[3:0]) + tmp[15:0] := src[15:0] - tmp[15:0] + IF IsInf(tmp[15:0]) + tmp[15:0] := FP16(0.0) + FI + RETURN tmp[15:0] +} +FOR i := 0 to 7 + IF k[i] + dst.fp16[i] := ReduceArgumentFP16(a.fp16[i], imm8) + ELSE + dst.fp16[i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - Extract the reduced argument of packed half-precision (16-bit) floating-point - elements in "a" by the number of bits specified by "imm8", and store the results in - "dst". [round_imm_note] - - DEFINE ReduceArgumentFP16(src[15:0], imm8[7:0]) { - m[15:0] := FP16(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[15:0] := POW(2.0, FP16(-m)) * ROUND(POW(2.0, FP16(m)) * src[15:0], imm8[3:0]) - tmp[15:0] := src[15:0] - tmp[15:0] - IF IsInf(tmp[15:0]) - tmp[15:0] := FP16(0.0) - FI - RETURN tmp[15:0] - } - FOR i := 0 to 15 - dst.fp16[i] := ReduceArgumentFP16(a.fp16[i], imm8) - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Miscellaneous + + + + Extract the reduced argument of packed half-precision (16-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst". [round_imm_note] + +DEFINE ReduceArgumentFP16(src[15:0], imm8[7:0]) { + m[15:0] := FP16(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[15:0] := POW(2.0, FP16(-m)) * ROUND(POW(2.0, FP16(m)) * src[15:0], imm8[3:0]) + tmp[15:0] := src[15:0] - tmp[15:0] + IF IsInf(tmp[15:0]) + tmp[15:0] := FP16(0.0) + FI + RETURN tmp[15:0] +} +FOR i := 0 to 15 + dst.fp16[i] := ReduceArgumentFP16(a.fp16[i], imm8) +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - Extract the reduced argument of packed half-precision (16-bit) floating-point - elements in "a" by the number of bits specified by "imm8", and store the results in - "dst" using writemask "k" (elements are copied from "src" when the corresponding mask - bit is not set). [round_imm_note] - - DEFINE ReduceArgumentFP16(src[15:0], imm8[7:0]) { - m[15:0] := FP16(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[15:0] := POW(2.0, FP16(-m)) * ROUND(POW(2.0, FP16(m)) * src[15:0], imm8[3:0]) - tmp[15:0] := src[15:0] - tmp[15:0] - IF IsInf(tmp[15:0]) - tmp[15:0] := FP16(0.0) - FI - RETURN tmp[15:0] - } - FOR i := 0 to 15 - IF k[i] - dst.fp16[i] := ReduceArgumentFP16(a.fp16[i], imm8) - ELSE - dst.fp16[i] := src.fp16[i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Extract the reduced argument of packed half-precision (16-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note] + +DEFINE ReduceArgumentFP16(src[15:0], imm8[7:0]) { + m[15:0] := FP16(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[15:0] := POW(2.0, FP16(-m)) * ROUND(POW(2.0, FP16(m)) * src[15:0], imm8[3:0]) + tmp[15:0] := src[15:0] - tmp[15:0] + IF IsInf(tmp[15:0]) + tmp[15:0] := FP16(0.0) + FI + RETURN tmp[15:0] +} +FOR i := 0 to 15 + IF k[i] + dst.fp16[i] := ReduceArgumentFP16(a.fp16[i], imm8) + ELSE + dst.fp16[i] := src.fp16[i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Extract the reduced argument of packed half-precision (16-bit) floating-point - elements in "a" by the number of bits specified by "imm8", and store the results in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). [round_imm_note] - - DEFINE ReduceArgumentFP16(src[15:0], imm8[7:0]) { - m[15:0] := FP16(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[15:0] := POW(2.0, FP16(-m)) * ROUND(POW(2.0, FP16(m)) * src[15:0], imm8[3:0]) - tmp[15:0] := src[15:0] - tmp[15:0] - IF IsInf(tmp[15:0]) - tmp[15:0] := FP16(0.0) - FI - RETURN tmp[15:0] - } - FOR i := 0 to 15 - IF k[i] - dst.fp16[i] := ReduceArgumentFP16(a.fp16[i], imm8) - ELSE - dst.fp16[i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Extract the reduced argument of packed half-precision (16-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note] + +DEFINE ReduceArgumentFP16(src[15:0], imm8[7:0]) { + m[15:0] := FP16(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[15:0] := POW(2.0, FP16(-m)) * ROUND(POW(2.0, FP16(m)) * src[15:0], imm8[3:0]) + tmp[15:0] := src[15:0] - tmp[15:0] + IF IsInf(tmp[15:0]) + tmp[15:0] := FP16(0.0) + FI + RETURN tmp[15:0] +} +FOR i := 0 to 15 + IF k[i] + dst.fp16[i] := ReduceArgumentFP16(a.fp16[i], imm8) + ELSE + dst.fp16[i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
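ReduceArgumentFP16 subtracts the round-scaled value from the source, leaving only the fraction bits below 2^-M (M = imm8[7:4]). A sketch reusing the round_scale() helper from the roundscale note above:

fn reduce(x: f64, imm8: u8) -> f64 {
    let r = x - round_scale(x, imm8);
    if r.is_infinite() { 0.0 } else { r } // the pseudocode zeroes an infinite tmp
}

reduce(1.3125, 0x20) returns 0.0625: at two fraction bits 1.3125 rounds to 1.25, and the reduced argument is the remainder.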
+ Miscellaneous
- - - - Scale the packed half-precision (16-bit) floating-point elements in "a" using - values from "b", and store the results in "dst". - DEFINE ScaleFP16(src1, src2) { - denormal1 := (a.exp == 0) and (a.fraction != 0) - denormal2 := (b.exp == 0) and (b.fraction != 0) - tmp1 := src1 - tmp2 := src2 - IF MXCSR.DAZ - IF denormal1 + + + + Scale the packed half-precision (16-bit) floating-point elements in "a" using values from "b", and store the results in "dst". + DEFINE ScaleFP16(src1, src2) { + denormal1 := (a.exp == 0) and (a.fraction != 0) + denormal2 := (b.exp == 0) and (b.fraction != 0) + tmp1 := src1 + tmp2 := src2 + IF MXCSR.DAZ + IF denormal1 tmp1 := 0 - FI - IF denormal2 + FI + IF denormal2 tmp2 := 0 - FI - FI - RETURN tmp1 * POW(2.0, FLOOR(tmp2)) - } - FOR i := 0 to 7 - dst.fp16[i] := ScaleFP16(a.fp16[i], b.fp16[i]) - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Miscellaneous + + + + Scale the packed half-precision (16-bit) floating-point elements in "a" using values from "b", and store the results in "dst".
+ DEFINE ScaleFP16(src1, src2) {
+ denormal1 := (src1.exp == 0) and (src1.fraction != 0)
+ denormal2 := (src2.exp == 0) and (src2.fraction != 0)
+ tmp1 := src1
+ tmp2 := src2
+ IF MXCSR.DAZ
+ IF denormal1
 tmp1 := 0
+ FI
+ IF denormal2
 tmp2 := 0
+ FI
+ FI
+ RETURN tmp1 * POW(2.0, FLOOR(tmp2))
+}
+FOR i := 0 to 7
+ dst.fp16[i] := ScaleFP16(a.fp16[i], b.fp16[i])
+ENDFOR
+dst[MAX:128] := 0
+
+ + AVX512_FP16 + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - Scale the packed half-precision (16-bit) floating-point elements in "a" using - values from "b", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - DEFINE ScaleFP16(src1, src2) { - denormal1 := (a.exp == 0) and (a.fraction != 0) - denormal2 := (b.exp == 0) and (b.fraction != 0) - tmp1 := src1 - tmp2 := src2 - IF MXCSR.DAZ - IF denormal1 + + + + + + Scale the packed half-precision (16-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + DEFINE ScaleFP16(src1, src2) { + denormal1 := (a.exp == 0) and (a.fraction != 0) + denormal2 := (b.exp == 0) and (b.fraction != 0) + tmp1 := src1 + tmp2 := src2 + IF MXCSR.DAZ + IF denormal1 tmp1 := 0 - FI - IF denormal2 + FI + IF denormal2 tmp2 := 0 - FI - FI - RETURN tmp1 * POW(2.0, FLOOR(tmp2)) - } - FOR i := 0 to 7 - IF k[i] - dst.fp16[i] := ScaleFP16(a.fp16[i], b.fp16[i]) - ELSE - dst.fp16[i] := src.fp16[i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Scale the packed half-precision (16-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ DEFINE ScaleFP16(src1, src2) {
+ denormal1 := (src1.exp == 0) and (src1.fraction != 0)
+ denormal2 := (src2.exp == 0) and (src2.fraction != 0)
+ tmp1 := src1
+ tmp2 := src2
+ IF MXCSR.DAZ
+ IF denormal1
 tmp1 := 0
+ FI
+ IF denormal2
 tmp2 := 0
+ FI
+ FI
+ RETURN tmp1 * POW(2.0, FLOOR(tmp2))
+}
+FOR i := 0 to 7
+ IF k[i]
+ dst.fp16[i] := ScaleFP16(a.fp16[i], b.fp16[i])
+ ELSE
+ dst.fp16[i] := src.fp16[i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+
+ + AVX512_FP16 + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Scale the packed half-precision (16-bit) floating-point elements in "a" using - values from "b", and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - DEFINE ScaleFP16(src1, src2) { - denormal1 := (a.exp == 0) and (a.fraction != 0) - denormal2 := (b.exp == 0) and (b.fraction != 0) - tmp1 := src1 - tmp2 := src2 - IF MXCSR.DAZ - IF denormal1 + + + + + Scale the packed half-precision (16-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + DEFINE ScaleFP16(src1, src2) { + denormal1 := (a.exp == 0) and (a.fraction != 0) + denormal2 := (b.exp == 0) and (b.fraction != 0) + tmp1 := src1 + tmp2 := src2 + IF MXCSR.DAZ + IF denormal1 tmp1 := 0 - FI - IF denormal2 + FI + IF denormal2 tmp2 := 0 - FI - FI - RETURN tmp1 * POW(2.0, FLOOR(tmp2)) - } - FOR i := 0 to 7 - IF k[i] - dst.fp16[i] := ScaleFP16(a.fp16[i], b.fp16[i]) - ELSE - dst.fp16[i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Scale the packed half-precision (16-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ DEFINE ScaleFP16(src1, src2) {
+ denormal1 := (src1.exp == 0) and (src1.fraction != 0)
+ denormal2 := (src2.exp == 0) and (src2.fraction != 0)
+ tmp1 := src1
+ tmp2 := src2
+ IF MXCSR.DAZ
+ IF denormal1
 tmp1 := 0
+ FI
+ IF denormal2
 tmp2 := 0
+ FI
+ FI
+ RETURN tmp1 * POW(2.0, FLOOR(tmp2))
+}
+FOR i := 0 to 7
+ IF k[i]
+ dst.fp16[i] := ScaleFP16(a.fp16[i], b.fp16[i])
+ ELSE
+ dst.fp16[i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+
+ + AVX512_FP16 + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - Scale the packed half-precision (16-bit) floating-point elements in "a" using - values from "b", and store the results in "dst". - DEFINE ScaleFP16(src1, src2) { - denormal1 := (a.exp == 0) and (a.fraction != 0) - denormal2 := (b.exp == 0) and (b.fraction != 0) - tmp1 := src1 - tmp2 := src2 - IF MXCSR.DAZ - IF denormal1 + + + + Scale the packed half-precision (16-bit) floating-point elements in "a" using values from "b", and store the results in "dst". + DEFINE ScaleFP16(src1, src2) { + denormal1 := (a.exp == 0) and (a.fraction != 0) + denormal2 := (b.exp == 0) and (b.fraction != 0) + tmp1 := src1 + tmp2 := src2 + IF MXCSR.DAZ + IF denormal1 tmp1 := 0 - FI - IF denormal2 + FI + IF denormal2 tmp2 := 0 - FI - FI - RETURN tmp1 * POW(2.0, FLOOR(tmp2)) - } - FOR i := 0 to 15 - dst.fp16[i] := ScaleFP16(a.fp16[i], b.fp16[i]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Miscellaneous + + + + Scale the packed half-precision (16-bit) floating-point elements in "a" using values from "b", and store the results in "dst".
+ DEFINE ScaleFP16(src1, src2) {
+ denormal1 := (src1.exp == 0) and (src1.fraction != 0)
+ denormal2 := (src2.exp == 0) and (src2.fraction != 0)
+ tmp1 := src1
+ tmp2 := src2
+ IF MXCSR.DAZ
+ IF denormal1
 tmp1 := 0
+ FI
+ IF denormal2
 tmp2 := 0
+ FI
+ FI
+ RETURN tmp1 * POW(2.0, FLOOR(tmp2))
+}
+FOR i := 0 to 15
+ dst.fp16[i] := ScaleFP16(a.fp16[i], b.fp16[i])
+ENDFOR
+dst[MAX:256] := 0
+
+ + AVX512_FP16 + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - - Scale the packed half-precision (16-bit) floating-point elements in "a" using - values from "b", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - DEFINE ScaleFP16(src1, src2) { - denormal1 := (a.exp == 0) and (a.fraction != 0) - denormal2 := (b.exp == 0) and (b.fraction != 0) - tmp1 := src1 - tmp2 := src2 - IF MXCSR.DAZ - IF denormal1 + + + + + + Scale the packed half-precision (16-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + DEFINE ScaleFP16(src1, src2) { + denormal1 := (a.exp == 0) and (a.fraction != 0) + denormal2 := (b.exp == 0) and (b.fraction != 0) + tmp1 := src1 + tmp2 := src2 + IF MXCSR.DAZ + IF denormal1 tmp1 := 0 - FI - IF denormal2 + FI + IF denormal2 tmp2 := 0 - FI - FI - RETURN tmp1 * POW(2.0, FLOOR(tmp2)) - } - FOR i := 0 to 15 - IF k[i] - dst.fp16[i] := ScaleFP16(a.fp16[i], b.fp16[i]) - ELSE - dst.fp16[i] := src.fp16[i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Miscellaneous + + + + + + Scale the packed half-precision (16-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ DEFINE ScaleFP16(src1, src2) {
+ denormal1 := (src1.exp == 0) and (src1.fraction != 0)
+ denormal2 := (src2.exp == 0) and (src2.fraction != 0)
+ tmp1 := src1
+ tmp2 := src2
+ IF MXCSR.DAZ
+ IF denormal1
 tmp1 := 0
+ FI
+ IF denormal2
 tmp2 := 0
+ FI
+ FI
+ RETURN tmp1 * POW(2.0, FLOOR(tmp2))
+}
+FOR i := 0 to 15
+ IF k[i]
+ dst.fp16[i] := ScaleFP16(a.fp16[i], b.fp16[i])
+ ELSE
+ dst.fp16[i] := src.fp16[i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+
+ + AVX512_FP16 + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Scale the packed half-precision (16-bit) floating-point elements in "a" using - values from "b", and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - DEFINE ScaleFP16(src1, src2) { - denormal1 := (a.exp == 0) and (a.fraction != 0) - denormal2 := (b.exp == 0) and (b.fraction != 0) - tmp1 := src1 - tmp2 := src2 - IF MXCSR.DAZ - IF denormal1 + + + + + Scale the packed half-precision (16-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + DEFINE ScaleFP16(src1, src2) { + denormal1 := (a.exp == 0) and (a.fraction != 0) + denormal2 := (b.exp == 0) and (b.fraction != 0) + tmp1 := src1 + tmp2 := src2 + IF MXCSR.DAZ + IF denormal1 tmp1 := 0 - FI - IF denormal2 + FI + IF denormal2 tmp2 := 0 - FI - FI - RETURN tmp1 * POW(2.0, FLOOR(tmp2)) - } - FOR i := 0 to 15 - IF k[i] - dst.fp16[i] := ScaleFP16(a.fp16[i], b.fp16[i]) - ELSE - dst.fp16[i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Scale the packed half-precision (16-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ DEFINE ScaleFP16(src1, src2) {
+ denormal1 := (src1.exp == 0) and (src1.fraction != 0)
+ denormal2 := (src2.exp == 0) and (src2.fraction != 0)
+ tmp1 := src1
+ tmp2 := src2
+ IF MXCSR.DAZ
+ IF denormal1
 tmp1 := 0
+ FI
+ IF denormal2
 tmp2 := 0
+ FI
+ FI
+ RETURN tmp1 * POW(2.0, FLOOR(tmp2))
+}
+FOR i := 0 to 15
+ IF k[i]
+ dst.fp16[i] := ScaleFP16(a.fp16[i], b.fp16[i])
+ ELSE
+ dst.fp16[i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+
+ + AVX512_FP16 + AVX512VL +
immintrin.h
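ScaleFP16 multiplies the first operand by two raised to the floor of the second. A one-lane sketch (f32 stand-in; the MXCSR.DAZ denormal handling from the pseudocode is omitted):

fn scalef(src1: f32, src2: f32) -> f32 {
    src1 * f32::powi(2.0, src2.floor() as i32)
}

scalef(3.0, 2.7) returns 12.0: the exponent argument is floored, not rounded.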
+ Miscellaneous
- - - - Test packed half-precision (16-bit) floating-point elements in "a" for special - categories specified by "imm8", and store the results in mask vector "k". - [fpclass_note] - FOR i := 0 to 7 - k[i] := CheckFPClass_FP16(a.fp16[i], imm8[7:0]) - ENDFOR - k[MAX:8] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Miscellaneous + + + + Test packed half-precision (16-bit) floating-point elements in "a" for special categories specified by "imm8", and store the results in mask vector "k". + [fpclass_note] + FOR i := 0 to 7 + k[i] := CheckFPClass_FP16(a.fp16[i], imm8[7:0]) +ENDFOR +k[MAX:8] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Test packed half-precision (16-bit) floating-point elements in "a" for special - categories specified by "imm8", and store the results in mask vector "k" using zeromask - "k1" (elements are zeroed out when the corresponding mask bit is not set). - [fpclass_note] - FOR i := 0 to 7 - IF k1[i] - k[i] := CheckFPClass_FP16(a.fp16[i], imm8[7:0]) - ELSE - k[i] := 0 - FI - ENDFOR - k[MAX:8] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Test packed half-precision (16-bit) floating-point elements in "a" for special categories specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + [fpclass_note] + FOR i := 0 to 7 + IF k1[i] + k[i] := CheckFPClass_FP16(a.fp16[i], imm8[7:0]) + ELSE + k[i] := 0 + FI +ENDFOR +k[MAX:8] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - Test packed half-precision (16-bit) floating-point elements in "a" for special - categories specified by "imm8", and store the results in mask vector "k". + + + + Test packed half-precision (16-bit) floating-point elements in "a" for special categories specified by "imm8", and store the results in mask vector "k". [fpclass_note] - FOR i := 0 to 15 - k[i] := CheckFPClass_FP16(a.fp16[i], imm8[7:0]) - ENDFOR - k[MAX:16] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Miscellaneous + FOR i := 0 to 15 + k[i] := CheckFPClass_FP16(a.fp16[i], imm8[7:0]) +ENDFOR +k[MAX:16] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Test packed half-precision (16-bit) floating-point elements in "a" for special - categories specified by "imm8", and store the results in mask vector "k" using zeromask - "k1" (elements are zeroed out when the corresponding mask bit is not set). - [fpclass_note] - FOR i := 0 to 15 - IF k1[i] - k[i] := CheckFPClass_FP16(a.fp16[i], imm8[7:0]) - ELSE - k[i] := 0 - FI - ENDFOR - k[MAX:16] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Test packed half-precision (16-bit) floating-point elements in "a" for special categories specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). + [fpclass_note] + FOR i := 0 to 15 + IF k1[i] + k[i] := CheckFPClass_FP16(a.fp16[i], imm8[7:0]) + ELSE + k[i] := 0 + FI +ENDFOR +k[MAX:16] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
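CheckFPClass_FP16 tests each lane against the categories selected by imm8. A one-lane sketch; the bit assignments follow the order given in [fpclass_note] (0 QNaN, 1 +0, 2 -0, 3 +inf, 4 -inf, 5 denormal, 6 finite negative, 7 SNaN), but Rust's f32 view cannot tell QNaN from SNaN, so the two NaN classes are lumped together here:

fn check_fp_class(x: f32, imm8: u8) -> bool {
    let mut class = 0u8;
    if x.is_nan() { class |= 0b1000_0001; } // QNaN and SNaN, not distinguished
    if x == 0.0 && x.is_sign_positive() { class |= 1 << 1; } // +0
    if x == 0.0 && x.is_sign_negative() { class |= 1 << 2; } // -0
    if x == f32::INFINITY { class |= 1 << 3; }
    if x == f32::NEG_INFINITY { class |= 1 << 4; }
    if x.is_subnormal() { class |= 1 << 5; }
    if x.is_finite() && x.is_sign_negative() && x != 0.0 { class |= 1 << 6; }
    (class & imm8) != 0
}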
+ Miscellaneous
- - - - - Shuffle half-precision (16-bit) floating-point elements in "a" and "b" using - the corresponding selector and index in "idx", and store the results in "dst". - - FOR j := 0 to 7 - i := j*16 - off := idx[i+2:i] - dst.fp16[j] := idx[i+3] ? b.fp16[off] : a.fp16[off] - ENDFOR - dst[MAX:128] := 0 - - AVX512_FP16 - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Shuffle half-precision (16-bit) floating-point elements in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst". + +FOR j := 0 to 7 + i := j*16 + off := idx[i+2:i] + dst.fp16[j] := idx[i+3] ? b.fp16[off] : a.fp16[off] +ENDFOR +dst[MAX:128] := 0 + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Shuffle half-precision (16-bit) floating-point elements in "a" and "b" across - lanes using the corresponding selector and index in "idx", and store the results in - "dst". - - FOR j := 0 to 15 - i := j*16 - off := idx[i+3:i] - dst.fp16[j] := idx[i+4] ? b.fp16[off] : a.fp16[off] - ENDFOR - dst[MAX:256] := 0 - - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Shuffle half-precision (16-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst". + +FOR j := 0 to 15 + i := j*16 + off := idx[i+3:i] + dst.fp16[j] := idx[i+4] ? b.fp16[off] : a.fp16[off] +ENDFOR +dst[MAX:256] := 0 + + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Blend packed half-precision (16-bit) floating-point elements from "a" and "b" - using control mask "k", and store the results in "dst". - - FOR j := 0 to 15 - IF k[j] - dst.fp16[j] := b.fp16[j] - ELSE - dst.fp16[j] := a.fp16[j] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Blend packed half-precision (16-bit) floating-point elements from "a" and "b" using control mask "k", and store the results in "dst". + +FOR j := 0 to 15 + IF k[j] + dst.fp16[j] := b.fp16[j] + ELSE + dst.fp16[j] := a.fp16[j] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - - Blend packed half-precision (16-bit) floating-point elements from "a" and "b" - using control mask "k", and store the results in "dst". - - FOR j := 0 to 7 - IF k[j] - dst.fp16[j] := b.fp16[j] - ELSE - dst.fp16[j] := a.fp16[j] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Miscellaneous + + + + + Blend packed half-precision (16-bit) floating-point elements from "a" and "b" using control mask "k", and store the results in "dst". + +FOR j := 0 to 7 + IF k[j] + dst.fp16[j] := b.fp16[j] + ELSE + dst.fp16[j] := a.fp16[j] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - Shuffle half-precision (16-bit) floating-point elements in "a" across lanes - using the corresponding index in "idx", and store the results in "dst". - - FOR j := 0 to 15 - i := j*16 - id := idx[i+3:i] - dst.fp16[j] := a.fp16[id] - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Miscellaneous + + + + Shuffle half-precision (16-bit) floating-point elements in "a" across lanes using the corresponding index in "idx", and store the results in "dst". + +FOR j := 0 to 15 + i := j*16 + id := idx[i+3:i] + dst.fp16[j] := a.fp16[id] +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Miscellaneous
- - - - Shuffle half-precision (16-bit) floating-point elements in "a" using the - corresponding index in "idx", and store the results in "dst". - - FOR j := 0 to 7 - i := j*16 - id := idx[i+2:i] - dst.fp16[j] := a.fp16[id] - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Miscellaneous + + + + Shuffle half-precision (16-bit) floating-point elements in "a" using the corresponding index in "idx", and store the results in "dst". + +FOR j := 0 to 7 + i := j*16 + id := idx[i+2:i] + dst.fp16[j] := a.fp16[id] +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
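The shuffle entries above all share the same selector logic: each 16-bit idx lane supplies an element index in its low bits, permutex2var additionally uses the next bit to choose between the two sources, and the blend forms replace idx with the mask k. A sketch of the 128-bit two-source form, modelling f16 lanes as raw u16 bit patterns (a shuffle never interprets lane contents):

fn permutex2var_ph(a: &[u16; 8], idx: &[u16; 8], b: &[u16; 8]) -> [u16; 8] {
    let mut dst = [0u16; 8];
    for j in 0..8 {
        let off = (idx[j] & 0b111) as usize; // idx[i+2:i]: element index
        dst[j] = if idx[j] & 0b1000 != 0 { b[off] } else { a[off] }; // idx[i+3]: source select
    }
    dst
}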
+ Miscellaneous
- - - Compute the approximate reciprocal square root of packed half-precision - (16-bit) floating-point elements in "a", and store the results in "dst". The maximum - relative error for this approximation is less than 1.5*2^-12. - - FOR i := 0 to 7 - dst.fp16[i] := (1.0 / SQRT(a.fp16[i])) - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Elementary Math Functions + + + Compute the approximate reciprocal square root of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 1.5*2^-12. + +FOR i := 0 to 7 + dst.fp16[i] := (1.0 / SQRT(a.fp16[i])) +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the approximate reciprocal square root of packed half-precision - (16-bit) floating-point elements in "a", and store the results in "dst" using writemask - "k" (elements are copied from "src" when the corresponding mask bit is not set). The - maximum relative error for this approximation is less than 1.5*2^-12. - - FOR i := 0 to 7 - IF k[i] - dst.fp16[i] := (1.0 / SQRT(a.fp16[i])) - ELSE - dst.fp16[i] := src.fp16[i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Elementary Math Functions + + + + + Compute the approximate reciprocal square root of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 1.5*2^-12. + +FOR i := 0 to 7 + IF k[i] + dst.fp16[i] := (1.0 / SQRT(a.fp16[i])) + ELSE + dst.fp16[i] := src.fp16[i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Elementary Math Functions
- - - - Compute the approximate reciprocal square root of packed half-precision - (16-bit) floating-point elements in "a", and store the results in "dst" using zeromask - "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum - relative error for this approximation is less than 1.5*2^-12. - - FOR i := 0 to 7 - IF k[i] - dst.fp16[i] := (1.0 / SQRT(a.fp16[i])) - ELSE - dst.fp16[i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Elementary Math Functions + + + + Compute the approximate reciprocal square root of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 1.5*2^-12. + +FOR i := 0 to 7 + IF k[i] + dst.fp16[i] := (1.0 / SQRT(a.fp16[i])) + ELSE + dst.fp16[i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Elementary Math Functions
- - - Compute the approximate reciprocal square root of packed half-precision - (16-bit) floating-point elements in "a", and store the results in "dst". The maximum - relative error for this approximation is less than 1.5*2^-12. - - FOR i := 0 to 15 - dst.fp16[i] := (1.0 / SQRT(a.fp16[i])) - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Elementary Math Functions + + + Compute the approximate reciprocal square root of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 1.5*2^-12. + +FOR i := 0 to 15 + dst.fp16[i] := (1.0 / SQRT(a.fp16[i])) +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the approximate reciprocal square root of packed half-precision - (16-bit) floating-point elements in "a", and store the results in "dst" using writemask - "k" (elements are copied from "src" when the corresponding mask bit is not set). The - maximum relative error for this approximation is less than 1.5*2^-12. - - FOR i := 0 to 15 - IF k[i] - dst.fp16[i] := (1.0 / SQRT(a.fp16[i])) - ELSE - dst.fp16[i] := src.fp16[i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Elementary Math Functions + + + + + Compute the approximate reciprocal square root of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 1.5*2^-12. + +FOR i := 0 to 15 + IF k[i] + dst.fp16[i] := (1.0 / SQRT(a.fp16[i])) + ELSE + dst.fp16[i] := src.fp16[i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Elementary Math Functions
- - - - Compute the approximate reciprocal square root of packed half-precision - (16-bit) floating-point elements in "a", and store the results in "dst" using zeromask - "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum - relative error for this approximation is less than 1.5*2^-12. - - FOR i := 0 to 15 - IF k[i] - dst.fp16[i] := (1.0 / SQRT(a.fp16[i])) - ELSE - dst.fp16[i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Elementary Math Functions + + + + Compute the approximate reciprocal square root of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 1.5*2^-12. + +FOR i := 0 to 15 + IF k[i] + dst.fp16[i] := (1.0 / SQRT(a.fp16[i])) + ELSE + dst.fp16[i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
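The rsqrt (and rcp) entries guarantee a maximum relative error below 1.5*2^-12 rather than correctly rounded results. Expressed as a checker (f32 stand-in), the bound reads:

fn within_rsqrt_tolerance(x: f32, approx: f32) -> bool {
    let exact = 1.0 / x.sqrt(); // reference value for positive, normal x
    ((approx - exact) / exact).abs() < 1.5 * 2f32.powi(-12)
}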
+ Elementary Math Functions
- - - Compute the square root of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst". - - FOR i := 0 to 7 - dst.fp16[i] := SQRT(a.fp16[i]) - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Elementary Math Functions + + + Compute the square root of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". + +FOR i := 0 to 7 + dst.fp16[i] := SQRT(a.fp16[i]) +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the square root of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - - FOR i := 0 to 7 - IF k[i] - dst.fp16[i] := SQRT(a.fp16[i]) - ELSE - dst.fp16[i] := src.fp16[i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Elementary Math Functions + + + + + Compute the square root of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR i := 0 to 7 + IF k[i] + dst.fp16[i] := SQRT(a.fp16[i]) + ELSE + dst.fp16[i] := src.fp16[i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Elementary Math Functions
- - - - Compute the square root of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - - FOR i := 0 to 7 - IF k[i] - dst.fp16[i] := SQRT(a.fp16[i]) - ELSE - dst.fp16[i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Elementary Math Functions + + + + Compute the square root of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR i := 0 to 7 + IF k[i] + dst.fp16[i] := SQRT(a.fp16[i]) + ELSE + dst.fp16[i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Elementary Math Functions
- - - Compute the square root of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst". - - FOR i := 0 to 15 - dst.fp16[i] := SQRT(a.fp16[i]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Elementary Math Functions + + + Compute the square root of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". + +FOR i := 0 to 15 + dst.fp16[i] := SQRT(a.fp16[i]) +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the square root of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - - FOR i := 0 to 15 - IF k[i] - dst.fp16[i] := SQRT(a.fp16[i]) - ELSE - dst.fp16[i] := src.fp16[i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Elementary Math Functions + + + + + Compute the square root of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR i := 0 to 15 + IF k[i] + dst.fp16[i] := SQRT(a.fp16[i]) + ELSE + dst.fp16[i] := src.fp16[i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Elementary Math Functions
- - - - Compute the square root of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - - FOR i := 0 to 15 - IF k[i] - dst.fp16[i] := SQRT(a.fp16[i]) - ELSE - dst.fp16[i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Elementary Math Functions + + + + Compute the square root of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR i := 0 to 15 + IF k[i] + dst.fp16[i] := SQRT(a.fp16[i]) + ELSE + dst.fp16[i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Elementary Math Functions
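Unlike the rsqrt approximation, exact square root has a generic SIMD lowering, so the no-rounding sqrt forms above need no target-specific LLVM intrinsic. A minimal sketch for the 128-bit variants, assuming the generic simd_fsqrt and simd_select_bitmask platform intrinsics accept f16 lanes (they do for the other float widths in stdarch; the f16 case is an assumption):

use crate::intrinsics::simd::{simd_fsqrt, simd_select_bitmask};

#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
pub unsafe fn _mm_sqrt_ph(a: __m128h) -> __m128h {
    transmute(simd_fsqrt(a.as_f16x8()))
}

#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
pub unsafe fn _mm_mask_sqrt_ph(src: __m128h, k: __mmask8, a: __m128h) -> __m128h {
    // k[i] selects the computed lane, otherwise the lane comes from `src`.
    let r = simd_fsqrt(a.as_f16x8());
    transmute(simd_select_bitmask(k, r, src.as_f16x8()))
}

#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
pub unsafe fn _mm_maskz_sqrt_ph(k: __mmask8, a: __m128h) -> __m128h {
    // Zeromask: unselected lanes become 0, matching the pseudocode.
    let r = simd_fsqrt(a.as_f16x8());
    transmute(simd_select_bitmask(k, r, f16x8::splat(0.0)))
}

The 256-bit entries above differ only in the widths involved (f16x16, __mmask16).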
- - - Compute the approximate reciprocal of packed half-precision (16-bit) - floating-point elements in "a", and store the results in "dst". The maximum relative - error for this approximation is less than 1.5*2^-12. - - FOR i := 0 to 7 - dst.fp16[i] := (1.0 / a.fp16[i]) - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Elementary Math Functions + + + Compute the approximate reciprocal of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 1.5*2^-12. + +FOR i := 0 to 7 + dst.fp16[i] := (1.0 / a.fp16[i]) +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the approximate reciprocal of packed half-precision (16-bit) - floating-point elements in "a", and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). The maximum - relative error for this approximation is less than 1.5*2^-12. - - FOR i := 0 to 7 - IF k[i] - dst.fp16[i] := (1.0 / a.fp16[i]) - ELSE - dst.fp16[i] := src.fp16[i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Elementary Math Functions + + + + + Compute the approximate reciprocal of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 1.5*2^-12. + +FOR i := 0 to 7 + IF k[i] + dst.fp16[i] := (1.0 / a.fp16[i]) + ELSE + dst.fp16[i] := src.fp16[i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Elementary Math Functions
- - - - Compute the approximate reciprocal of packed half-precision (16-bit) - floating-point elements in "a", and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). The maximum - relative error for this approximation is less than 1.5*2^-12. - - FOR i := 0 to 7 - IF k[i] - dst.fp16[i] := (1.0 / a.fp16[i]) - ELSE - dst.fp16[i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Elementary Math Functions + + + + Compute the approximate reciprocal of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 1.5*2^-12. + +FOR i := 0 to 7 + IF k[i] + dst.fp16[i] := (1.0 / a.fp16[i]) + ELSE + dst.fp16[i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Elementary Math Functions
- - - Compute the approximate reciprocal of packed half-precision (16-bit) - floating-point elements in "a", and store the results in "dst". The maximum relative - error for this approximation is less than 1.5*2^-12. - - FOR i := 0 to 15 - dst.fp16[i] := (1.0 / a.fp16[i]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Elementary Math Functions + + + Compute the approximate reciprocal of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 1.5*2^-12. + +FOR i := 0 to 15 + dst.fp16[i] := (1.0 / a.fp16[i]) +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the approximate reciprocal of packed half-precision (16-bit) - floating-point elements in "a", and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). The maximum - relative error for this approximation is less than 1.5*2^-12. - - FOR i := 0 to 15 - IF k[i] - dst.fp16[i] := (1.0 / a.fp16[i]) - ELSE - dst.fp16[i] := src.fp16[i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Elementary Math Functions + + + + + Compute the approximate reciprocal of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 1.5*2^-12. + +FOR i := 0 to 15 + IF k[i] + dst.fp16[i] := (1.0 / a.fp16[i]) + ELSE + dst.fp16[i] := src.fp16[i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Elementary Math Functions
- - - - Compute the approximate reciprocal of packed half-precision (16-bit) - floating-point elements in "a", and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). The maximum - relative error for this approximation is less than 1.5*2^-12. - - FOR i := 0 to 15 - IF k[i] - dst.fp16[i] := (1.0 / a.fp16[i]) - ELSE - dst.fp16[i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Elementary Math Functions + + + + Compute the approximate reciprocal of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 1.5*2^-12. + +FOR i := 0 to 15 + IF k[i] + dst.fp16[i] := (1.0 / a.fp16[i]) + ELSE + dst.fp16[i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Elementary Math Functions
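The rcp entries follow exactly the same binding shape as the rsqrt sketch earlier: an approximation instruction with no generic lowering, funneled through one assumed masked LLVM intrinsic. Shown here only for the 128-bit maskz form; the link name is again an assumption by analogy:

#[allow(improper_ctypes)]
extern "C" {
    // Assumed LLVM name/signature, mirroring the rsqrt sketch above.
    #[link_name = "llvm.x86.avx512fp16.mask.rcp.ph.128"]
    fn vrcpph_128(a: f16x8, src: f16x8, k: u8) -> f16x8;
}

#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
pub unsafe fn _mm_maskz_rcp_ph(k: __mmask8, a: __m128h) -> __m128h {
    // maskz is just the masked form with a zeroed passthrough.
    transmute(vrcpph_128(a.as_f16x8(), f16x8::splat(0.0), k))
}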
- - - Load 256-bits (composed of 16 packed half-precision (16-bit) floating-point - elements) from memory into "dst". - "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may - be generated. - - dst[255:0] := MEM[mem_addr+255:mem_addr] - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Load + + + Load 256-bits (composed of 16 packed half-precision (16-bit) floating-point elements) from memory into "dst". + "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated. + +dst[255:0] := MEM[mem_addr+255:mem_addr] +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Load
- - - Load 128-bits (composed of 8 packed half-precision (16-bit) floating-point - elements) from memory into "dst". - "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may - be generated. - - dst[127:0] := MEM[mem_addr+127:mem_addr] - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Load + + + Load 128-bits (composed of 8 packed half-precision (16-bit) floating-point elements) from memory into "dst". + "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +dst[127:0] := MEM[mem_addr+127:mem_addr] +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Load
- - - Load 256-bits (composed of 16 packed half-precision (16-bit) floating-point - elements) from memory into "dst". - "mem_addr" does not need to be aligned on any particular boundary. - - dst[255:0] := MEM[mem_addr+255:mem_addr] - dst[MAX:256] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Load + + + Load 256-bits (composed of 16 packed half-precision (16-bit) floating-point elements) from memory into "dst". + "mem_addr" does not need to be aligned on any particular boundary. + +dst[255:0] := MEM[mem_addr+255:mem_addr] +dst[MAX:256] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Load
- - - Load 128-bits (composed of 8 packed half-precision (16-bit) floating-point - elements) from memory into "dst". - "mem_addr" does not need to be aligned on any particular boundary. - - dst[127:0] := MEM[mem_addr+127:mem_addr] - dst[MAX:128] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Load + + + Load 128-bits (composed of 8 packed half-precision (16-bit) floating-point elements) from memory into "dst". + "mem_addr" does not need to be aligned on any particular boundary. + +dst[127:0] := MEM[mem_addr+127:mem_addr] +dst[MAX:128] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Load
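The load entries are plain memory reads; only the alignment contract differs between the load and loadu pairs. A minimal sketch of the 256-bit pair, assuming the pointer parameter is surfaced as *const f16 on the Rust side (the C prototype takes a void const* / _Float16 const*):

#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
pub unsafe fn _mm256_load_ph(mem_addr: *const f16) -> __m256h {
    // The caller guarantees 32-byte alignment per the entry above, so a
    // typed read is valid and can lower to an aligned vector move.
    *mem_addr.cast::<__m256h>()
}

#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
pub unsafe fn _mm256_loadu_ph(mem_addr: *const f16) -> __m256h {
    // No alignment guarantee: read_unaligned avoids UB on misaligned input.
    mem_addr.cast::<__m256h>().read_unaligned()
}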
- - - - Store 256-bits (composed of 16 packed half-precision (16-bit) floating-point - elements) from "a" into memory. - "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may - be generated. - - MEM[mem_addr+255:mem_addr] := a[255:0] - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Store + + + + Store 256-bits (composed of 16 packed half-precision (16-bit) floating-point elements) from "a" into memory. + "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated. + +MEM[mem_addr+255:mem_addr] := a[255:0] + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Store
- - - - Store 128-bits (composed of 8 packed half-precision (16-bit) floating-point - elements) from "a" into memory. - "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may - be generated. - - MEM[mem_addr+127:mem_addr] := a[127:0] - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Store + + + + Store 128-bits (composed of 8 packed half-precision (16-bit) floating-point elements) from "a" into memory. + "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +MEM[mem_addr+127:mem_addr] := a[127:0] + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Store
- - - - Store 256-bits (composed of 16 packed half-precision (16-bit) floating-point - elements) from "a" into memory. - "mem_addr" does not need to be aligned on any particular boundary. - - MEM[mem_addr+255:mem_addr] := a[255:0] - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Store + + + + Store 256-bits (composed of 16 packed half-precision (16-bit) floating-point elements) from "a" into memory. + "mem_addr" does not need to be aligned on any particular boundary. + +MEM[mem_addr+255:mem_addr] := a[255:0] + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Store
- - - - Store 128-bits (composed of 8 packed half-precision (16-bit) floating-point - elements) from "a" into memory. - "mem_addr" does not need to be aligned on any particular boundary. - - MEM[mem_addr+127:mem_addr] := a[127:0] - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Store + + + + Store 128-bits (composed of 8 packed half-precision (16-bit) floating-point elements) from "a" into memory. + "mem_addr" does not need to be aligned on any particular boundary. + +MEM[mem_addr+127:mem_addr] := a[127:0] + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Store
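The stores mirror the loads, with a write in place of each read and the same assumption about the pointer type:

#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
pub unsafe fn _mm256_store_ph(mem_addr: *mut f16, a: __m256h) {
    // Aligned contract: mem_addr must be 32-byte aligned per the entry above.
    *mem_addr.cast::<__m256h>() = a;
}

#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
pub unsafe fn _mm256_storeu_ph(mem_addr: *mut f16, a: __m256h) {
    // Unaligned counterpart of the store above.
    mem_addr.cast::<__m256h>().write_unaligned(a);
}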
- - - Return vector of type __m256h with undefined elements. - AVX512_FP16 - AVX512VL -
immintrin.h
- General Support + + + Return vector of type __m256h with undefined elements. + AVX512_FP16 + AVX512VL +
immintrin.h
+ General Support
- - - Return vector of type __m128h with undefined elements. - AVX512_FP16 - AVX512VL -
immintrin.h
- General Support + + + Return vector of type __m128h with undefined elements. + AVX512_FP16 + AVX512VL +
immintrin.h
+ General Support
- - - Return vector of type __m256h with all elements set to zero. - - dst[MAX:0] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Set + + + Return vector of type __m256h with all elements set to zero. + +dst[MAX:0] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Set
- - - Return vector of type __m128h with all elements set to zero. - - dst[MAX:0] := 0 - - - AVX512_FP16 - AVX512VL -
immintrin.h
- Set -
- - - - - - - Add packed half-precision (16-bit) floating-point elements in "a" and "b", and - store the results in "dst". - - FOR j := 0 TO 31 - dst.fp16[j] := a.fp16[j] + b.fp16[j] - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + Return vector of type __m128h with all elements set to zero. + +dst[MAX:0] := 0 + + + AVX512_FP16 + AVX512VL +
immintrin.h
+ Set +
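The General Support and Set entries above have near-trivial bindings: an all-zero bit pattern is 0.0 in every f16 lane, and stdarch conventionally returns zeros for the "undefined" intrinsics rather than exposing genuinely uninitialized memory. A sketch for the 256-bit pair:

#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
pub unsafe fn _mm256_setzero_ph() -> __m256h {
    // The all-zero bit pattern is +0.0 in each lane.
    transmute(f16x16::splat(0.0))
}

#[inline]
#[target_feature(enable = "avx512fp16,avx512vl")]
pub unsafe fn _mm256_undefined_ph() -> __m256h {
    // "Undefined elements" per the entry above; zeros are a safe, valid choice.
    _mm256_setzero_ph()
}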
+ + + + + + + Add packed half-precision (16-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 TO 31 + dst.fp16[j] := a.fp16[j] + b.fp16[j] +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - Add packed half-precision (16-bit) floating-point elements in "a" and "b", and - store the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 TO 31 - IF k[j] - dst.fp16[j] := a.fp16[j] + b.fp16[j] - ELSE - dst.fp16[j] := src.fp16[j] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + Add packed half-precision (16-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 TO 31 + IF k[j] + dst.fp16[j] := a.fp16[j] + b.fp16[j] + ELSE + dst.fp16[j] := src.fp16[j] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - Add packed half-precision (16-bit) floating-point elements in "a" and "b", and - store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 TO 31 - IF k[j] - dst.fp16[j] := a.fp16[j] + b.fp16[j] - ELSE - dst.fp16[j] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + Add packed half-precision (16-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 TO 31 + IF k[j] + dst.fp16[j] := a.fp16[j] + b.fp16[j] + ELSE + dst.fp16[j] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - Add packed half-precision (16-bit) floating-point elements in "a" and "b", and - store the results in "dst". - [round_note] - - FOR j := 0 TO 31 - dst.fp16[j] := a.fp16[j] + b.fp16[j] - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + Add packed half-precision (16-bit) floating-point elements in "a" and "b", and store the results in "dst". + [round_note] + +FOR j := 0 TO 31 + dst.fp16[j] := a.fp16[j] + b.fp16[j] +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - - Add packed half-precision (16-bit) floating-point elements in "a" and "b", and - store the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - [round_note] - - FOR j := 0 TO 31 - IF k[j] - dst.fp16[j] := a.fp16[j] + b.fp16[j] - ELSE - dst.fp16[j] := src.fp16[j] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + + Add packed half-precision (16-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 TO 31 + IF k[j] + dst.fp16[j] := a.fp16[j] + b.fp16[j] + ELSE + dst.fp16[j] := src.fp16[j] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - Add packed half-precision (16-bit) floating-point elements in "a" and "b", and - store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - [round_note] - - FOR j := 0 TO 31 - IF k[j] - dst.fp16[j] := a.fp16[j] + b.fp16[j] - ELSE - dst.fp16[j] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + Add packed half-precision (16-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 TO 31 + IF k[j] + dst.fp16[j] := a.fp16[j] + b.fp16[j] + ELSE + dst.fp16[j] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
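The 512-bit add family above is the template for every packed two-operand arithmetic entry that follows: the plain form maps to a generic SIMD intrinsic, the writemask and zeromask forms wrap it in simd_select_bitmask, and the [round_note] forms need a target LLVM intrinsic that takes the rounding mode as an immediate. A sketch under those assumptions; the llvm.x86.avx512fp16.add.ph.512 name and the static_assert_rounding! usage mirror how the existing f32/f64 rounding intrinsics are bound in stdarch, but are not verified here:

use crate::intrinsics::simd::{simd_add, simd_select_bitmask};

#[allow(improper_ctypes)]
extern "C" {
    // Assumed LLVM name for VADDPH zmm with an explicit rounding immediate.
    #[link_name = "llvm.x86.avx512fp16.add.ph.512"]
    fn vaddph(a: f16x32, b: f16x32, rounding: i32) -> f16x32;
}

#[inline]
#[target_feature(enable = "avx512fp16")]
pub unsafe fn _mm512_add_ph(a: __m512h, b: __m512h) -> __m512h {
    transmute(simd_add(a.as_f16x32(), b.as_f16x32()))
}

#[inline]
#[target_feature(enable = "avx512fp16")]
pub unsafe fn _mm512_mask_add_ph(src: __m512h, k: __mmask32, a: __m512h, b: __m512h) -> __m512h {
    // Unselected lanes come from `src`, matching the writemask pseudocode.
    let r = simd_add(a.as_f16x32(), b.as_f16x32());
    transmute(simd_select_bitmask(k, r, src.as_f16x32()))
}

#[inline]
#[target_feature(enable = "avx512fp16")]
pub unsafe fn _mm512_add_round_ph<const ROUNDING: i32>(a: __m512h, b: __m512h) -> __m512h {
    // Reject rounding immediates the hardware does not encode.
    static_assert_rounding!(ROUNDING);
    transmute(vaddph(a.as_f16x32(), b.as_f16x32(), ROUNDING))
}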
- - - - Add the lower half-precision (16-bit) floating-point elements in "a" and "b", - store the result in the lower element of "dst", and copy the upper 7 packed elements - from "a" to the upper elements of "dst". - - dst.fp16[0] := a.fp16[0] + b.fp16[0] - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + Add the lower half-precision (16-bit) floating-point elements in "a" and "b", store the result in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst". + +dst.fp16[0] := a.fp16[0] + b.fp16[0] +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - Add the lower half-precision (16-bit) floating-point elements in "a" and "b", - store the result in the lower element of "dst", and copy the upper 7 packed elements - from "a" to the upper elements of "dst". - [round_note] - - dst.fp16[0] := a.fp16[0] + b.fp16[0] - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + Add the lower half-precision (16-bit) floating-point elements in "a" and "b", store the result in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst". + [round_note] + +dst.fp16[0] := a.fp16[0] + b.fp16[0] +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - Add the lower half-precision (16-bit) floating-point elements in "a" and "b", - store the result in the lower element of "dst" using writemask "k" (the element is - copied from "src" when mask bit 0 is not set), and copy the upper 7 packed elements from - "a" to the upper elements of "dst". - - IF k[0] - dst.fp16[0] := a.fp16[0] + b.fp16[0] - ELSE - dst.fp16[0] := src.fp16[0] - FI - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + Add the lower half-precision (16-bit) floating-point elements in "a" and "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst.fp16[0] := a.fp16[0] + b.fp16[0] +ELSE + dst.fp16[0] := src.fp16[0] +FI +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - - Add the lower half-precision (16-bit) floating-point elements in "a" and "b", - store the result in the lower element of "dst" using writemask "k" (the element is - copied from "src" when mask bit 0 is not set), and copy the upper 7 packed elements from - "a" to the upper elements of "dst". - [round_note] - - IF k[0] - dst.fp16[0] := a.fp16[0] + b.fp16[0] - ELSE - dst.fp16[0] := src.fp16[0] - FI - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + + Add the lower half-precision (16-bit) floating-point elements in "a" and "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". + [round_note] + +IF k[0] + dst.fp16[0] := a.fp16[0] + b.fp16[0] +ELSE + dst.fp16[0] := src.fp16[0] +FI +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - Add the lower half-precision (16-bit) floating-point elements in "a" and "b", - store the result in the lower element of "dst" using zeromask "k" (the element is zeroed - out when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the - upper elements of "dst". - - IF k[0] - dst.fp16[0] := a.fp16[0] + b.fp16[0] - ELSE - dst.fp16[0] := 0 - FI - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + Add the lower half-precision (16-bit) floating-point elements in "a" and "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst.fp16[0] := a.fp16[0] + b.fp16[0] +ELSE + dst.fp16[0] := 0 +FI +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - Add the lower half-precision (16-bit) floating-point elements in "a" and "b", - store the result in the lower element of "dst" using zeromask "k" (the element is zeroed - out when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the - upper elements of "dst". - [round_note] - - IF k[0] - dst.fp16[0] := a.fp16[0] + b.fp16[0] - ELSE - dst.fp16[0] := 0 - FI - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + Add the lower half-precision (16-bit) floating-point elements in "a" and "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". + [round_note] + +IF k[0] + dst.fp16[0] := a.fp16[0] + b.fp16[0] +ELSE + dst.fp16[0] := 0 +FI +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
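The scalar (sh) entries above only touch lane 0 and pass lanes 1..7 of "a" through. With the simd_insert!/simd_extract! helpers stdarch already uses for other scalar intrinsics, the no-rounding forms can be sketched as below; scalar f16 arithmetic is assumed to be available under the f16 feature, and the [round_note] forms would still need an LLVM intrinsic:

#[inline]
#[target_feature(enable = "avx512fp16")]
pub unsafe fn _mm_add_sh(a: __m128h, b: __m128h) -> __m128h {
    let sum: f16 = simd_extract!(a.as_f16x8(), 0) + simd_extract!(b.as_f16x8(), 0);
    // Lane 0 takes the sum; lanes 1..7 are carried over from `a`.
    transmute(simd_insert!(a.as_f16x8(), 0, sum))
}

#[inline]
#[target_feature(enable = "avx512fp16")]
pub unsafe fn _mm_mask_add_sh(src: __m128h, k: __mmask8, a: __m128h, b: __m128h) -> __m128h {
    // Only mask bit 0 matters for scalar operations.
    let sum: f16 = simd_extract!(a.as_f16x8(), 0) + simd_extract!(b.as_f16x8(), 0);
    let lane0: f16 = if k & 1 != 0 { sum } else { simd_extract!(src.as_f16x8(), 0) };
    transmute(simd_insert!(a.as_f16x8(), 0, lane0))
}

The scalar divide entries further down follow the identical shape with / in place of +.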
- - - - Divide packed half-precision (16-bit) floating-point elements in "a" by packed - elements in "b", and store the results in "dst". - - FOR j := 0 to 31 - dst.fp16[j] := a.fp16[j] / b.fp16[j] - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + Divide packed half-precision (16-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst". + +FOR j := 0 to 31 + dst.fp16[j] := a.fp16[j] / b.fp16[j] +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - Divide packed half-precision (16-bit) floating-point elements in "a" by packed - elements in "b", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 31 - IF k[j] - dst.fp16[j] := a.fp16[j] / b.fp16[j] - ELSE - dst.fp16[j] := src.fp16[j] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + Divide packed half-precision (16-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + IF k[j] + dst.fp16[j] := a.fp16[j] / b.fp16[j] + ELSE + dst.fp16[j] := src.fp16[j] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - Divide packed half-precision (16-bit) floating-point elements in "a" by packed - elements in "b", and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - - FOR j := 0 to 31 - IF k[j] - dst.fp16[j] := a.fp16[j] / b.fp16[j] - ELSE - dst.fp16[j] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + Divide packed half-precision (16-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + IF k[j] + dst.fp16[j] := a.fp16[j] / b.fp16[j] + ELSE + dst.fp16[j] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - Divide packed half-precision (16-bit) floating-point elements in "a" by packed - elements in "b", and store the results in "dst". - [round_note] - - FOR j := 0 to 31 - dst.fp16[j] := a.fp16[j] / b.fp16[j] - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + Divide packed half-precision (16-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst". + [round_note] + +FOR j := 0 to 31 + dst.fp16[j] := a.fp16[j] / b.fp16[j] +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - - Divide packed half-precision (16-bit) floating-point elements in "a" by packed - elements in "b", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - [round_note] - - FOR j := 0 to 31 - IF k[j] - dst.fp16[j] := a.fp16[j] / b.fp16[j] - ELSE - dst.fp16[j] := src.fp16[j] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + + Divide packed half-precision (16-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 31 + IF k[j] + dst.fp16[j] := a.fp16[j] / b.fp16[j] + ELSE + dst.fp16[j] := src.fp16[j] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - Divide packed half-precision (16-bit) floating-point elements in "a" by packed - elements in "b", and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - [round_note] - - FOR j := 0 to 31 - IF k[j] - dst.fp16[j] := a.fp16[j] / b.fp16[j] - ELSE - dst.fp16[j] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + Divide packed half-precision (16-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 31 + IF k[j] + dst.fp16[j] := a.fp16[j] / b.fp16[j] + ELSE + dst.fp16[j] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
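Packed division repeats the add template with simd_div as the generic op; the mask, maskz, and rounding variants fall out exactly as sketched for add. Minimal sketch of the plain form only:

use crate::intrinsics::simd::simd_div;

#[inline]
#[target_feature(enable = "avx512fp16")]
pub unsafe fn _mm512_div_ph(a: __m512h, b: __m512h) -> __m512h {
    transmute(simd_div(a.as_f16x32(), b.as_f16x32()))
}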
- - - - Divide the lower half-precision (16-bit) floating-point element in "a" by the - lower half-precision (16-bit) floating-point element in "b", store the result in the - lower element of "dst", and copy the upper 7 packed elements from "a" to the upper - elements of "dst". - - dst.fp16[0] := a.fp16[0] / b.fp16[0] - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + Divide the lower half-precision (16-bit) floating-point element in "a" by the lower half-precision (16-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst". + +dst.fp16[0] := a.fp16[0] / b.fp16[0] +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - Divide the lower half-precision (16-bit) floating-point element in "a" by the - lower half-precision (16-bit) floating-point element in "b", store the result in the - lower element of "dst" using writemask "k" (the element is copied from "src" when mask - bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements - of "dst". - - IF k[0] - dst.fp16[0] := a.fp16[0] / b.fp16[0] - ELSE - dst.fp16[0] := src.fp16[0] - FI - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + Divide the lower half-precision (16-bit) floating-point element in "a" by the lower half-precision (16-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst.fp16[0] := a.fp16[0] / b.fp16[0] +ELSE + dst.fp16[0] := src.fp16[0] +FI +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - Divide the lower half-precision (16-bit) floating-point element in "a" by the - lower half-precision (16-bit) floating-point element in "b", store the result in the - lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is - not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". - - IF k[0] - dst.fp16[0] := a.fp16[0] / b.fp16[0] - ELSE - dst.fp16[0] := 0 - FI - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + Divide the lower half-precision (16-bit) floating-point element in "a" by the lower half-precision (16-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst.fp16[0] := a.fp16[0] / b.fp16[0] +ELSE + dst.fp16[0] := 0 +FI +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - Divide the lower half-precision (16-bit) floating-point element in "a" by the - lower half-precision (16-bit) floating-point element in "b", store the result in the - lower element of "dst", and copy the upper 7 packed elements from "a" to the upper - elements of "dst". - [round_note] - - dst.fp16[0] := a.fp16[0] / b.fp16[0] - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + Divide the lower half-precision (16-bit) floating-point element in "a" by the lower half-precision (16-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst". + [round_note] + +dst.fp16[0] := a.fp16[0] / b.fp16[0] +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - - Divide the lower half-precision (16-bit) floating-point element in "a" by the - lower half-precision (16-bit) floating-point element in "b", store the result in the - lower element of "dst" using writemask "k" (the element is copied from "src" when mask - bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements - of "dst". - [round_note] - - IF k[0] - dst.fp16[0] := a.fp16[0] / b.fp16[0] - ELSE - dst.fp16[0] := src.fp16[0] - FI - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + + Divide the lower half-precision (16-bit) floating-point element in "a" by the lower half-precision (16-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". + [round_note] + +IF k[0] + dst.fp16[0] := a.fp16[0] / b.fp16[0] +ELSE + dst.fp16[0] := src.fp16[0] +FI +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - Divide the lower half-precision (16-bit) floating-point element in "a" by the - lower half-precision (16-bit) floating-point element in "b", store the result in the - lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is - not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". - [round_note] - - IF k[0] - dst.fp16[0] := a.fp16[0] / b.fp16[0] - ELSE - dst.fp16[0] := 0 - FI - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + Divide the lower half-precision (16-bit) floating-point element in "a" by the lower half-precision (16-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". + [round_note] + +IF k[0] + dst.fp16[0] := a.fp16[0] / b.fp16[0] +ELSE + dst.fp16[0] := 0 +FI +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - add the intermediate result to packed elements in "c", and store the results in "dst". - - FOR j := 0 to 31 - dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst". + +FOR j := 0 to 31 + dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - add the intermediate result to packed elements in "c", and store the results in "dst" - using writemask "k" (elements are copied from "a" when the corresponding mask bit is not - set). - - FOR j := 0 to 31 - IF k[j] - dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] - ELSE - dst.fp16[j] := a.fp16[j] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + IF k[j] + dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] + ELSE + dst.fp16[j] := a.fp16[j] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - add the intermediate result to packed elements in "c", and store the results in "dst" - using writemask "k" (elements are copied from "c" when the corresponding mask bit is not - set). - - FOR j := 0 to 31 - IF k[j] - dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] - ELSE - dst.fp16[j] := c.fp16[j] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + IF k[j] + dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] + ELSE + dst.fp16[j] := c.fp16[j] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - add the intermediate result to packed elements in "c", and store the results in "dst" - using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 31 - IF k[j] - dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] - ELSE - dst.fp16[j] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + IF k[j] + dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] + ELSE + dst.fp16[j] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - add the intermediate result to packed elements in "c", and store the results in "dst". - [round_note] - - FOR j := 0 to 31 - dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst". + [round_note] + +FOR j := 0 to 31 + dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - add the intermediate result to packed elements in "c", and store the results in "dst" - using writemask "k" (elements are copied from "a" when the corresponding mask bit is not - set). - [round_note] - - FOR j := 0 to 31 - IF k[j] - dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] - ELSE - dst.fp16[j] := a.fp16[j] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 31 + IF k[j] + dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] + ELSE + dst.fp16[j] := a.fp16[j] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - add the intermediate result to packed elements in "c", and store the results in "dst" - using writemask "k" (elements are copied from "c" when the corresponding mask bit is not - set). - [round_note] - - FOR j := 0 to 31 - IF k[j] - dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] - ELSE - dst.fp16[j] := c.fp16[j] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 31 + IF k[j] + dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] + ELSE + dst.fp16[j] := c.fp16[j] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - add the intermediate result to packed elements in "c", and store the results in "dst" - using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - [round_note] - - FOR j := 0 to 31 - IF k[j] - dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] - ELSE - dst.fp16[j] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + +FOR j := 0 to 31 + IF k[j] + dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] + ELSE + dst.fp16[j] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
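The FMA entries above introduce one new wrinkle: three mask flavors. mask keeps unselected lanes from "a", mask3 keeps them from "c", and maskz zeroes them; the multiply-add itself can use the generic fused simd_fma. A sketch, assuming simd_fma accepts f16 lanes:

use crate::intrinsics::simd::{simd_fma, simd_select_bitmask};

#[inline]
#[target_feature(enable = "avx512fp16")]
pub unsafe fn _mm512_fmadd_ph(a: __m512h, b: __m512h, c: __m512h) -> __m512h {
    // Single-rounding fused multiply-add on every lane.
    transmute(simd_fma(a.as_f16x32(), b.as_f16x32(), c.as_f16x32()))
}

#[inline]
#[target_feature(enable = "avx512fp16")]
pub unsafe fn _mm512_mask3_fmadd_ph(a: __m512h, b: __m512h, c: __m512h, k: __mmask32) -> __m512h {
    // mask3 flavor: unselected lanes are copied from `c`.
    let r = simd_fma(a.as_f16x32(), b.as_f16x32(), c.as_f16x32());
    transmute(simd_select_bitmask(k, r, c.as_f16x32()))
}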
- - - - - Multiply the lower half-precision (16-bit) floating-point elements in "a" and - "b", and add the intermediate result to the lower element in "c". Store the result in - the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper - elements of "dst". - - dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + c.fp16[0] - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + Multiply the lower half-precision (16-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst". + +dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + c.fp16[0] +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - Multiply the lower half-precision (16-bit) floating-point elements in "a" and - "b", and add the intermediate result to the lower element in "c". Store the result in - the lower element of "dst" using writemask "k" (the element is copied from "a" when mask - bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements - of "dst". - - IF k[0] - dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + c.fp16[0] - ELSE - dst.fp16[0] := a.fp16[0] - FI - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + Multiply the lower half-precision (16-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + c.fp16[0] +ELSE + dst.fp16[0] := a.fp16[0] +FI +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - Multiply the lower half-precision (16-bit) floating-point elements in "a" and - "b", and add the intermediate result to the lower element in "c". Store the result in - the lower element of "dst" using writemask "k" (the element is copied from "c" when mask - bit 0 is not set), and copy the upper 7 packed elements from "c" to the upper elements - of "dst". - - IF k[0] - dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + c.fp16[0] - ELSE - dst.fp16[0] := c.fp16[0] - FI - dst[127:16] := c[127:16] - dst[MAX:128] := 0 - - - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + Multiply the lower half-precision (16-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper 7 packed elements from "c" to the upper elements of "dst". + +IF k[0] + dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + c.fp16[0] +ELSE + dst.fp16[0] := c.fp16[0] +FI +dst[127:16] := c[127:16] +dst[MAX:128] := 0 + + + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - Multiply the lower half-precision (16-bit) floating-point elements in "a" and - "b", and add the intermediate result to the lower element in "c". Store the result in - the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 - is not set), and copy the upper 7 packed elements from "a" to the upper elements of - "dst". - - IF k[0] - dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + c.fp16[0] - ELSE - dst.fp16[0] := 0 - FI - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + Multiply the lower half-precision (16-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + c.fp16[0] +ELSE + dst.fp16[0] := 0 +FI +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - Multiply the lower half-precision (16-bit) floating-point elements in "a" and - "b", and add the intermediate result to the lower element in "c". Store the result in - the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper - elements of "dst". - [round_note] - - dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + c.fp16[0] - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + Multiply the lower half-precision (16-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst". + [round_note] + +dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + c.fp16[0] +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - - Multiply the lower half-precision (16-bit) floating-point elements in "a" and - "b", and add the intermediate result to the lower element in "c". Store the result in - the lower element of "dst" using writemask "k" (the element is copied from "a" when mask - bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements - of "dst". - [round_note] - - IF k[0] - dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + c.fp16[0] - ELSE - dst.fp16[0] := a.fp16[0] - FI - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + + Multiply the lower half-precision (16-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". + [round_note] + +IF k[0] + dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + c.fp16[0] +ELSE + dst.fp16[0] := a.fp16[0] +FI +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - - Multiply the lower half-precision (16-bit) floating-point elements in "a" and - "b", and add the intermediate result to the lower element in "c". Store the result in - the lower element of "dst" using writemask "k" (the element is copied from "c" when mask - bit 0 is not set), and copy the upper 7 packed elements from "c" to the upper elements - of "dst". - [round_note] - - IF k[0] - dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + c.fp16[0] - ELSE - dst.fp16[0] := c.fp16[0] - FI - dst[127:16] := c[127:16] - dst[MAX:128] := 0 - - - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + + Multiply the lower half-precision (16-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper 7 packed elements from "c" to the upper elements of "dst". + [round_note] + +IF k[0] + dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + c.fp16[0] +ELSE + dst.fp16[0] := c.fp16[0] +FI +dst[127:16] := c[127:16] +dst[MAX:128] := 0 + + + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - - Multiply the lower half-precision (16-bit) floating-point elements in "a" and - "b", and add the intermediate result to the lower element in "c". Store the result in - the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 - is not set), and copy the upper 7 packed elements from "a" to the upper elements of - "dst". - [round_note] - - IF k[0] - dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + c.fp16[0] - ELSE - dst.fp16[0] := 0 - FI - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + + Multiply the lower half-precision (16-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". + [round_note] + +IF k[0] + dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + c.fp16[0] +ELSE + dst.fp16[0] := 0 +FI +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
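The scalar FMA must stay fused: a separate multiply followed by an add would round twice. One way to sketch it without a target LLVM intrinsic is to fuse on the whole vector and keep only lane 0:

use crate::intrinsics::simd::simd_fma;

#[inline]
#[target_feature(enable = "avx512fp16")]
pub unsafe fn _mm_fmadd_sh(a: __m128h, b: __m128h, c: __m128h) -> __m128h {
    // Fuse on all 8 lanes, then discard everything but lane 0;
    // lanes 1..7 of the result come from `a`, per the pseudocode above.
    let full = simd_fma(a.as_f16x8(), b.as_f16x8(), c.as_f16x8());
    let lane0: f16 = simd_extract!(full, 0);
    transmute(simd_insert!(a.as_f16x8(), 0, lane0))
}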
- - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - add the negated intermediate result to packed elements in "c", and store the results in - "dst". - - FOR j := 0 to 31 - dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) + c.fp16[j] - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst". + +FOR j := 0 to 31 + dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) + c.fp16[j] +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - add the negated intermediate result to packed elements in "c", and store the results in - "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit - is not set). - - FOR j := 0 to 31 - IF k[j] - dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) + c.fp16[j] - ELSE - dst.fp16[j] := a.fp16[j] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + IF k[j] + dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) + c.fp16[j] + ELSE + dst.fp16[j] := a.fp16[j] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - add the negated intermediate result to packed elements in "c", and store the results in - "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit - is not set). - - FOR j := 0 to 31 - IF k[j] - dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) + c.fp16[j] - ELSE - dst.fp16[j] := c.fp16[j] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + IF k[j] + dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) + c.fp16[j] + ELSE + dst.fp16[j] := c.fp16[j] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - add the negated intermediate result to packed elements in "c", and store the results in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 31 - IF k[j] - dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) + c.fp16[j] - ELSE - dst.fp16[j] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + IF k[j] + dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) + c.fp16[j] + ELSE + dst.fp16[j] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - add the negated intermediate result to packed elements in "c", and store the results in - "dst". - [round_note] - - FOR j := 0 to 31 - dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) + c.fp16[j] - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512_FP16 -
immintrin.h
+	Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+	[round_note]
+
+FOR j := 0 to 31
+	IF k[j]
+		dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) + c.fp16[j]
+	ELSE
+		dst.fp16[j] := a.fp16[j]
+	FI
+ENDFOR
+dst[MAX:512] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).
+	[round_note]
+
+FOR j := 0 to 31
+	IF k[j]
+		dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) + c.fp16[j]
+	ELSE
+		dst.fp16[j] := c.fp16[j]
+	FI
+ENDFOR
+dst[MAX:512] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+	[round_note]
+
+FOR j := 0 to 31
+	IF k[j]
+		dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) + c.fp16[j]
+	ELSE
+		dst.fp16[j] := 0
+	FI
+ENDFOR
+dst[MAX:512] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
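The packed FNMADD entries above differ only in which value a masked-off lane falls back to: the source operand "a" (mask), the addend "c" (mask3), or zero (maskz). A lane-by-lane model of that pseudocode, with f32 standing in for the still-unstable f16 type and illustrative helper names that are not stdarch APIs:

    // Compute -(a * b) + c for each of the 32 f16 lanes of a 512-bit vector.
    fn fnmadd_lanes(a: &[f32; 32], b: &[f32; 32], c: &[f32; 32]) -> [f32; 32] {
        core::array::from_fn(|j| -(a[j] * b[j]) + c[j])
    }

    // Bit j of `k` keeps the computed lane; a cleared bit selects
    // `fallback[j]` (`a` for mask, `c` for mask3, all zeros for maskz).
    fn apply_mask(k: u32, computed: &[f32; 32], fallback: &[f32; 32]) -> [f32; 32] {
        core::array::from_fn(|j| if (k >> j) & 1 != 0 { computed[j] } else { fallback[j] })
    }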
+	Multiply the lower half-precision (16-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst".
+
+dst.fp16[0] := -(a.fp16[0] * b.fp16[0]) + c.fp16[0]
+dst[127:16] := a[127:16]
+dst[MAX:128] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply the lower half-precision (16-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst".
+
+IF k[0]
+	dst.fp16[0] := -(a.fp16[0] * b.fp16[0]) + c.fp16[0]
+ELSE
+	dst.fp16[0] := a.fp16[0]
+FI
+dst[127:16] := a[127:16]
+dst[MAX:128] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply the lower half-precision (16-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper 7 packed elements from "c" to the upper elements of "dst".
+
+IF k[0]
+	dst.fp16[0] := -(a.fp16[0] * b.fp16[0]) + c.fp16[0]
+ELSE
+	dst.fp16[0] := c.fp16[0]
+FI
+dst[127:16] := c[127:16]
+dst[MAX:128] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply the lower half-precision (16-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst".
+
+IF k[0]
+	dst.fp16[0] := -(a.fp16[0] * b.fp16[0]) + c.fp16[0]
+ELSE
+	dst.fp16[0] := 0
+FI
+dst[127:16] := a[127:16]
+dst[MAX:128] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply the lower half-precision (16-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst".
+	[round_note]
+
+dst.fp16[0] := -(a.fp16[0] * b.fp16[0]) + c.fp16[0]
+dst[127:16] := a[127:16]
+dst[MAX:128] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply the lower half-precision (16-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst".
+	[round_note]
+
+IF k[0]
+	dst.fp16[0] := -(a.fp16[0] * b.fp16[0]) + c.fp16[0]
+ELSE
+	dst.fp16[0] := a.fp16[0]
+FI
+dst[127:16] := a[127:16]
+dst[MAX:128] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply the lower half-precision (16-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper 7 packed elements from "c" to the upper elements of "dst".
+	[round_note]
+
+IF k[0]
+	dst.fp16[0] := -(a.fp16[0] * b.fp16[0]) + c.fp16[0]
+ELSE
+	dst.fp16[0] := c.fp16[0]
+FI
+dst[127:16] := c[127:16]
+dst[MAX:128] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply the lower half-precision (16-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst".
+	[round_note]
+
+IF k[0]
+	dst.fp16[0] := -(a.fp16[0] * b.fp16[0]) + c.fp16[0]
+ELSE
+	dst.fp16[0] := 0
+FI
+dst[127:16] := a[127:16]
+dst[MAX:128] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
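The scalar ("sh") forms only compute element 0 and pass the upper seven f16 lanes through from "a" (or from "c" in the mask3 forms). A minimal model of the mask/maskz variants, again with f32 standing in for f16 and a hypothetical helper name:

    // Scalar FNMADD: element 0 is computed (or masked), dst[127:16] := a[127:16].
    fn fnmadd_sh(a: [f32; 8], b: [f32; 8], c: [f32; 8], k: u8, zero_masked: bool) -> [f32; 8] {
        let mut dst = a; // upper elements copied from `a`
        dst[0] = if k & 1 != 0 {
            -(a[0] * b[0]) + c[0]
        } else if zero_masked {
            0.0 // zeromask: element is zeroed out
        } else {
            a[0] // writemask: element is copied from "a"
        };
        dst
    }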
+	Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst".
+
+FOR j := 0 to 31
+	dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j]
+ENDFOR
+dst[MAX:512] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+
+FOR j := 0 to 31
+	IF k[j]
+		dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j]
+	ELSE
+		dst.fp16[j] := a.fp16[j]
+	FI
+ENDFOR
+dst[MAX:512] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).
+
+FOR j := 0 to 31
+	IF k[j]
+		dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j]
+	ELSE
+		dst.fp16[j] := c.fp16[j]
+	FI
+ENDFOR
+dst[MAX:512] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 31
+	IF k[j]
+		dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j]
+	ELSE
+		dst.fp16[j] := 0
+	FI
+ENDFOR
+dst[MAX:512] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst".
+	[round_note]
+
+FOR j := 0 to 31
+	dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j]
+ENDFOR
+dst[MAX:512] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+	[round_note]
+
+FOR j := 0 to 31
+	IF k[j]
+		dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j]
+	ELSE
+		dst.fp16[j] := a.fp16[j]
+	FI
+ENDFOR
+dst[MAX:512] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).
+	[round_note]
+
+FOR j := 0 to 31
+	IF k[j]
+		dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j]
+	ELSE
+		dst.fp16[j] := c.fp16[j]
+	FI
+ENDFOR
+dst[MAX:512] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+	[round_note]
+
+FOR j := 0 to 31
+	IF k[j]
+		dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j]
+	ELSE
+		dst.fp16[j] := 0
+	FI
+ENDFOR
+dst[MAX:512] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
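Assuming the FMA instructions round once at the end of the fused operation (which is what the separate [round_note] variants parameterize), a faithful lane model in Rust should use mul_add rather than a separate multiply and subtract; a sketch, with f32 in place of f16:

    // One FMSUB lane: (a * b) - c evaluated with a single rounding step.
    fn fmsub_lane(a: f32, b: f32, c: f32) -> f32 {
        a.mul_add(b, -c)
    }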
+	Multiply the lower half-precision (16-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst".
+
+dst.fp16[0] := (a.fp16[0] * b.fp16[0]) - c.fp16[0]
+dst[127:16] := a[127:16]
+dst[MAX:128] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply the lower half-precision (16-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst".
+
+IF k[0]
+	dst.fp16[0] := (a.fp16[0] * b.fp16[0]) - c.fp16[0]
+ELSE
+	dst.fp16[0] := a.fp16[0]
+FI
+dst[127:16] := a[127:16]
+dst[MAX:128] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply the lower half-precision (16-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper 7 packed elements from "c" to the upper elements of "dst".
+
+IF k[0]
+	dst.fp16[0] := (a.fp16[0] * b.fp16[0]) - c.fp16[0]
+ELSE
+	dst.fp16[0] := c.fp16[0]
+FI
+dst[127:16] := c[127:16]
+dst[MAX:128] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply the lower half-precision (16-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst".
+
+IF k[0]
+	dst.fp16[0] := (a.fp16[0] * b.fp16[0]) - c.fp16[0]
+ELSE
+	dst.fp16[0] := 0
+FI
+dst[127:16] := a[127:16]
+dst[MAX:128] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply the lower half-precision (16-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst".
+	[round_note]
+
+dst.fp16[0] := (a.fp16[0] * b.fp16[0]) - c.fp16[0]
+dst[127:16] := a[127:16]
+dst[MAX:128] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply the lower half-precision (16-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst".
+	[round_note]
+
+IF k[0]
+	dst.fp16[0] := (a.fp16[0] * b.fp16[0]) - c.fp16[0]
+ELSE
+	dst.fp16[0] := a.fp16[0]
+FI
+dst[127:16] := a[127:16]
+dst[MAX:128] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply the lower half-precision (16-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper 7 packed elements from "c" to the upper elements of "dst".
+	[round_note]
+
+IF k[0]
+	dst.fp16[0] := (a.fp16[0] * b.fp16[0]) - c.fp16[0]
+ELSE
+	dst.fp16[0] := c.fp16[0]
+FI
+dst[127:16] := c[127:16]
+dst[MAX:128] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply the lower half-precision (16-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst".
+	[round_note]
+
+IF k[0]
+	dst.fp16[0] := (a.fp16[0] * b.fp16[0]) - c.fp16[0]
+ELSE
+	dst.fp16[0] := 0
+FI
+dst[127:16] := a[127:16]
+dst[MAX:128] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst".
+
+FOR j := 0 to 31
+	dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) - c.fp16[j]
+ENDFOR
+dst[MAX:512] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+
+FOR j := 0 to 31
+	IF k[j]
+		dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) - c.fp16[j]
+	ELSE
+		dst.fp16[j] := a.fp16[j]
+	FI
+ENDFOR
+dst[MAX:512] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).
+
+FOR j := 0 to 31
+	IF k[j]
+		dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) - c.fp16[j]
+	ELSE
+		dst.fp16[j] := c.fp16[j]
+	FI
+ENDFOR
+dst[MAX:512] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 31
+	IF k[j]
+		dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) - c.fp16[j]
+	ELSE
+		dst.fp16[j] := 0
+	FI
+ENDFOR
+dst[MAX:512] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst".
+	[round_note]
+
+FOR j := 0 to 31
+	dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) - c.fp16[j]
+ENDFOR
+dst[MAX:512] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+	[round_note]
+
+FOR j := 0 to 31
+	IF k[j]
+		dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) - c.fp16[j]
+	ELSE
+		dst.fp16[j] := a.fp16[j]
+	FI
+ENDFOR
+dst[MAX:512] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).
+	[round_note]
+
+FOR j := 0 to 31
+	IF k[j]
+		dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) - c.fp16[j]
+	ELSE
+		dst.fp16[j] := c.fp16[j]
+	FI
+ENDFOR
+dst[MAX:512] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+	[round_note]
+
+FOR j := 0 to 31
+	IF k[j]
+		dst.fp16[j] := -(a.fp16[j] * b.fp16[j]) - c.fp16[j]
+	ELSE
+		dst.fp16[j] := 0
+	FI
+ENDFOR
+dst[MAX:512] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
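FNMSUB negates the product and then subtracts "c": -(a * b) - c. Under the same single-rounding assumption as above, that is (-a) fused-multiply-added with b and -c; a one-lane sketch with f32 in place of f16:

    // One FNMSUB lane: -(a * b) - c == (-a).mul_add(b, -c) in fused form.
    fn fnmsub_lane(a: f32, b: f32, c: f32) -> f32 {
        (-a).mul_add(b, -c)
    }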
+	Multiply the lower half-precision (16-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst".
+
+dst.fp16[0] := -(a.fp16[0] * b.fp16[0]) - c.fp16[0]
+dst[127:16] := a[127:16]
+dst[MAX:128] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply the lower half-precision (16-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst".
+
+IF k[0]
+	dst.fp16[0] := -(a.fp16[0] * b.fp16[0]) - c.fp16[0]
+ELSE
+	dst.fp16[0] := a.fp16[0]
+FI
+dst[127:16] := a[127:16]
+dst[MAX:128] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply the lower half-precision (16-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper 7 packed elements from "c" to the upper elements of "dst".
+
+IF k[0]
+	dst.fp16[0] := -(a.fp16[0] * b.fp16[0]) - c.fp16[0]
+ELSE
+	dst.fp16[0] := c.fp16[0]
+FI
+dst[127:16] := c[127:16]
+dst[MAX:128] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply the lower half-precision (16-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst".
+
+IF k[0]
+	dst.fp16[0] := -(a.fp16[0] * b.fp16[0]) - c.fp16[0]
+ELSE
+	dst.fp16[0] := 0
+FI
+dst[127:16] := a[127:16]
+dst[MAX:128] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply the lower half-precision (16-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst".
+	[round_note]
+
+dst.fp16[0] := -(a.fp16[0] * b.fp16[0]) - c.fp16[0]
+dst[127:16] := a[127:16]
+dst[MAX:128] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply the lower half-precision (16-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst".
+	[round_note]
+
+IF k[0]
+	dst.fp16[0] := -(a.fp16[0] * b.fp16[0]) - c.fp16[0]
+ELSE
+	dst.fp16[0] := a.fp16[0]
+FI
+dst[127:16] := a[127:16]
+dst[MAX:128] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply the lower half-precision (16-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper 7 packed elements from "c" to the upper elements of "dst".
+	[round_note]
+
+IF k[0]
+	dst.fp16[0] := -(a.fp16[0] * b.fp16[0]) - c.fp16[0]
+ELSE
+	dst.fp16[0] := c.fp16[0]
+FI
+dst[127:16] := c[127:16]
+dst[MAX:128] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply the lower half-precision (16-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst".
+	[round_note]
+
+IF k[0]
+	dst.fp16[0] := -(a.fp16[0] * b.fp16[0]) - c.fp16[0]
+ELSE
+	dst.fp16[0] := 0
+FI
+dst[127:16] := a[127:16]
+dst[MAX:128] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
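Note that the mask3 scalar forms are the odd ones out: both the masked-off fallback element and the upper lanes come from "c" rather than "a". A model of that reading, FNMSUB shown, with f32 standing in for f16 and an illustrative helper name:

    // Scalar FNMSUB, mask3 flavor: fallback and dst[127:16] both come from `c`.
    fn fnmsub_sh_mask3(a: [f32; 8], b: [f32; 8], c: [f32; 8], k: u8) -> [f32; 8] {
        let mut dst = c; // dst[127:16] := c[127:16]
        dst[0] = if k & 1 != 0 { -(a[0] * b[0]) - c[0] } else { c[0] };
        dst
    }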
+	Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst".
+
+FOR j := 0 to 31
+	IF ((j & 1) == 0)
+		dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j]
+	ELSE
+		dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j]
+	FI
+ENDFOR
+dst[MAX:512] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+
+FOR j := 0 to 31
+	IF k[j]
+		IF ((j & 1) == 0)
+			dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j]
+		ELSE
+			dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j]
+		FI
+	ELSE
+		dst.fp16[j] := a.fp16[j]
+	FI
+ENDFOR
+dst[MAX:512] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).
+
+FOR j := 0 to 31
+	IF k[j]
+		IF ((j & 1) == 0)
+			dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j]
+		ELSE
+			dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j]
+		FI
+	ELSE
+		dst.fp16[j] := c.fp16[j]
+	FI
+ENDFOR
+dst[MAX:512] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+
+FOR j := 0 to 31
+	IF k[j]
+		IF ((j & 1) == 0)
+			dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j]
+		ELSE
+			dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j]
+		FI
+	ELSE
+		dst.fp16[j] := 0
+	FI
+ENDFOR
+dst[MAX:512] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst".
+	[round_note]
+
+FOR j := 0 to 31
+	IF ((j & 1) == 0)
+		dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j]
+	ELSE
+		dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j]
+	FI
+ENDFOR
+dst[MAX:512] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+	[round_note]
+
+FOR j := 0 to 31
+	IF k[j]
+		IF ((j & 1) == 0)
+			dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j]
+		ELSE
+			dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j]
+		FI
+	ELSE
+		dst.fp16[j] := a.fp16[j]
+	FI
+ENDFOR
+dst[MAX:512] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).
+	[round_note]
+
+FOR j := 0 to 31
+	IF k[j]
+		IF ((j & 1) == 0)
+			dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j]
+		ELSE
+			dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j]
+		FI
+	ELSE
+		dst.fp16[j] := c.fp16[j]
+	FI
+ENDFOR
+dst[MAX:512] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
+	Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+	[round_note]
+
+FOR j := 0 to 31
+	IF k[j]
+		IF ((j & 1) == 0)
+			dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j]
+		ELSE
+			dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j]
+		FI
+	ELSE
+		dst.fp16[j] := 0
+	FI
+ENDFOR
+dst[MAX:512] := 0
+
+	AVX512_FP16
immintrin.h
+	Arithmetic
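In the FMADDSUB pseudocode above, even lanes subtract "c" and odd lanes add it; the lane parity is the whole trick. A minimal sketch of that loop, with f32 standing in for f16:

    // FMADDSUB lanes: even j -> (a*b) - c, odd j -> (a*b) + c.
    fn fmaddsub_lanes(a: &[f32; 32], b: &[f32; 32], c: &[f32; 32]) -> [f32; 32] {
        core::array::from_fn(|j| {
            let prod = a[j] * b[j];
            if j & 1 == 0 { prod - c[j] } else { prod + c[j] }
        })
    }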
- - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - alternatively subtract and add packed elements in "c" to/from the intermediate result, - and store the results in "dst". - - FOR j := 0 to 31 - IF ((j & 1) == 0) - dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] - ELSE - dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" to/from the intermediate result, and store the results in "dst". + +FOR j := 0 to 31 + IF ((j & 1) == 0) + dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] + ELSE + dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j] + FI +ENDFOR +dst[MAX:512] := 0 + + + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - alternatively subtract and add packed elements in "c" to/from the intermediate result, - and store the results in "dst" using writemask "k" (elements are copied from "a" when - the corresponding mask bit is not set). - - FOR j := 0 to 31 - IF k[j] - IF ((j & 1) == 0) + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + IF k[j] + IF ((j & 1) == 0) dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] - ELSE + ELSE dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j] - FI - ELSE - dst.fp16[j] := a.fp16[j] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512_FP16 -
immintrin.h
- Arithmetic + FI + ELSE + dst.fp16[j] := a.fp16[j] + FI +ENDFOR +dst[MAX:512] := 0 +
+ + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - alternatively subtract and add packed elements in "c" to/from the intermediate result, - and store the results in "dst" using writemask "k" (elements are copied from "c" when - the corresponding mask bit is not set). - - FOR j := 0 to 31 - IF k[j] - IF ((j & 1) == 0) + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + IF k[j] + IF ((j & 1) == 0) dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] - ELSE + ELSE dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j] - FI - ELSE - dst.fp16[j] := c.fp16[j] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512_FP16 -
immintrin.h
- Arithmetic + FI + ELSE + dst.fp16[j] := c.fp16[j] + FI +ENDFOR +dst[MAX:512] := 0 +
+ + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - alternatively subtract and add packed elements in "c" to/from the intermediate result, - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 31 - IF k[j] - IF ((j & 1) == 0) + + + + + + Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" to/from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + IF k[j] + IF ((j & 1) == 0) dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] - ELSE + ELSE dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j] - FI - ELSE - dst.fp16[j] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512_FP16 -
immintrin.h
- Arithmetic + FI + ELSE + dst.fp16[j] := 0 + FI +ENDFOR +dst[MAX:512] := 0 +
+ + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", - alternatively subtract and add packed elements in "c" to/from the intermediate result, - and store the results in "dst". - [round_note] - - FOR j := 0 to 31 - IF ((j & 1) == 0) - dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j] - ELSE - dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j] - FI - ENDFOR - dst[MAX:512] := 0 - - - - - AVX512_FP16 -
immintrin.h
Arithmetic
	Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" to/from the intermediate result, and store the results in "dst".
	[round_note]

FOR j := 0 to 31
    IF ((j & 1) == 0)
        dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j]
    ELSE
        dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512_FP16
immintrin.h

Arithmetic
	Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
	[round_note]

FOR j := 0 to 31
    IF k[j]
        IF ((j & 1) == 0)
            dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j]
        ELSE
            dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j]
        FI
    ELSE
        dst.fp16[j] := a.fp16[j]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512_FP16
immintrin.h

Arithmetic
	Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).
	[round_note]

FOR j := 0 to 31
    IF k[j]
        IF ((j & 1) == 0)
            dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j]
        ELSE
            dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j]
        FI
    ELSE
        dst.fp16[j] := c.fp16[j]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512_FP16
immintrin.h

Arithmetic
	Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" to/from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
	[round_note]

FOR j := 0 to 31
    IF k[j]
        IF ((j & 1) == 0)
            dst.fp16[j] := (a.fp16[j] * b.fp16[j]) + c.fp16[j]
        ELSE
            dst.fp16[j] := (a.fp16[j] * b.fp16[j]) - c.fp16[j]
        FI
    ELSE
        dst.fp16[j] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512_FP16
immintrin.h
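The fmsubadd records above all share one lane pattern: even-indexed lanes take a multiply-add, odd-indexed lanes a multiply-subtract, and the mask kind only decides what lands in switched-off lanes. A minimal scalar sketch of that pattern in Rust (illustrative only; `f32` stands in for `f16`, which is still unstable in Rust, and `fmsubadd` is not a stdarch name):

// Scalar model of the FMSUBADD lane pattern: even lanes compute a*b + c,
// odd lanes compute a*b - c, matching the `(j & 1) == 0` test above.
fn fmsubadd(a: &[f32; 32], b: &[f32; 32], c: &[f32; 32]) -> [f32; 32] {
    let mut dst = [0.0f32; 32];
    for j in 0..32 {
        dst[j] = if j % 2 == 0 {
            a[j] * b[j] + c[j]
        } else {
            a[j] * b[j] - c[j]
        };
    }
    dst
}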
Arithmetic
	Subtract packed half-precision (16-bit) floating-point elements in "b" from packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst".

FOR j := 0 TO 31
    dst.fp16[j] := a.fp16[j] - b.fp16[j]
ENDFOR
dst[MAX:512] := 0

AVX512_FP16
immintrin.h

Arithmetic
	Subtract packed half-precision (16-bit) floating-point elements in "b" from packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst".
	[round_note]

FOR j := 0 TO 31
    dst.fp16[j] := a.fp16[j] - b.fp16[j]
ENDFOR
dst[MAX:512] := 0

AVX512_FP16
immintrin.h

Arithmetic
	Subtract packed half-precision (16-bit) floating-point elements in "b" from packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR j := 0 TO 31
    IF k[j]
        dst.fp16[j] := a.fp16[j] - b.fp16[j]
    ELSE
        dst.fp16[j] := src.fp16[j]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512_FP16
immintrin.h

Arithmetic
	Subtract packed half-precision (16-bit) floating-point elements in "b" from packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
	[round_note]

FOR j := 0 TO 31
    IF k[j]
        dst.fp16[j] := a.fp16[j] - b.fp16[j]
    ELSE
        dst.fp16[j] := src.fp16[j]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512_FP16
immintrin.h

Arithmetic
	Subtract packed half-precision (16-bit) floating-point elements in "b" from packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR j := 0 TO 31
    IF k[j]
        dst.fp16[j] := a.fp16[j] - b.fp16[j]
    ELSE
        dst.fp16[j] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512_FP16
immintrin.h

Arithmetic
	Subtract packed half-precision (16-bit) floating-point elements in "b" from packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
	[round_note]

FOR j := 0 TO 31
    IF k[j]
        dst.fp16[j] := a.fp16[j] - b.fp16[j]
    ELSE
        dst.fp16[j] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512_FP16
immintrin.h
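The six subtract records differ only in masking: the writemask forms fall back to "src" for masked-off lanes, the zeromask forms write zero. A scalar sketch of the two fallback rules, under the same stand-in assumptions as above (hypothetical helper names, `f32` for `f16`):

// Writemask: masked-off lanes keep the corresponding lane of `src`.
fn mask_sub(src: &[f32; 32], k: u32, a: &[f32; 32], b: &[f32; 32]) -> [f32; 32] {
    let mut dst = *src;
    for j in 0..32 {
        if (k >> j) & 1 != 0 {
            dst[j] = a[j] - b[j];
        }
    }
    dst
}

// Zeromask: masked-off lanes are written as 0.0 instead.
fn maskz_sub(k: u32, a: &[f32; 32], b: &[f32; 32]) -> [f32; 32] {
    let mut dst = [0.0f32; 32];
    for j in 0..32 {
        if (k >> j) & 1 != 0 {
            dst[j] = a[j] - b[j];
        }
    }
    dst
}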
Arithmetic
	Subtract the lower half-precision (16-bit) floating-point element in "b" from the lower half-precision (16-bit) floating-point element in "a", store the result in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst".

dst.fp16[0] := a.fp16[0] - b.fp16[0]
dst[127:16] := a[127:16]
dst[MAX:128] := 0

AVX512_FP16
immintrin.h

Arithmetic
	Subtract the lower half-precision (16-bit) floating-point element in "b" from the lower half-precision (16-bit) floating-point element in "a", store the result in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst".
	[round_note]

dst.fp16[0] := a.fp16[0] - b.fp16[0]
dst[127:16] := a[127:16]
dst[MAX:128] := 0

AVX512_FP16
immintrin.h

Arithmetic
	Subtract the lower half-precision (16-bit) floating-point element in "b" from the lower half-precision (16-bit) floating-point element in "a", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst".

IF k[0]
    dst.fp16[0] := a.fp16[0] - b.fp16[0]
ELSE
    dst.fp16[0] := src.fp16[0]
FI
dst[127:16] := a[127:16]
dst[MAX:128] := 0

AVX512_FP16
immintrin.h

Arithmetic
	Subtract the lower half-precision (16-bit) floating-point element in "b" from the lower half-precision (16-bit) floating-point element in "a", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst".
	[round_note]

IF k[0]
    dst.fp16[0] := a.fp16[0] - b.fp16[0]
ELSE
    dst.fp16[0] := src.fp16[0]
FI
dst[127:16] := a[127:16]
dst[MAX:128] := 0

AVX512_FP16
immintrin.h

Arithmetic
	Subtract the lower half-precision (16-bit) floating-point element in "b" from the lower half-precision (16-bit) floating-point element in "a", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst".

IF k[0]
    dst.fp16[0] := a.fp16[0] - b.fp16[0]
ELSE
    dst.fp16[0] := 0
FI
dst[127:16] := a[127:16]
dst[MAX:128] := 0

AVX512_FP16
immintrin.h

Arithmetic
	Subtract the lower half-precision (16-bit) floating-point element in "b" from the lower half-precision (16-bit) floating-point element in "a", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst".
	[round_note]

IF k[0]
    dst.fp16[0] := a.fp16[0] - b.fp16[0]
ELSE
    dst.fp16[0] := 0
FI
dst[127:16] := a[127:16]
dst[MAX:128] := 0

AVX512_FP16
immintrin.h
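Every lower-element (scalar) record above follows the same shape: compute lane 0, then pass lanes 1 through 7 of "a" straight through; that is what the `dst[127:16] := a[127:16]` line encodes. A scalar sketch of the unmasked form (illustrative only, not a stdarch API; `f32` for `f16`):

// Lane 0 gets the difference; lanes 1..=7 are copied from `a`,
// mirroring `dst[127:16] := a[127:16]` in the pseudocode above.
fn sub_sh(a: &[f32; 8], b: &[f32; 8]) -> [f32; 8] {
    let mut dst = *a;
    dst[0] = a[0] - b[0];
    dst
}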
Arithmetic
	Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", and store the results in "dst".

FOR i := 0 TO 31
    dst.fp16[i] := a.fp16[i] * b.fp16[i]
ENDFOR
dst[MAX:512] := 0

AVX512_FP16
immintrin.h

Arithmetic
	Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", and store the results in "dst".
	[round_note]

FOR i := 0 TO 31
    dst.fp16[i] := a.fp16[i] * b.fp16[i]
ENDFOR
dst[MAX:512] := 0

AVX512_FP16
immintrin.h

Arithmetic
	Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).

FOR i := 0 TO 31
    IF k[i]
        dst.fp16[i] := a.fp16[i] * b.fp16[i]
    ELSE
        dst.fp16[i] := src.fp16[i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512_FP16
immintrin.h

Arithmetic
	Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
	[round_note]

FOR i := 0 TO 31
    IF k[i]
        dst.fp16[i] := a.fp16[i] * b.fp16[i]
    ELSE
        dst.fp16[i] := src.fp16[i]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512_FP16
immintrin.h

Arithmetic
	Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).

FOR i := 0 TO 31
    IF k[i]
        dst.fp16[i] := a.fp16[i] * b.fp16[i]
    ELSE
        dst.fp16[i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512_FP16
immintrin.h

Arithmetic
	Multiply packed half-precision (16-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
	[round_note]

FOR i := 0 TO 31
    IF k[i]
        dst.fp16[i] := a.fp16[i] * b.fp16[i]
    ELSE
        dst.fp16[i] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512_FP16
immintrin.h
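Wherever [round_note] appears above, the intrinsic takes an explicit rounding-control immediate instead of reading the current MXCSR rounding mode. The control values themselves are the long-standing `_MM_FROUND_*` constants already exposed by `core::arch::x86_64`; a sketch of the usual combinations (x86_64-only; the print is just to keep the example runnable):

use std::arch::x86_64::{
    _MM_FROUND_CUR_DIRECTION, _MM_FROUND_NO_EXC, _MM_FROUND_TO_NEAREST_INT,
    _MM_FROUND_TO_NEG_INF, _MM_FROUND_TO_POS_INF, _MM_FROUND_TO_ZERO,
};

fn main() {
    // A *_round_* intrinsic takes one of these as its ROUNDING immediate,
    // usually OR'ed with _MM_FROUND_NO_EXC to suppress exception flags.
    let to_nearest = _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC; // round to nearest even
    let downward = _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC; // toward -infinity
    let upward = _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC; // toward +infinity
    let truncate = _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC; // toward zero
    let current = _MM_FROUND_CUR_DIRECTION; // defer to MXCSR.RC
    println!("{to_nearest} {downward} {upward} {truncate} {current}");
}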
Arithmetic
	Multiply the lower half-precision (16-bit) floating-point element in "a" and "b", store the result in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst".

dst.fp16[0] := a.fp16[0] * b.fp16[0]
dst[127:16] := a[127:16]
dst[MAX:128] := 0

AVX512_FP16
immintrin.h

Arithmetic
	Multiply the lower half-precision (16-bit) floating-point element in "a" and "b", store the result in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst".
	[round_note]

dst.fp16[0] := a.fp16[0] * b.fp16[0]
dst[127:16] := a[127:16]
dst[MAX:128] := 0

AVX512_FP16
immintrin.h

Arithmetic
	Multiply the lower half-precision (16-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst".

IF k[0]
    dst.fp16[0] := a.fp16[0] * b.fp16[0]
ELSE
    dst.fp16[0] := src.fp16[0]
FI
dst[127:16] := a[127:16]
dst[MAX:128] := 0

AVX512_FP16
immintrin.h

Arithmetic
	Multiply the lower half-precision (16-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst".
	[round_note]

IF k[0]
    dst.fp16[0] := a.fp16[0] * b.fp16[0]
ELSE
    dst.fp16[0] := src.fp16[0]
FI
dst[127:16] := a[127:16]
dst[MAX:128] := 0

AVX512_FP16
immintrin.h

Arithmetic
	Multiply the lower half-precision (16-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst".

IF k[0]
    dst.fp16[0] := a.fp16[0] * b.fp16[0]
ELSE
    dst.fp16[0] := 0
FI
dst[127:16] := a[127:16]
dst[MAX:128] := 0

AVX512_FP16
immintrin.h

Arithmetic
	Multiply the lower half-precision (16-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst".
	[round_note]

IF k[0]
    dst.fp16[0] := a.fp16[0] * b.fp16[0]
ELSE
    dst.fp16[0] := 0
FI
dst[127:16] := a[127:16]
dst[MAX:128] := 0

AVX512_FP16
immintrin.h
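The lower-element multiply records combine both conventions seen so far: mask bit 0 picks between the product and the fallback lane, and the upper lanes always come from "a". One scalar sketch covering the writemask form (hypothetical name, `f32` for `f16`):

// Writemask form of the lower-element multiply: bit 0 of `k` selects
// between the product and `src[0]`; lanes 1..=7 always come from `a`.
fn mask_mul_sh(src: &[f32; 8], k: u8, a: &[f32; 8], b: &[f32; 8]) -> [f32; 8] {
    let mut dst = *a;
    dst[0] = if k & 1 != 0 { a[0] * b[0] } else { src[0] };
    dst
}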
Arithmetic
	Multiply packed complex numbers in "a" and "b", and store the results in "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]".

FOR i := 0 to 15
    dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1])
    dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1])
ENDFOR
dst[MAX:512] := 0

AVX512_FP16
immintrin.h

Arithmetic
	Multiply packed complex numbers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]".

FOR i := 0 to 15
    IF k[i]
        dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1])
        dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1])
    ELSE
        dst.fp16[2*i+0] := src.fp16[2*i+0]
        dst.fp16[2*i+1] := src.fp16[2*i+1]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512_FP16
immintrin.h

Arithmetic
	Multiply packed complex numbers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]".

FOR i := 0 to 15
    IF k[i]
        dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1])
        dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1])
    ELSE
        dst.fp16[2*i+0] := 0
        dst.fp16[2*i+1] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512_FP16
immintrin.h
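The packed complex records interpret each pair of adjacent fp16 lanes as one complex number, and the two assignments in the pseudocode are exactly the real and imaginary parts of a complex product. A scalar sketch (illustrative only; `f32` for `f16`):

// One complex product per pair of adjacent lanes:
// (ar + i*ai) * (br + i*bi) = (ar*br - ai*bi) + i*(ai*br + ar*bi).
fn fmul_pch(a: &[f32; 32], b: &[f32; 32]) -> [f32; 32] {
    let mut dst = [0.0f32; 32];
    for i in 0..16 {
        let (ar, ai) = (a[2 * i], a[2 * i + 1]);
        let (br, bi) = (b[2 * i], b[2 * i + 1]);
        dst[2 * i] = ar * br - ai * bi; // real part
        dst[2 * i + 1] = ai * br + ar * bi; // imaginary part
    }
    dst
}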
Arithmetic
	Multiply packed complex numbers in "a" and "b", and store the results in "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]".
	[round_note]

FOR i := 0 to 15
    dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1])
    dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1])
ENDFOR
dst[MAX:512] := 0

AVX512_FP16
immintrin.h

Arithmetic
	Multiply packed complex numbers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]".
	[round_note]

FOR i := 0 to 15
    IF k[i]
        dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1])
        dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1])
    ELSE
        dst.fp16[2*i+0] := src.fp16[2*i+0]
        dst.fp16[2*i+1] := src.fp16[2*i+1]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512_FP16
immintrin.h

Arithmetic
	Multiply packed complex numbers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]".
	[round_note]

FOR i := 0 to 15
    IF k[i]
        dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1])
        dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1])
    ELSE
        dst.fp16[2*i+0] := 0
        dst.fp16[2*i+1] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512_FP16
immintrin.h
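Note the mask granularity in the records above: "k" carries one bit per complex pair, and that single bit gates both fp16 lanes of its pair. A sketch of the writemask form under the same stand-in assumptions (hypothetical name, `f32` for `f16`):

// One mask bit per complex pair: bit i gates BOTH lanes 2*i and 2*i+1.
fn mask_fmul_pch(src: &[f32; 32], k: u16, a: &[f32; 32], b: &[f32; 32]) -> [f32; 32] {
    let mut dst = *src;
    for i in 0..16 {
        if (k >> i) & 1 != 0 {
            let (ar, ai) = (a[2 * i], a[2 * i + 1]);
            let (br, bi) = (b[2 * i], b[2 * i + 1]);
            dst[2 * i] = ar * br - ai * bi;
            dst[2 * i + 1] = ai * br + ar * bi;
        }
    }
    dst
}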
Arithmetic
	Multiply the lower complex numbers in "a" and "b", and store the result in the lower elements of "dst", and copy the upper 6 packed elements from "a" to the upper elements of "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]".

dst.fp16[0] := (a.fp16[0] * b.fp16[0]) - (a.fp16[1] * b.fp16[1])
dst.fp16[1] := (a.fp16[1] * b.fp16[0]) + (a.fp16[0] * b.fp16[1])
dst[127:32] := a[127:32]
dst[MAX:128] := 0

AVX512_FP16
immintrin.h

Arithmetic
	Multiply the lower complex numbers in "a" and "b", and store the result in the lower elements of "dst" using writemask "k" (elements are copied from "src" when mask bit 0 is not set), and copy the upper 6 packed elements from "a" to the upper elements of "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]".

IF k[0]
    dst.fp16[0] := (a.fp16[0] * b.fp16[0]) - (a.fp16[1] * b.fp16[1])
    dst.fp16[1] := (a.fp16[1] * b.fp16[0]) + (a.fp16[0] * b.fp16[1])
ELSE
    dst.fp16[0] := src.fp16[0]
    dst.fp16[1] := src.fp16[1]
FI
dst[127:32] := a[127:32]
dst[MAX:128] := 0

AVX512_FP16
immintrin.h

Arithmetic
	Multiply the lower complex numbers in "a" and "b", and store the result in the lower elements of "dst" using zeromask "k" (elements are zeroed out when mask bit 0 is not set), and copy the upper 6 packed elements from "a" to the upper elements of "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]".

IF k[0]
    dst.fp16[0] := (a.fp16[0] * b.fp16[0]) - (a.fp16[1] * b.fp16[1])
    dst.fp16[1] := (a.fp16[1] * b.fp16[0]) + (a.fp16[0] * b.fp16[1])
ELSE
    dst.fp16[0] := 0
    dst.fp16[1] := 0
FI
dst[127:32] := a[127:32]
dst[MAX:128] := 0

AVX512_FP16
immintrin.h
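The lower-complex records compute a single complex product in lanes 0 and 1 and copy the remaining six fp16 lanes from "a" (the `dst[127:32] := a[127:32]` line). A scalar sketch (hypothetical name, `f32` for `f16`):

// One complex product in lanes 0 and 1; lanes 2..=7 are copied from `a`,
// mirroring `dst[127:32] := a[127:32]` in the pseudocode above.
fn fmul_sch(a: &[f32; 8], b: &[f32; 8]) -> [f32; 8] {
    let mut dst = *a;
    dst[0] = a[0] * b[0] - a[1] * b[1];
    dst[1] = a[1] * b[0] + a[0] * b[1];
    dst
}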
Arithmetic
	Multiply the lower complex numbers in "a" and "b", and store the result in the lower elements of "dst", and copy the upper 6 packed elements from "a" to the upper elements of "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]".
	[round_note]

dst.fp16[0] := (a.fp16[0] * b.fp16[0]) - (a.fp16[1] * b.fp16[1])
dst.fp16[1] := (a.fp16[1] * b.fp16[0]) + (a.fp16[0] * b.fp16[1])
dst[127:32] := a[127:32]
dst[MAX:128] := 0

AVX512_FP16
immintrin.h

Arithmetic
	Multiply the lower complex numbers in "a" and "b", and store the result in the lower elements of "dst" using writemask "k" (elements are copied from "src" when mask bit 0 is not set), and copy the upper 6 packed elements from "a" to the upper elements of "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]".
	[round_note]

IF k[0]
    dst.fp16[0] := (a.fp16[0] * b.fp16[0]) - (a.fp16[1] * b.fp16[1])
    dst.fp16[1] := (a.fp16[1] * b.fp16[0]) + (a.fp16[0] * b.fp16[1])
ELSE
    dst.fp16[0] := src.fp16[0]
    dst.fp16[1] := src.fp16[1]
FI
dst[127:32] := a[127:32]
dst[MAX:128] := 0

AVX512_FP16
immintrin.h

Arithmetic
	Multiply the lower complex numbers in "a" and "b", and store the result in the lower elements of "dst" using zeromask "k" (elements are zeroed out when mask bit 0 is not set), and copy the upper 6 packed elements from "a" to the upper elements of "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]".
	[round_note]

IF k[0]
    dst.fp16[0] := (a.fp16[0] * b.fp16[0]) - (a.fp16[1] * b.fp16[1])
    dst.fp16[1] := (a.fp16[1] * b.fp16[0]) + (a.fp16[0] * b.fp16[1])
ELSE
    dst.fp16[0] := 0
    dst.fp16[1] := 0
FI
dst[127:32] := a[127:32]
dst[MAX:128] := 0

AVX512_FP16
immintrin.h
Arithmetic
	Multiply packed complex numbers in "a" by the complex conjugates of packed complex numbers in "b", and store the results in "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]".

FOR i := 0 to 15
    dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1])
    dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1])
ENDFOR
dst[MAX:512] := 0

AVX512_FP16
immintrin.h
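The conjugate records are the same complex product with "b" conjugated, which flips the signs of the two cross terms relative to the plain complex multiply above. A scalar sketch (illustrative only; `f32` for `f16`):

// Conjugate product: (ar + i*ai) * (br - i*bi)
//   = (ar*br + ai*bi) + i*(ai*br - ar*bi).
fn fcmul_pch(a: &[f32; 32], b: &[f32; 32]) -> [f32; 32] {
    let mut dst = [0.0f32; 32];
    for i in 0..16 {
        let (ar, ai) = (a[2 * i], a[2 * i + 1]);
        let (br, bi) = (b[2 * i], b[2 * i + 1]);
        dst[2 * i] = ar * br + ai * bi;
        dst[2 * i + 1] = ai * br - ar * bi;
    }
    dst
}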
Arithmetic
	Multiply packed complex numbers in "a" by the complex conjugates of packed complex numbers in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]".

FOR i := 0 to 15
    IF k[i]
        dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1])
        dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1])
    ELSE
        dst.fp16[2*i+0] := src.fp16[2*i+0]
        dst.fp16[2*i+1] := src.fp16[2*i+1]
    FI
ENDFOR
dst[MAX:512] := 0

AVX512_FP16
immintrin.h

Arithmetic
	Multiply packed complex numbers in "a" by the complex conjugates of packed complex numbers in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]".

FOR i := 0 to 15
    IF k[i]
        dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1])
        dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1])
    ELSE
        dst.fp16[2*i+0] := 0
        dst.fp16[2*i+1] := 0
    FI
ENDFOR
dst[MAX:512] := 0

AVX512_FP16
immintrin.h
immintrin.h
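The writemask and zeromask entries above differ only in what lands in the masked-off lanes, and one mask bit governs one complex number, i.e. two fp16 lanes. A sketch of both conventions (hypothetical helper, not from this patch; f32 models the fp16 lanes):

    // Writemask form takes masked-off lanes from `src`; the zeromask form
    // writes zeros instead. Bit i of `k` covers complex number i.
    fn mask_fcmul_ref(src: &[f32], k: u16, a: &[f32], b: &[f32], zero: bool) -> Vec<f32> {
        let mut dst = vec![0.0f32; a.len()];
        for i in 0..a.len() / 2 {
            if (k >> i) & 1 != 0 {
                let (ar, ai) = (a[2 * i], a[2 * i + 1]);
                let (br, bi) = (b[2 * i], b[2 * i + 1]);
                dst[2 * i] = ar * br + ai * bi;
                dst[2 * i + 1] = ai * br - ar * bi;
            } else if !zero {
                dst[2 * i] = src[2 * i];
                dst[2 * i + 1] = src[2 * i + 1];
            }
            // else: both lanes stay 0.0, matching the zeromask pseudocode
        }
        dst
    }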
- Arithmetic + + + + + Multiply packed complex numbers in "a" by the complex conjugates of packed complex numbers in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]". + +FOR i := 0 to 15 + IF k[i] + dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1]) + dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1]) + ELSE + dst.fp16[2*i+0] := 0 + dst.fp16[2*i+1] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - Multiply packed complex numbers in "a" by the complex conjugates of packed - complex numbers in "b", and store the results in "dst". Each complex number is composed - of two adjacent half-precision (16-bit) floating-point elements, which defines the - complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate - "conjugate = vec.fp16[0] - i * vec.fp16[1]". - [round_note] - - FOR i := 0 to 15 - dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1]) - dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
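The "[round_note]" entries beginning here take an explicit rounding mode rather than reading MXCSR. In stdarch that mode is conventionally a const generic parameter, as the existing AVX-512F rounding intrinsics already do; the FP16 intrinsics in this series can be expected to follow the same shape. An illustration using an existing intrinsic (assumes nightly with the stdarch_x86_avx512 feature; not code from this patch):

    #![feature(stdarch_x86_avx512)]
    use core::arch::x86_64::*;

    // Round-to-nearest plus suppress-all-exceptions, the usual pairing
    // passed to `_round_` intrinsics.
    #[target_feature(enable = "avx512f")]
    unsafe fn add_round_nearest(a: __m512, b: __m512) -> __m512 {
        _mm512_add_round_ps::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a, b)
    }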
- Arithmetic + + + + + Multiply packed complex numbers in "a" by the complex conjugates of packed complex numbers in "b", and store the results in "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]". + [round_note] + +FOR i := 0 to 15 + dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1]) + dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - Multiply packed complex numbers in "a" by the complex conjugates of packed - complex numbers in "b", and store the results in "dst". Each complex number is composed - of two adjacent half-precision (16-bit) floating-point elements, which defines the - complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate - "conjugate = vec.fp16[0] - i * vec.fp16[1]". - [round_note] - - FOR i := 0 to 15 - dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1]) - dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + Multiply packed complex numbers in "a" by the complex conjugates of packed complex numbers in "b", and store the results in "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]". + [round_note] + +FOR i := 0 to 15 + dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1]) + dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - - Multiply packed complex numbers in "a" by the complex conjugates of packed - complex numbers in "b", and store the results in "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). Each complex number is - composed of two adjacent half-precision (16-bit) floating-point elements, which defines - the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate - "conjugate = vec.fp16[0] - i * vec.fp16[1]". - [round_note] - - FOR i := 0 to 15 - IF k[i] - dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1]) - dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1]) - ELSE - dst.fp16[2*i+0] := src.fp16[2*i+0] - dst.fp16[2*i+1] := src.fp16[2*i+1] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + + Multiply packed complex numbers in "a" by the complex conjugates of packed complex numbers in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]". + [round_note] + +FOR i := 0 to 15 + IF k[i] + dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1]) + dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1]) + ELSE + dst.fp16[2*i+0] := src.fp16[2*i+0] + dst.fp16[2*i+1] := src.fp16[2*i+1] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - - Multiply packed complex numbers in "a" by the complex conjugates of packed - complex numbers in "b", and store the results in "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). Each complex number is - composed of two adjacent half-precision (16-bit) floating-point elements, which defines - the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate - "conjugate = vec.fp16[0] - i * vec.fp16[1]". - [round_note] - - FOR i := 0 to 15 - IF k[i] - dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1]) - dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1]) - ELSE - dst.fp16[2*i+0] := src.fp16[2*i+0] - dst.fp16[2*i+1] := src.fp16[2*i+1] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + + Multiply packed complex numbers in "a" by the complex conjugates of packed complex numbers in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]". + [round_note] + +FOR i := 0 to 15 + IF k[i] + dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1]) + dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1]) + ELSE + dst.fp16[2*i+0] := src.fp16[2*i+0] + dst.fp16[2*i+1] := src.fp16[2*i+1] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed complex numbers in "a" by the complex conjugates of packed - complex numbers in "b", and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). Each complex number is composed - of two adjacent half-precision (16-bit) floating-point elements, which defines the - complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate - "conjugate = vec.fp16[0] - i * vec.fp16[1]". - [round_note] - - FOR i := 0 to 15 - IF k[i] - dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1]) - dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1]) - ELSE - dst.fp16[2*i+0] := 0 - dst.fp16[2*i+1] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + Multiply packed complex numbers in "a" by the complex conjugates of packed complex numbers in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]". + [round_note] + +FOR i := 0 to 15 + IF k[i] + dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1]) + dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1]) + ELSE + dst.fp16[2*i+0] := 0 + dst.fp16[2*i+1] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed complex numbers in "a" by the complex conjugates of packed - complex numbers in "b", and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). Each complex number is composed - of two adjacent half-precision (16-bit) floating-point elements, which defines the - complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate - "conjugate = vec.fp16[0] - i * vec.fp16[1]". - [round_note] - - FOR i := 0 to 15 - IF k[i] - dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1]) - dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1]) - ELSE - dst.fp16[2*i+0] := 0 - dst.fp16[2*i+1] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + Multiply packed complex numbers in "a" by the complex conjugates of packed complex numbers in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]". + [round_note] + +FOR i := 0 to 15 + IF k[i] + dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1]) + dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1]) + ELSE + dst.fp16[2*i+0] := 0 + dst.fp16[2*i+1] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - Multiply the lower complex number in "a" by the complex conjugate of the lower - complex number in "b", and store the result in the lower elements of "dst", and copy the - upper 6 packed elements from "a" to the upper elements of "dst". Each complex number is - composed of two adjacent half-precision (16-bit) floating-point elements, which defines - the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate - "conjugate = vec.fp16[0] - i * vec.fp16[1]". - - dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + (a.fp16[1] * b.fp16[1]) - dst.fp16[1] := (a.fp16[1] * b.fp16[0]) - (a.fp16[0] * b.fp16[1]) - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + Multiply the lower complex number in "a" by the complex conjugate of the lower complex number in "b", and store the result in the lower elements of "dst", and copy the upper 6 packed elements from "a" to the upper elements of "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]". + +dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + (a.fp16[1] * b.fp16[1]) +dst.fp16[1] := (a.fp16[1] * b.fp16[0]) - (a.fp16[0] * b.fp16[1]) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - Multiply the lower complex number in "a" by the complex conjugate of the lower - complex number in "b", and store the result in the lower elements of "dst", and copy the - upper 6 packed elements from "a" to the upper elements of "dst". Each complex number is - composed of two adjacent half-precision (16-bit) floating-point elements, which defines - the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate - "conjugate = vec.fp16[0] - i * vec.fp16[1]". - - dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + (a.fp16[1] * b.fp16[1]) - dst.fp16[1] := (a.fp16[1] * b.fp16[0]) - (a.fp16[0] * b.fp16[1]) - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
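The scalar ("lower element") entries above operate on complex number 0 only; the remaining six fp16 lanes pass through from "a". Sketched (hypothetical helper, f32 modelling the eight fp16 lanes of a 128-bit register):

    // Only complex number 0 is computed; dst[127:32], i.e. lanes 2..8,
    // are copied from `a`.
    fn fcmul_sch_ref(a: [f32; 8], b: [f32; 8]) -> [f32; 8] {
        let mut dst = a; // upper six lanes pass through from `a`
        dst[0] = a[0] * b[0] + a[1] * b[1];
        dst[1] = a[1] * b[0] - a[0] * b[1];
        dst
    }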
- Arithmetic + + + + Multiply the lower complex number in "a" by the complex conjugate of the lower complex number in "b", and store the result in the lower elements of "dst", and copy the upper 6 packed elements from "a" to the upper elements of "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]". + +dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + (a.fp16[1] * b.fp16[1]) +dst.fp16[1] := (a.fp16[1] * b.fp16[0]) - (a.fp16[0] * b.fp16[1]) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - Multiply the lower complex number in "a" by the complex conjugate of the lower - complex number in "b", and store the result in the lower elements of "dst" using - writemask "k" (elements are copied from "src" when mask bit 0 is not set), and copy the - upper 6 packed elements from "a" to the upper elements of "dst". Each complex number is - composed of two adjacent half-precision (16-bit) floating-point elements, which defines - the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate - "conjugate = vec.fp16[0] - i * vec.fp16[1]". - - IF k[0] - dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + (a.fp16[1] * b.fp16[1]) - dst.fp16[1] := (a.fp16[1] * b.fp16[0]) - (a.fp16[0] * b.fp16[1]) - ELSE - dst.fp16[0] := src.fp16[0] - dst.fp16[1] := src.fp16[1] - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + Multiply the lower complex number in "a" by the complex conjugate of the lower complex number in "b", and store the result in the lower elements of "dst" using writemask "k" (elements are copied from "src" when mask bit 0 is not set), and copy the upper 6 packed elements from "a" to the upper elements of "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]". + +IF k[0] + dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + (a.fp16[1] * b.fp16[1]) + dst.fp16[1] := (a.fp16[1] * b.fp16[0]) - (a.fp16[0] * b.fp16[1]) +ELSE + dst.fp16[0] := src.fp16[0] + dst.fp16[1] := src.fp16[1] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - Multiply the lower complex number in "a" by the complex conjugate of the lower - complex number in "b", and store the result in the lower elements of "dst" using - writemask "k" (elements are copied from "src" when mask bit 0 is not set), and copy the - upper 6 packed elements from "a" to the upper elements of "dst". Each complex number is - composed of two adjacent half-precision (16-bit) floating-point elements, which defines - the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate - "conjugate = vec.fp16[0] - i * vec.fp16[1]". - - IF k[0] - dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + (a.fp16[1] * b.fp16[1]) - dst.fp16[1] := (a.fp16[1] * b.fp16[0]) - (a.fp16[0] * b.fp16[1]) - ELSE - dst.fp16[0] := src.fp16[0] - dst.fp16[1] := src.fp16[1] - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + Multiply the lower complex number in "a" by the complex conjugate of the lower complex number in "b", and store the result in the lower elements of "dst" using writemask "k" (elements are copied from "src" when mask bit 0 is not set), and copy the upper 6 packed elements from "a" to the upper elements of "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]". + +IF k[0] + dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + (a.fp16[1] * b.fp16[1]) + dst.fp16[1] := (a.fp16[1] * b.fp16[0]) - (a.fp16[0] * b.fp16[1]) +ELSE + dst.fp16[0] := src.fp16[0] + dst.fp16[1] := src.fp16[1] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - Multiply the lower complex number in "a" by the complex conjugate of the lower - complex number in "b", and store the result in the lower elements of "dst" using - zeromask "k" (elements are zeroed out when mask bit 0 is not set), and copy the upper 6 - packed elements from "a" to the upper elements of "dst". Each complex number is composed - of two adjacent half-precision (16-bit) floating-point elements, which defines the - complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate - "conjugate = vec.fp16[0] - i * vec.fp16[1]". - - IF k[0] - dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + (a.fp16[1] * b.fp16[1]) - dst.fp16[1] := (a.fp16[1] * b.fp16[0]) - (a.fp16[0] * b.fp16[1]) - ELSE - dst.fp16[0] := 0 - dst.fp16[1] := 0 - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + Multiply the lower complex number in "a" by the complex conjugate of the lower complex number in "b", and store the result in the lower elements of "dst" using zeromask "k" (elements are zeroed out when mask bit 0 is not set), and copy the upper 6 packed elements from "a" to the upper elements of "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]". + +IF k[0] + dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + (a.fp16[1] * b.fp16[1]) + dst.fp16[1] := (a.fp16[1] * b.fp16[0]) - (a.fp16[0] * b.fp16[1]) +ELSE + dst.fp16[0] := 0 + dst.fp16[1] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - Multiply the lower complex number in "a" by the complex conjugate of the lower - complex number in "b", and store the result in the lower elements of "dst" using - zeromask "k" (elements are zeroed out when mask bit 0 is not set), and copy the upper 6 - packed elements from "a" to the upper elements of "dst". Each complex number is composed - of two adjacent half-precision (16-bit) floating-point elements, which defines the - complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate - "conjugate = vec.fp16[0] - i * vec.fp16[1]". - - IF k[0] - dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + (a.fp16[1] * b.fp16[1]) - dst.fp16[1] := (a.fp16[1] * b.fp16[0]) - (a.fp16[0] * b.fp16[1]) - ELSE - dst.fp16[0] := 0 - dst.fp16[1] := 0 - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + Multiply the lower complex number in "a" by the complex conjugate of the lower complex number in "b", and store the result in the lower elements of "dst" using zeromask "k" (elements are zeroed out when mask bit 0 is not set), and copy the upper 6 packed elements from "a" to the upper elements of "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]". + +IF k[0] + dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + (a.fp16[1] * b.fp16[1]) + dst.fp16[1] := (a.fp16[1] * b.fp16[0]) - (a.fp16[0] * b.fp16[1]) +ELSE + dst.fp16[0] := 0 + dst.fp16[1] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - Multiply the lower complex number in "a" by the complex conjugate of the lower - complex number in "b", and store the result in the lower elements of "dst", and copy the - upper 6 packed elements from "a" to the upper elements of "dst". Each complex number is - composed of two adjacent half-precision (16-bit) floating-point elements, which defines - the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate - "conjugate = vec.fp16[0] - i * vec.fp16[1]". - [round_note] - - dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + (a.fp16[1] * b.fp16[1]) - dst.fp16[1] := (a.fp16[1] * b.fp16[0]) - (a.fp16[0] * b.fp16[1]) - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + Multiply the lower complex number in "a" by the complex conjugate of the lower complex number in "b", and store the result in the lower elements of "dst", and copy the upper 6 packed elements from "a" to the upper elements of "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]". + [round_note] + +dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + (a.fp16[1] * b.fp16[1]) +dst.fp16[1] := (a.fp16[1] * b.fp16[0]) - (a.fp16[0] * b.fp16[1]) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - Multiply the lower complex number in "a" by the complex conjugate of the lower - complex number in "b", and store the result in the lower elements of "dst", and copy the - upper 6 packed elements from "a" to the upper elements of "dst". Each complex number is - composed of two adjacent half-precision (16-bit) floating-point elements, which defines - the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate - "conjugate = vec.fp16[0] - i * vec.fp16[1]". - [round_note] - - dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + (a.fp16[1] * b.fp16[1]) - dst.fp16[1] := (a.fp16[1] * b.fp16[0]) - (a.fp16[0] * b.fp16[1]) - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + Multiply the lower complex number in "a" by the complex conjugate of the lower complex number in "b", and store the result in the lower elements of "dst", and copy the upper 6 packed elements from "a" to the upper elements of "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]". + [round_note] + +dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + (a.fp16[1] * b.fp16[1]) +dst.fp16[1] := (a.fp16[1] * b.fp16[0]) - (a.fp16[0] * b.fp16[1]) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - - Multiply the lower complex number in "a" by the complex conjugate of the lower - complex number in "b", and store the result in the lower elements of "dst" using - writemask "k" (elements are copied from "src" when mask bit 0 is not set), and copy the - upper 6 packed elements from "a" to the upper elements of "dst". Each complex number is - composed of two adjacent half-precision (16-bit) floating-point elements, which defines - the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate - "conjugate = vec.fp16[0] - i * vec.fp16[1]". - [round_note] - - IF k[0] - dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + (a.fp16[1] * b.fp16[1]) - dst.fp16[1] := (a.fp16[1] * b.fp16[0]) - (a.fp16[0] * b.fp16[1]) - ELSE - dst.fp16[0] := src.fp16[0] - dst.fp16[1] := src.fp16[1] - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + + Multiply the lower complex number in "a" by the complex conjugate of the lower complex number in "b", and store the result in the lower elements of "dst" using writemask "k" (elements are copied from "src" when mask bit 0 is not set), and copy the upper 6 packed elements from "a" to the upper elements of "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]". + [round_note] + +IF k[0] + dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + (a.fp16[1] * b.fp16[1]) + dst.fp16[1] := (a.fp16[1] * b.fp16[0]) - (a.fp16[0] * b.fp16[1]) +ELSE + dst.fp16[0] := src.fp16[0] + dst.fp16[1] := src.fp16[1] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - - Multiply the lower complex number in "a" by the complex conjugate of the lower - complex number in "b", and store the result in the lower elements of "dst" using - writemask "k" (elements are copied from "src" when mask bit 0 is not set), and copy the - upper 6 packed elements from "a" to the upper elements of "dst". Each complex number is - composed of two adjacent half-precision (16-bit) floating-point elements, which defines - the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate - "conjugate = vec.fp16[0] - i * vec.fp16[1]". - [round_note] - - IF k[0] - dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + (a.fp16[1] * b.fp16[1]) - dst.fp16[1] := (a.fp16[1] * b.fp16[0]) - (a.fp16[0] * b.fp16[1]) - ELSE - dst.fp16[0] := src.fp16[0] - dst.fp16[1] := src.fp16[1] - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + + Multiply the lower complex number in "a" by the complex conjugate of the lower complex number in "b", and store the result in the lower elements of "dst" using writemask "k" (elements are copied from "src" when mask bit 0 is not set), and copy the upper 6 packed elements from "a" to the upper elements of "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]". + [round_note] + +IF k[0] + dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + (a.fp16[1] * b.fp16[1]) + dst.fp16[1] := (a.fp16[1] * b.fp16[0]) - (a.fp16[0] * b.fp16[1]) +ELSE + dst.fp16[0] := src.fp16[0] + dst.fp16[1] := src.fp16[1] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - Multiply the lower complex number in "a" by the complex conjugate of the lower - complex number in "b", and store the result in the lower elements of "dst" using - zeromask "k" (elements are zeroed out when mask bit 0 is not set), and copy the upper 6 - packed elements from "a" to the upper elements of "dst". Each complex number is composed - of two adjacent half-precision (16-bit) floating-point elements, which defines the - complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate - "conjugate = vec.fp16[0] - i * vec.fp16[1]". - [round_note] - - IF k[0] - dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + (a.fp16[1] * b.fp16[1]) - dst.fp16[1] := (a.fp16[1] * b.fp16[0]) - (a.fp16[0] * b.fp16[1]) - ELSE - dst.fp16[0] := 0 - dst.fp16[1] := 0 - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + Multiply the lower complex number in "a" by the complex conjugate of the lower complex number in "b", and store the result in the lower elements of "dst" using zeromask "k" (elements are zeroed out when mask bit 0 is not set), and copy the upper 6 packed elements from "a" to the upper elements of "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]". + [round_note] + +IF k[0] + dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + (a.fp16[1] * b.fp16[1]) + dst.fp16[1] := (a.fp16[1] * b.fp16[0]) - (a.fp16[0] * b.fp16[1]) +ELSE + dst.fp16[0] := 0 + dst.fp16[1] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - Multiply the lower complex number in "a" by the complex conjugate of the lower - complex number in "b", and store the result in the lower elements of "dst" using - zeromask "k" (elements are zeroed out when mask bit 0 is not set), and copy the upper 6 - packed elements from "a" to the upper elements of "dst". Each complex number is composed - of two adjacent half-precision (16-bit) floating-point elements, which defines the - complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate - "conjugate = vec.fp16[0] - i * vec.fp16[1]". - [round_note] - - IF k[0] - dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + (a.fp16[1] * b.fp16[1]) - dst.fp16[1] := (a.fp16[1] * b.fp16[0]) - (a.fp16[0] * b.fp16[1]) - ELSE - dst.fp16[0] := 0 - dst.fp16[1] := 0 - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + Multiply the lower complex number in "a" by the complex conjugate of the lower complex number in "b", and store the result in the lower elements of "dst" using zeromask "k" (elements are zeroed out when mask bit 0 is not set), and copy the upper 6 packed elements from "a" to the upper elements of "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]". + [round_note] + +IF k[0] + dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + (a.fp16[1] * b.fp16[1]) + dst.fp16[1] := (a.fp16[1] * b.fp16[0]) - (a.fp16[0] * b.fp16[1]) +ELSE + dst.fp16[0] := 0 + dst.fp16[1] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - Multiply packed complex numbers in "a" and "b", accumulate to the corresponding - complex numbers in "c", and store the results in "dst". Each complex number is composed - of two adjacent half-precision (16-bit) floating-point elements, which defines the - complex number "complex = vec.fp16[0] + i * vec.fp16[1]". - - FOR i := 0 to 15 - dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1]) + - c.fp16[2*i+0] - dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1]) + - c.fp16[2*i+1] - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
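The entry above is the complex multiply-accumulate dst = a * b + c. A scalar reference sketch under the same conventions as earlier (illustrative, not from the patch):

    // dst = a * b + c over interleaved (re, im) complex lanes:
    // re = ar*br - ai*bi + cr, im = ai*br + ar*bi + ci.
    fn fmadd_pch_ref(a: &[f32], b: &[f32], c: &[f32], dst: &mut [f32]) {
        for i in 0..a.len() / 2 {
            let (ar, ai) = (a[2 * i], a[2 * i + 1]);
            let (br, bi) = (b[2 * i], b[2 * i + 1]);
            dst[2 * i] = ar * br - ai * bi + c[2 * i];
            dst[2 * i + 1] = ai * br + ar * bi + c[2 * i + 1];
        }
    }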
- Arithmetic
+
+
+
+
+
+ Multiply packed complex numbers in "a" and "b", accumulate to the corresponding complex numbers in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]".
+
+FOR i := 0 to 15
+ IF k[i]
+  dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1]) + c.fp16[2*i+0]
+  dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1]) + c.fp16[2*i+1]
+ ELSE
+  dst.fp16[2*i+0] := a.fp16[2*i+0]
+  dst.fp16[2*i+1] := a.fp16[2*i+1]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+
+
+ AVX512_FP16
immintrin.h
+ Arithmetic
-
-
-
-
-
- Multiply packed complex numbers in "a" and "b", accumulate to the corresponding
- complex numbers in "c", and store the results in "dst" using writemask "k" (elements
- are copied from "a" when the corresponding mask bit is not set). Each complex number is
- composed of two adjacent half-precision (16-bit) floating-point elements, which defines
- the complex number "complex = vec.fp16[0] + i * vec.fp16[1]".
-
- FOR i := 0 to 15
- IF k[i]
- dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1]) +
- c.fp16[2*i+0]
- dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1]) +
- c.fp16[2*i+1]
- ELSE
- dst.fp16[2*i+0] := a.fp16[2*i+0]
- dst.fp16[2*i+1] := a.fp16[2*i+1]
- FI
- ENDFOR
- dst[MAX:512] := 0
-
-
- AVX512_FP16
+
+
+
+
+
+ Multiply packed complex numbers in "a" and "b", accumulate to the corresponding complex numbers in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]".
+
+FOR i := 0 to 15
+ IF k[i]
+  dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1]) + c.fp16[2*i+0]
+  dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1]) + c.fp16[2*i+1]
+ ELSE
+  dst.fp16[2*i+0] := a.fp16[2*i+0]
+  dst.fp16[2*i+1] := a.fp16[2*i+1]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+
+
+ AVX512_FP16
immintrin.h
- Arithmetic
+
+
+
+
+
+ Multiply packed complex numbers in "a" and "b", accumulate to the corresponding complex numbers in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]".
+
+FOR i := 0 to 15
+ IF k[i]
+  dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1]) + c.fp16[2*i+0]
+  dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1]) + c.fp16[2*i+1]
+ ELSE
+  dst.fp16[2*i+0] := c.fp16[2*i+0]
+  dst.fp16[2*i+1] := c.fp16[2*i+1]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+
+
+ AVX512_FP16
immintrin.h
+ Arithmetic
-
-
-
-
-
- Multiply packed complex numbers in "a" and "b", accumulate to the corresponding
- complex numbers in "c", and store the results in "dst" using writemask "k" (elements
- are copied from "c" when the corresponding mask bit is not set). Each complex number is
- composed of two adjacent half-precision (16-bit) floating-point elements, which defines
- the complex number "complex = vec.fp16[0] + i * vec.fp16[1]".
-
- FOR i := 0 to 15
- IF k[i]
- dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1]) +
- c.fp16[2*i+0]
- dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1]) +
- c.fp16[2*i+1]
- ELSE
- dst.fp16[2*i+0] := c.fp16[2*i+0]
- dst.fp16[2*i+1] := c.fp16[2*i+1]
- FI
- ENDFOR
- dst[MAX:512] := 0
-
-
- AVX512_FP16
+
+
+
+
+
+ Multiply packed complex numbers in "a" and "b", accumulate to the corresponding complex numbers in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]".
+
+FOR i := 0 to 15
+ IF k[i]
+  dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1]) + c.fp16[2*i+0]
+  dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1]) + c.fp16[2*i+1]
+ ELSE
+  dst.fp16[2*i+0] := c.fp16[2*i+0]
+  dst.fp16[2*i+1] := c.fp16[2*i+1]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+
+
+ AVX512_FP16
immintrin.h
- Arithmetic + + + + + + Multiply packed complex numbers in "a" and "b", accumulate to the corresponding complex numbers in "src", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]". + +FOR i := 0 to 15 + IF k[i] + dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1]) + c.fp16[2*i+0] + dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1]) + c.fp16[2*i+1] + ELSE + dst.fp16[2*i+0] := c.fp16[2*i+0] + dst.fp16[2*i+1] := c.fp16[2*i+1] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed complex numbers in "a" and "b", accumulate to the corresponding - complex numbers in "c", and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). Each complex number is composed - of two adjacent half-precision (16-bit) floating-point elements, which defines the - complex number "complex = vec.fp16[0] + i * vec.fp16[1]". - - FOR i := 0 to 15 - IF k[i] - dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1]) + - c.fp16[2*i+0] - dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1]) + - c.fp16[2*i+1] - ELSE - dst.fp16[2*i+0] := 0 - dst.fp16[2*i+1] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + Multiply packed complex numbers in "a" and "b", accumulate to the corresponding complex numbers in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]". + +FOR i := 0 to 15 + IF k[i] + dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1]) + c.fp16[2*i+0] + dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1]) + c.fp16[2*i+1] + ELSE + dst.fp16[2*i+0] := 0 + dst.fp16[2*i+1] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - Multiply packed complex numbers in "a" and "b", accumulate to the corresponding - complex numbers in "c", and store the results in "dst". Each complex number is composed - of two adjacent half-precision (16-bit) floating-point elements, which defines the - complex number "complex = vec.fp16[0] + i * vec.fp16[1]". - [round_note] - - FOR i := 0 to 15 - dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1]) + - c.fp16[2*i+0] - dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1]) + - c.fp16[2*i+1] - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + Multiply packed complex numbers in "a" and "b", accumulate to the corresponding complex numbers in "c", and store the results in "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]". + [round_note] + +FOR i := 0 to 15 + dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1]) + c.fp16[2*i+0] + dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1]) + c.fp16[2*i+1] +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - - Multiply packed complex numbers in "a" and "b", accumulate to the corresponding - complex numbers in "c", and store the results in "dst" using writemask "k" (elements are - copied from "a" when the corresponding mask bit is not set). Each complex number is - composed of two adjacent half-precision (16-bit) floating-point elements, which defines - the complex number "complex = vec.fp16[0] + i * vec.fp16[1]". - [round_note] - - FOR i := 0 to 15 - IF k[i] - dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1]) + - c.fp16[2*i+0] - dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1]) + - c.fp16[2*i+1] - ELSE - dst.fp16[2*i+0] := a.fp16[2*i+0] - dst.fp16[2*i+1] := a.fp16[2*i+1] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + + Multiply packed complex numbers in "a" and "b", accumulate to the corresponding complex numbers in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]". + [round_note] + +FOR i := 0 to 15 + IF k[i] + dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1]) + c.fp16[2*i+0] + dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1]) + c.fp16[2*i+1] + ELSE + dst.fp16[2*i+0] := a.fp16[2*i+0] + dst.fp16[2*i+1] := a.fp16[2*i+1] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - - Multiply packed complex numbers in "a" and "b", accumulate to the corresponding - complex numbers in "c", and store the results in "dst" using writemask "k" (elements are - copied from "c" when the corresponding mask bit is not set). Each complex number is - composed of two adjacent half-precision (16-bit) floating-point elements, which defines - the complex number "complex = vec.fp16[0] + i * vec.fp16[1]". - [round_note] - - FOR i := 0 to 15 - IF k[i] - dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1]) + - c.fp16[2*i+0] - dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1]) + - c.fp16[2*i+1] - ELSE - dst.fp16[2*i+0] := c.fp16[2*i+0] - dst.fp16[2*i+1] := c.fp16[2*i+1] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + + Multiply packed complex numbers in "a" and "b", accumulate to the corresponding complex numbers in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]". + [round_note] + +FOR i := 0 to 15 + IF k[i] + dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1]) + c.fp16[2*i+0] + dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1]) + c.fp16[2*i+1] + ELSE + dst.fp16[2*i+0] := c.fp16[2*i+0] + dst.fp16[2*i+1] := c.fp16[2*i+1] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - - Multiply packed complex numbers in "a" and "b", accumulate to the corresponding - complex numbers in "c", and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). Each complex number is composed - of two adjacent half-precision (16-bit) floating-point elements, which defines the - complex number "complex = vec.fp16[0] + i * vec.fp16[1]". - [round_note] - - FOR i := 0 to 15 - IF k[i] - dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1]) + - c.fp16[2*i+0] - dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1]) + - c.fp16[2*i+1] - ELSE - dst.fp16[2*i+0] := 0 - dst.fp16[2*i+1] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + + Multiply packed complex numbers in "a" and "b", accumulate to the corresponding complex numbers in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]". + [round_note] + +FOR i := 0 to 15 + IF k[i] + dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) - (a.fp16[2*i+1] * b.fp16[2*i+1]) + c.fp16[2*i+0] + dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) + (a.fp16[2*i+0] * b.fp16[2*i+1]) + c.fp16[2*i+1] + ELSE + dst.fp16[2*i+0] := 0 + dst.fp16[2*i+1] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - Multiply the lower complex numbers in "a" and "b", accumulate to the lower - complex number in "c", and store the result in the lower elements of "dst", and copy the - upper 6 packed elements from "a" to the upper elements of "dst". Each complex number is - composed of two adjacent half-precision (16-bit) floating-point elements, which defines - the complex number "complex = vec.fp16[0] + i * vec.fp16[1]". - - dst.fp16[0] := (a.fp16[0] * b.fp16[0]) - (a.fp16[1] * b.fp16[1]) + c.fp16[0] - dst.fp16[1] := (a.fp16[1] * b.fp16[0]) + (a.fp16[0] * b.fp16[1]) + c.fp16[1] - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + Multiply the lower complex numbers in "a" and "b", accumulate to the lower complex number in "c", and store the result in the lower elements of "dst", and copy the upper 6 packed elements from "a" to the upper elements of "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]". + +dst.fp16[0] := (a.fp16[0] * b.fp16[0]) - (a.fp16[1] * b.fp16[1]) + c.fp16[0] +dst.fp16[1] := (a.fp16[1] * b.fp16[0]) + (a.fp16[0] * b.fp16[1]) + c.fp16[1] +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - Multiply the lower complex numbers in "a" and "b", accumulate to the lower - complex number in "c", and store the result in the lower elements of "dst" using - writemask "k" (elements are copied from "a" when mask bit 0 is not set), and copy the - upper 6 packed elements from "a" to the upper elements of "dst". Each complex number is - composed of two adjacent half-precision (16-bit) floating-point elements, which defines - the complex number "complex = vec.fp16[0] + i * vec.fp16[1]". - - IF k[0] - dst.fp16[0] := (a.fp16[0] * b.fp16[0]) - (a.fp16[1] * b.fp16[1]) + c.fp16[0] - dst.fp16[1] := (a.fp16[1] * b.fp16[0]) + (a.fp16[0] * b.fp16[1]) + c.fp16[1] - ELSE - dst.fp16[0] := a.fp16[0] - dst.fp16[1] := a.fp16[1] - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + Multiply the lower complex numbers in "a" and "b", accumulate to the lower complex number in "c", and store the result in the lower elements of "dst" using writemask "k" (elements are copied from "a" when mask bit 0 is not set), and copy the upper 6 packed elements from "a" to the upper elements of "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]". + +IF k[0] + dst.fp16[0] := (a.fp16[0] * b.fp16[0]) - (a.fp16[1] * b.fp16[1]) + c.fp16[0] + dst.fp16[1] := (a.fp16[1] * b.fp16[0]) + (a.fp16[0] * b.fp16[1]) + c.fp16[1] +ELSE + dst.fp16[0] := a.fp16[0] + dst.fp16[1] := a.fp16[1] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
-
-
-
-
-
- Multiply the lower complex numbers in "a" and "b", accumulate to the lower
- complex number in "c", and store the result in the lower elements of "dst" using
- writemask "k" (elements are copied from "c" when mask bit 0 is not set), and copy the
- upper 6 packed elements from "c" to the upper elements of "dst". Each complex number is
- composed of two adjacent half-precision (16-bit) floating-point elements, which defines
- the complex number "complex = vec.fp16[0] + i * vec.fp16[1]".
-
- IF k[0]
- dst.fp16[0] := (a.fp16[0] * b.fp16[0]) - (a.fp16[1] * b.fp16[1]) + c.fp16[0]
- dst.fp16[1] := (a.fp16[1] * b.fp16[0]) + (a.fp16[0] * b.fp16[1]) + c.fp16[1]
- ELSE
- dst.fp16[0] := c.fp16[0]
- dst.fp16[1] := c.fp16[1]
- FI
- dst[127:32] := c[127:32]
- dst[MAX:128] := 0
-
-
- AVX512_FP16
+
+
+
+
+
+ Multiply the lower complex numbers in "a" and "b", accumulate to the lower complex number in "c", and store the result in the lower elements of "dst" using writemask "k" (elements are copied from "c" when mask bit 0 is not set), and copy the upper 6 packed elements from "c" to the upper elements of "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]".
+
+IF k[0]
+ dst.fp16[0] := (a.fp16[0] * b.fp16[0]) - (a.fp16[1] * b.fp16[1]) + c.fp16[0]
+ dst.fp16[1] := (a.fp16[1] * b.fp16[0]) + (a.fp16[0] * b.fp16[1]) + c.fp16[1]
+ELSE
+ dst.fp16[0] := c.fp16[0]
+ dst.fp16[1] := c.fp16[1]
+FI
+dst[127:32] := c[127:32]
+dst[MAX:128] := 0
+
+
+ AVX512_FP16
immintrin.h
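Note the asymmetry in the entry above: in this mask3 form both the masked-off result lanes and the upper six lanes come from "c", whereas the sibling writemask form takes them from "a". Sketched (hypothetical helper, f32 for f16):

    // mask3 variant: the pass-through operand is `c`, not `a`.
    fn mask3_fmadd_sch_ref(a: [f32; 8], b: [f32; 8], c: [f32; 8], k0: bool) -> [f32; 8] {
        let mut dst = c; // upper lanes and the masked-off case both read `c`
        if k0 {
            dst[0] = a[0] * b[0] - a[1] * b[1] + c[0];
            dst[1] = a[1] * b[0] + a[0] * b[1] + c[1];
        }
        dst
    }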
- Arithmetic + + + + + + Multiply the lower complex number in "a" and "b", accumulate to the lower complex number in "c", and store the result in the lower elements of "dst" using writemask "k" (elements are copied from "c" when mask bit 0 is not set), and copy the upper 6 packed elements from "c" to the upper elements of "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]". + +IF k[0] + dst.fp16[0] := (a.fp16[0] * b.fp16[0]) - (a.fp16[1] * b.fp16[1]) + c.fp16[0] + dst.fp16[1] := (a.fp16[1] * b.fp16[0]) + (a.fp16[0] * b.fp16[1]) + c.fp16[1] +ELSE + dst.fp16[0] := c.fp16[0] + dst.fp16[1] := c.fp16[1] +FI +dst[127:32] := c[127:32] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - Multiply the lower complex numbers in "a" and "b", accumulate to the lower - complex number in "c", and store the result in the lower elements of "dst" using - zeromask "k" (elements are zeroed out when mask bit 0 is not set), and copy the upper 6 - packed elements from "a" to the upper elements of "dst". Each complex number is composed - of two adjacent half-precision (16-bit) floating-point elements, which defines the - complex number "complex = vec.fp16[0] + i * vec.fp16[1]". - - IF k[0] - dst.fp16[0] := (a.fp16[0] * b.fp16[0]) - (a.fp16[1] * b.fp16[1]) + c.fp16[0] - dst.fp16[1] := (a.fp16[1] * b.fp16[0]) + (a.fp16[0] * b.fp16[1]) + c.fp16[1] - ELSE - dst.fp16[0] := 0 - dst.fp16[1] := 0 - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + Multiply the lower complex numbers in "a" and "b", accumulate to the lower complex number in "c", and store the result in the lower elements of "dst" using zeromask "k" (elements are zeroed out when mask bit 0 is not set), and copy the upper 6 packed elements from "a" to the upper elements of "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]". + +IF k[0] + dst.fp16[0] := (a.fp16[0] * b.fp16[0]) - (a.fp16[1] * b.fp16[1]) + c.fp16[0] + dst.fp16[1] := (a.fp16[1] * b.fp16[0]) + (a.fp16[0] * b.fp16[1]) + c.fp16[1] +ELSE + dst.fp16[0] := 0 + dst.fp16[1] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - Multiply the lower complex numbers in "a" and "b", accumulate to the lower - complex number in "c", and store the result in the lower elements of "dst", and copy the - upper 6 packed elements from "a" to the upper elements of "dst". Each complex number is - composed of two adjacent half-precision (16-bit) floating-point elements, which defines - the complex number "complex = vec.fp16[0] + i * vec.fp16[1]". - [round_note] - - dst.fp16[0] := (a.fp16[0] * b.fp16[0]) - (a.fp16[1] * b.fp16[1]) + c.fp16[0] - dst.fp16[1] := (a.fp16[1] * b.fp16[0]) + (a.fp16[0] * b.fp16[1]) + c.fp16[1] - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + Multiply the lower complex numbers in "a" and "b", accumulate to the lower complex number in "c", and store the result in the lower elements of "dst", and copy the upper 6 packed elements from "a" to the upper elements of "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]". + [round_note] + +dst.fp16[0] := (a.fp16[0] * b.fp16[0]) - (a.fp16[1] * b.fp16[1]) + c.fp16[0] +dst.fp16[1] := (a.fp16[1] * b.fp16[0]) + (a.fp16[0] * b.fp16[1]) + c.fp16[1] +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Arithmetic
- - - - - - - Multiply the lower complex numbers in "a" and "b", accumulate to the lower - complex number in "c", and store the result in the lower elements of "dst" using - writemask "k" (elements are copied from "a" when mask bit 0 is not set), and copy the - upper 6 packed elements from "a" to the upper elements of "dst". Each complex number is - composed of two adjacent half-precision (16-bit) floating-point elements, which defines - the complex number "complex = vec.fp16[0] + i * vec.fp16[1]". - [round_note] - - IF k[0] - dst.fp16[0] := (a.fp16[0] * b.fp16[0]) - (a.fp16[1] * b.fp16[1]) + c.fp16[0] - dst.fp16[1] := (a.fp16[1] * b.fp16[0]) + (a.fp16[0] * b.fp16[1]) + c.fp16[1] - ELSE - dst.fp16[0] := a.fp16[0] - dst.fp16[1] := a.fp16[1] - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Arithmetic + + + + + + + Multiply the lower complex numbers in "a" and "b", accumulate to the lower complex number in "c", and store the result in the lower elements of "dst" using writemask "k" (elements are copied from "a" when mask bit 0 is not set), and copy the upper 6 packed elements from "a" to the upper elements of "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]". + [round_note] + +IF k[0] + dst.fp16[0] := (a.fp16[0] * b.fp16[0]) - (a.fp16[1] * b.fp16[1]) + c.fp16[0] + dst.fp16[1] := (a.fp16[1] * b.fp16[0]) + (a.fp16[0] * b.fp16[1]) + c.fp16[1] +ELSE + dst.fp16[0] := a.fp16[0] + dst.fp16[1] := a.fp16[1] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
Arithmetic
Multiply the lower complex numbers in "a" and "b", accumulate to the lower complex number in "c", and store the result in the lower elements of "dst" using writemask "k" (elements are copied from "c" when mask bit 0 is not set), and copy the upper 6 packed elements from "c" to the upper elements of "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]".
[round_note]

IF k[0]
	dst.fp16[0] := (a.fp16[0] * b.fp16[0]) - (a.fp16[1] * b.fp16[1]) + c.fp16[0]
	dst.fp16[1] := (a.fp16[1] * b.fp16[0]) + (a.fp16[0] * b.fp16[1]) + c.fp16[1]
ELSE
	dst.fp16[0] := c.fp16[0]
	dst.fp16[1] := c.fp16[1]
FI
dst[127:32] := c[127:32]
dst[MAX:128] := 0

AVX512_FP16
immintrin.h
Arithmetic
Multiply the lower complex numbers in "a" and "b", accumulate to the lower complex number in "c", and store the result in the lower elements of "dst" using zeromask "k" (elements are zeroed out when mask bit 0 is not set), and copy the upper 6 packed elements from "a" to the upper elements of "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]".
[round_note]

IF k[0]
	dst.fp16[0] := (a.fp16[0] * b.fp16[0]) - (a.fp16[1] * b.fp16[1]) + c.fp16[0]
	dst.fp16[1] := (a.fp16[1] * b.fp16[0]) + (a.fp16[0] * b.fp16[1]) + c.fp16[1]
ELSE
	dst.fp16[0] := 0
	dst.fp16[1] := 0
FI
dst[127:32] := a[127:32]
dst[MAX:128] := 0

AVX512_FP16
immintrin.h
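The three masked variants above differ only in where the fallback lane pair comes from. A hedged sketch of that selection logic (plain Rust, names illustrative):

    // Writemask: fallback lanes come from a source vector ("a" or "c" above);
    // zeromask: fallback is zero. Passing [0.0; 2] models the zeromask form.
    fn mask_select(k0: bool, computed: [f32; 2], fallback: [f32; 2]) -> [f32; 2] {
        if k0 { computed } else { fallback }
    }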
Arithmetic
Multiply packed complex numbers in "a" by the complex conjugates of packed complex numbers in "b", accumulate to the corresponding complex numbers in "c", and store the results in "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]".

FOR i := 0 to 15
	dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1]) + c.fp16[2*i+0]
	dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1]) + c.fp16[2*i+1]
ENDFOR
dst[MAX:512] := 0

AVX512_FP16
immintrin.h

Arithmetic
Multiply packed complex numbers in "a" by the complex conjugates of packed complex numbers in "b", accumulate to the corresponding complex numbers in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]".

FOR i := 0 to 15
	IF k[i]
		dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1]) + c.fp16[2*i+0]
		dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1]) + c.fp16[2*i+1]
	ELSE
		dst.fp16[2*i+0] := a.fp16[2*i+0]
		dst.fp16[2*i+1] := a.fp16[2*i+1]
	FI
ENDFOR
dst[MAX:512] := 0

AVX512_FP16
immintrin.h

Arithmetic
Multiply packed complex numbers in "a" by the complex conjugates of packed complex numbers in "b", accumulate to the corresponding complex numbers in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]".

FOR i := 0 to 15
	IF k[i]
		dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1]) + c.fp16[2*i+0]
		dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1]) + c.fp16[2*i+1]
	ELSE
		dst.fp16[2*i+0] := c.fp16[2*i+0]
		dst.fp16[2*i+1] := c.fp16[2*i+1]
	FI
ENDFOR
dst[MAX:512] := 0

AVX512_FP16
immintrin.h

Arithmetic
Multiply packed complex numbers in "a" by the complex conjugates of packed complex numbers in "b", accumulate to the corresponding complex numbers in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]".

FOR i := 0 to 15
	IF k[i]
		dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1]) + c.fp16[2*i+0]
		dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1]) + c.fp16[2*i+1]
	ELSE
		dst.fp16[2*i+0] := 0
		dst.fp16[2*i+1] := 0
	FI
ENDFOR
dst[MAX:512] := 0

AVX512_FP16
immintrin.h

Arithmetic
Multiply packed complex numbers in "a" by the complex conjugates of packed complex numbers in "b", accumulate to the corresponding complex numbers in "c", and store the results in "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]".
[round_note]

FOR i := 0 to 15
	dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1]) + c.fp16[2*i+0]
	dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1]) + c.fp16[2*i+1]
ENDFOR
dst[MAX:512] := 0

AVX512_FP16
immintrin.h

Arithmetic
Multiply packed complex numbers in "a" by the complex conjugates of packed complex numbers in "b", accumulate to the corresponding complex numbers in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]".
[round_note]

FOR i := 0 to 15
	IF k[i]
		dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1]) + c.fp16[2*i+0]
		dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1]) + c.fp16[2*i+1]
	ELSE
		dst.fp16[2*i+0] := a.fp16[2*i+0]
		dst.fp16[2*i+1] := a.fp16[2*i+1]
	FI
ENDFOR
dst[MAX:512] := 0

AVX512_FP16
immintrin.h

Arithmetic
Multiply packed complex numbers in "a" by the complex conjugates of packed complex numbers in "b", accumulate to the corresponding complex numbers in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]".
[round_note]

FOR i := 0 to 15
	IF k[i]
		dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1]) + c.fp16[2*i+0]
		dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1]) + c.fp16[2*i+1]
	ELSE
		dst.fp16[2*i+0] := c.fp16[2*i+0]
		dst.fp16[2*i+1] := c.fp16[2*i+1]
	FI
ENDFOR
dst[MAX:512] := 0

AVX512_FP16
immintrin.h

Arithmetic
Multiply packed complex numbers in "a" by the complex conjugates of packed complex numbers in "b", accumulate to the corresponding complex numbers in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]".
[round_note]

FOR i := 0 to 15
	IF k[i]
		dst.fp16[2*i+0] := (a.fp16[2*i+0] * b.fp16[2*i+0]) + (a.fp16[2*i+1] * b.fp16[2*i+1]) + c.fp16[2*i+0]
		dst.fp16[2*i+1] := (a.fp16[2*i+1] * b.fp16[2*i+0]) - (a.fp16[2*i+0] * b.fp16[2*i+1]) + c.fp16[2*i+1]
	ELSE
		dst.fp16[2*i+0] := 0
		dst.fp16[2*i+1] := 0
	FI
ENDFOR
dst[MAX:512] := 0

AVX512_FP16
immintrin.h
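Conjugating "b" flips the sign on the cross terms relative to the plain complex FMA. A reference loop over packed lane pairs, again with f32 standing in for f16 (a sketch, not the intrinsic's implementation):

    // dst = a * conj(b) + c over packed (real, imag) pairs.
    fn fcmadd_packed(a: &[f32], b: &[f32], c: &[f32], dst: &mut [f32]) {
        for i in 0..a.len() / 2 {
            let (re, im) = (2 * i, 2 * i + 1);
            dst[re] = a[re] * b[re] + a[im] * b[im] + c[re];
            dst[im] = a[im] * b[re] - a[re] * b[im] + c[im];
        }
    }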
Arithmetic
Multiply the lower complex number in "a" by the complex conjugate of the lower complex number in "b", accumulate to the lower complex number in "c", and store the result in the lower elements of "dst", and copy the upper 6 packed elements from "a" to the upper elements of "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]".

dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + (a.fp16[1] * b.fp16[1]) + c.fp16[0]
dst.fp16[1] := (a.fp16[1] * b.fp16[0]) - (a.fp16[0] * b.fp16[1]) + c.fp16[1]
dst[127:32] := a[127:32]
dst[MAX:128] := 0

AVX512_FP16
immintrin.h
Arithmetic
Multiply the lower complex number in "a" by the complex conjugate of the lower complex number in "b", accumulate to the lower complex number in "c", and store the result in the lower elements of "dst" using writemask "k" (elements are copied from "a" when mask bit 0 is not set), and copy the upper 6 packed elements from "a" to the upper elements of "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]".

IF k[0]
	dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + (a.fp16[1] * b.fp16[1]) + c.fp16[0]
	dst.fp16[1] := (a.fp16[1] * b.fp16[0]) - (a.fp16[0] * b.fp16[1]) + c.fp16[1]
ELSE
	dst.fp16[0] := a.fp16[0]
	dst.fp16[1] := a.fp16[1]
FI
dst[127:32] := a[127:32]
dst[MAX:128] := 0

AVX512_FP16
immintrin.h
Arithmetic
Multiply the lower complex number in "a" by the complex conjugate of the lower complex number in "b", accumulate to the lower complex number in "c", and store the result in the lower elements of "dst" using writemask "k" (elements are copied from "c" when mask bit 0 is not set), and copy the upper 6 packed elements from "c" to the upper elements of "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]".

IF k[0]
	dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + (a.fp16[1] * b.fp16[1]) + c.fp16[0]
	dst.fp16[1] := (a.fp16[1] * b.fp16[0]) - (a.fp16[0] * b.fp16[1]) + c.fp16[1]
ELSE
	dst.fp16[0] := c.fp16[0]
	dst.fp16[1] := c.fp16[1]
FI
dst[127:32] := c[127:32]
dst[MAX:128] := 0

AVX512_FP16
immintrin.h
Arithmetic
Multiply the lower complex number in "a" by the complex conjugate of the lower complex number in "b", accumulate to the lower complex number in "c", and store the result in the lower elements of "dst" using zeromask "k" (elements are zeroed out when mask bit 0 is not set), and copy the upper 6 packed elements from "a" to the upper elements of "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]".

IF k[0]
	dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + (a.fp16[1] * b.fp16[1]) + c.fp16[0]
	dst.fp16[1] := (a.fp16[1] * b.fp16[0]) - (a.fp16[0] * b.fp16[1]) + c.fp16[1]
ELSE
	dst.fp16[0] := 0
	dst.fp16[1] := 0
FI
dst[127:32] := a[127:32]
dst[MAX:128] := 0

AVX512_FP16
immintrin.h
Arithmetic
Multiply the lower complex number in "a" by the complex conjugate of the lower complex number in "b", accumulate to the lower complex number in "c", and store the result in the lower elements of "dst", and copy the upper 6 packed elements from "a" to the upper elements of "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]".
[round_note]

dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + (a.fp16[1] * b.fp16[1]) + c.fp16[0]
dst.fp16[1] := (a.fp16[1] * b.fp16[0]) - (a.fp16[0] * b.fp16[1]) + c.fp16[1]
dst[127:32] := a[127:32]
dst[MAX:128] := 0

AVX512_FP16
immintrin.h
Arithmetic
Multiply the lower complex number in "a" by the complex conjugate of the lower complex number in "b", accumulate to the lower complex number in "c", and store the result in the lower elements of "dst" using writemask "k" (elements are copied from "a" when mask bit 0 is not set), and copy the upper 6 packed elements from "a" to the upper elements of "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]".
[round_note]

IF k[0]
	dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + (a.fp16[1] * b.fp16[1]) + c.fp16[0]
	dst.fp16[1] := (a.fp16[1] * b.fp16[0]) - (a.fp16[0] * b.fp16[1]) + c.fp16[1]
ELSE
	dst.fp16[0] := a.fp16[0]
	dst.fp16[1] := a.fp16[1]
FI
dst[127:32] := a[127:32]
dst[MAX:128] := 0

AVX512_FP16
immintrin.h
Arithmetic
Multiply the lower complex number in "a" by the complex conjugate of the lower complex number in "b", accumulate to the lower complex number in "c", and store the result in the lower elements of "dst" using writemask "k" (elements are copied from "c" when mask bit 0 is not set), and copy the upper 6 packed elements from "c" to the upper elements of "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]".
[round_note]

IF k[0]
	dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + (a.fp16[1] * b.fp16[1]) + c.fp16[0]
	dst.fp16[1] := (a.fp16[1] * b.fp16[0]) - (a.fp16[0] * b.fp16[1]) + c.fp16[1]
ELSE
	dst.fp16[0] := c.fp16[0]
	dst.fp16[1] := c.fp16[1]
FI
dst[127:32] := c[127:32]
dst[MAX:128] := 0

AVX512_FP16
immintrin.h
Arithmetic
Multiply the lower complex number in "a" by the complex conjugate of the lower complex number in "b", accumulate to the lower complex number in "c", and store the result in the lower elements of "dst" using zeromask "k" (elements are zeroed out when mask bit 0 is not set), and copy the upper 6 packed elements from "a" to the upper elements of "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]".
[round_note]

IF k[0]
	dst.fp16[0] := (a.fp16[0] * b.fp16[0]) + (a.fp16[1] * b.fp16[1]) + c.fp16[0]
	dst.fp16[1] := (a.fp16[1] * b.fp16[0]) - (a.fp16[0] * b.fp16[1]) + c.fp16[1]
ELSE
	dst.fp16[0] := 0
	dst.fp16[1] := 0
FI
dst[127:32] := a[127:32]
dst[MAX:128] := 0

AVX512_FP16
immintrin.h
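For the scalar (lower-element) forms, only the low complex pair is computed; the remaining lanes pass through from one operand. A sketch, with an 8-lane f32 array loosely modeling a __m128h register:

    fn fcmadd_sch(a: [f32; 8], b: [f32; 8], c: [f32; 8]) -> [f32; 8] {
        let mut dst = a; // upper 6 lanes copied from "a"
        dst[0] = a[0] * b[0] + a[1] * b[1] + c[0];
        dst[1] = a[1] * b[0] - a[0] * b[1] + c[1];
        dst
    }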
Arithmetic
Reduce the packed half-precision (16-bit) floating-point elements in "a" by addition. Returns the sum of all elements in "a".

tmp := a
FOR i := 0 to 15
	tmp.fp16[i] := tmp.fp16[i] + a.fp16[i+16]
ENDFOR
FOR i := 0 to 7
	tmp.fp16[i] := tmp.fp16[i] + tmp.fp16[i+8]
ENDFOR
FOR i := 0 to 3
	tmp.fp16[i] := tmp.fp16[i] + tmp.fp16[i+4]
ENDFOR
FOR i := 0 to 1
	tmp.fp16[i] := tmp.fp16[i] + tmp.fp16[i+2]
ENDFOR
dst.fp16[0] := tmp.fp16[0] + tmp.fp16[1]

AVX512_FP16
immintrin.h
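The reduction halves the active width each step (32 → 16 → 8 → 4 → 2 → 1) rather than accumulating left to right, so the rounding behavior follows the tree order. A compact reference model:

    fn reduce_add(v: [f32; 32]) -> f32 {
        let mut tmp = v;
        let mut n = 16;
        while n >= 1 {
            for i in 0..n {
                tmp[i] += tmp[i + n]; // pairwise tree step
            }
            n /= 2;
        }
        tmp[0]
    }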
Arithmetic
Reduce the packed half-precision (16-bit) floating-point elements in "a" by multiplication. Returns the product of all elements in "a".

tmp := a
FOR i := 0 to 15
	tmp.fp16[i] := tmp.fp16[i] * a.fp16[i+16]
ENDFOR
FOR i := 0 to 7
	tmp.fp16[i] := tmp.fp16[i] * tmp.fp16[i+8]
ENDFOR
FOR i := 0 to 3
	tmp.fp16[i] := tmp.fp16[i] * tmp.fp16[i+4]
ENDFOR
FOR i := 0 to 1
	tmp.fp16[i] := tmp.fp16[i] * tmp.fp16[i+2]
ENDFOR
dst.fp16[0] := tmp.fp16[0] * tmp.fp16[1]

AVX512_FP16
immintrin.h
Arithmetic
Reduce the packed half-precision (16-bit) floating-point elements in "a" by maximum. Returns the maximum of all elements in "a". [max_float_note]

tmp := a
FOR i := 0 to 15
	tmp.fp16[i] := (a.fp16[i] > a.fp16[i+16] ? a.fp16[i] : a.fp16[i+16])
ENDFOR
FOR i := 0 to 7
	tmp.fp16[i] := (tmp.fp16[i] > tmp.fp16[i+8] ? tmp.fp16[i] : tmp.fp16[i+8])
ENDFOR
FOR i := 0 to 3
	tmp.fp16[i] := (tmp.fp16[i] > tmp.fp16[i+4] ? tmp.fp16[i] : tmp.fp16[i+4])
ENDFOR
FOR i := 0 to 1
	tmp.fp16[i] := (tmp.fp16[i] > tmp.fp16[i+2] ? tmp.fp16[i] : tmp.fp16[i+2])
ENDFOR
dst.fp16[0] := (tmp.fp16[0] > tmp.fp16[1] ? tmp.fp16[0] : tmp.fp16[1])

AVX512_FP16
immintrin.h
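The max reduction keeps the same tree shape, selecting the larger lane at each step. Note that the two-operand select in the pseudocode is not NaN-symmetric (it returns the second operand whenever the comparison is false), which the sketch below mirrors:

    fn reduce_max(v: [f32; 32]) -> f32 {
        let mut tmp = v;
        let mut n = 16;
        while n >= 1 {
            for i in 0..n {
                tmp[i] = if tmp[i] > tmp[i + n] { tmp[i] } else { tmp[i + n] };
            }
            n /= 2;
        }
        tmp[0]
    }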
Arithmetic
Reduce the packed half-precision (16-bit) floating-point elements in "a" by minimum. Returns the minimum of all elements in "a". [min_float_note]

tmp := a
FOR i := 0 to 15
	tmp.fp16[i] := (a.fp16[i] < a.fp16[i+16] ? a.fp16[i] : a.fp16[i+16])
ENDFOR
FOR i := 0 to 7
	tmp.fp16[i] := (tmp.fp16[i] < tmp.fp16[i+8] ? tmp.fp16[i] : tmp.fp16[i+8])
ENDFOR
FOR i := 0 to 3
	tmp.fp16[i] := (tmp.fp16[i] < tmp.fp16[i+4] ? tmp.fp16[i] : tmp.fp16[i+4])
ENDFOR
FOR i := 0 to 1
	tmp.fp16[i] := (tmp.fp16[i] < tmp.fp16[i+2] ? tmp.fp16[i] : tmp.fp16[i+2])
ENDFOR
dst.fp16[0] := (tmp.fp16[0] < tmp.fp16[1] ? tmp.fp16[0] : tmp.fp16[1])

AVX512_FP16
immintrin.h
Arithmetic
Finds the absolute value of each packed half-precision (16-bit) floating-point element in "v2", storing the results in "dst".

FOR j := 0 to 31
	dst.fp16[j] := ABS(v2.fp16[j])
ENDFOR
dst[MAX:512] := 0

AVX512_FP16
immintrin.h
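For IEEE 754 half-precision, ABS amounts to clearing the sign bit, which is how implementations typically lower it. A one-line bit-level model on the raw f16 encoding:

    fn abs_f16_bits(x: u16) -> u16 {
        x & 0x7FFF // clear bit 15, the sign bit of an f16
    }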
Arithmetic
Compute the complex conjugates of complex numbers in "a", and store the results in "dst". Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]".

FOR j := 0 to 15
	i := j*32
	dst[i+31:i] := a[i+31:i] XOR FP32(-0.0)
ENDFOR
dst[MAX:512] := 0

AVX512_FP16
immintrin.h
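Conjugation only flips the sign of the imaginary half of each pair; treating each complex number as one 32-bit lane, that is an XOR with the bit pattern of FP32(-0.0):

    fn conj_pair_bits(pair: u32) -> u32 {
        pair ^ 0x8000_0000 // flip bit 31: the sign of the upper (imaginary) f16
    }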
Arithmetic
Compute the complex conjugates of complex numbers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]".

FOR j := 0 to 15
	i := j*32
	IF k[j]
		dst[i+31:i] := a[i+31:i] XOR FP32(-0.0)
	ELSE
		dst[i+31:i] := src[i+31:i]
	FI
ENDFOR
dst[MAX:512] := 0

AVX512_FP16
immintrin.h
Arithmetic
Compute the complex conjugates of complex numbers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Each complex number is composed of two adjacent half-precision (16-bit) floating-point elements, which defines the complex number "complex = vec.fp16[0] + i * vec.fp16[1]", or the complex conjugate "conjugate = vec.fp16[0] - i * vec.fp16[1]".

FOR j := 0 to 15
	i := j*32
	IF k[j]
		dst[i+31:i] := a[i+31:i] XOR FP32(-0.0)
	ELSE
		dst[i+31:i] := 0
	FI
ENDFOR
dst[MAX:512] := 0

AVX512_FP16
immintrin.h
Compare
Compare packed half-precision (16-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".

CASE (imm8[4:0]) OF
0: OP := _CMP_EQ_OQ
1: OP := _CMP_LT_OS
2: OP := _CMP_LE_OS
3: OP := _CMP_UNORD_Q
4: OP := _CMP_NEQ_UQ
5: OP := _CMP_NLT_US
6: OP := _CMP_NLE_US
7: OP := _CMP_ORD_Q
8: OP := _CMP_EQ_UQ
9: OP := _CMP_NGE_US
10: OP := _CMP_NGT_US
11: OP := _CMP_FALSE_OQ
12: OP := _CMP_NEQ_OQ
13: OP := _CMP_GE_OS
14: OP := _CMP_GT_OS
15: OP := _CMP_TRUE_UQ
16: OP := _CMP_EQ_OS
17: OP := _CMP_LT_OQ
18: OP := _CMP_LE_OQ
19: OP := _CMP_UNORD_S
20: OP := _CMP_NEQ_US
21: OP := _CMP_NLT_UQ
22: OP := _CMP_NLE_UQ
23: OP := _CMP_ORD_S
24: OP := _CMP_EQ_US
25: OP := _CMP_NGE_UQ
26: OP := _CMP_NGT_UQ
27: OP := _CMP_FALSE_OS
28: OP := _CMP_NEQ_OS
29: OP := _CMP_GE_OQ
30: OP := _CMP_GT_OQ
31: OP := _CMP_TRUE_US
ESAC
FOR j := 0 to 31
	k[j] := (a.fp16[j] OP b.fp16[j]) ? 1 : 0
ENDFOR
k[MAX:32] := 0

AVX512_FP16
immintrin.h
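Each of the 32 predicates encodes an ordered/unordered and signaling/quiet flavor, and the comparison produces one mask bit per lane. A reference model for the _CMP_EQ_OQ case (f32 lanes standing in for f16):

    fn cmp_eq_oq_mask(a: &[f32; 32], b: &[f32; 32]) -> u32 {
        let mut k = 0u32;
        for j in 0..32 {
            // Ordered, quiet equality: any NaN operand compares false.
            if a[j] == b[j] {
                k |= 1 << j;
            }
        }
        k
    }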
Compare
Compare packed half-precision (16-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).

CASE (imm8[4:0]) OF
0: OP := _CMP_EQ_OQ
1: OP := _CMP_LT_OS
2: OP := _CMP_LE_OS
3: OP := _CMP_UNORD_Q
4: OP := _CMP_NEQ_UQ
5: OP := _CMP_NLT_US
6: OP := _CMP_NLE_US
7: OP := _CMP_ORD_Q
8: OP := _CMP_EQ_UQ
9: OP := _CMP_NGE_US
10: OP := _CMP_NGT_US
11: OP := _CMP_FALSE_OQ
12: OP := _CMP_NEQ_OQ
13: OP := _CMP_GE_OS
14: OP := _CMP_GT_OS
15: OP := _CMP_TRUE_UQ
16: OP := _CMP_EQ_OS
17: OP := _CMP_LT_OQ
18: OP := _CMP_LE_OQ
19: OP := _CMP_UNORD_S
20: OP := _CMP_NEQ_US
21: OP := _CMP_NLT_UQ
22: OP := _CMP_NLE_UQ
23: OP := _CMP_ORD_S
24: OP := _CMP_EQ_US
25: OP := _CMP_NGE_UQ
26: OP := _CMP_NGT_UQ
27: OP := _CMP_FALSE_OS
28: OP := _CMP_NEQ_OS
29: OP := _CMP_GE_OQ
30: OP := _CMP_GT_OQ
31: OP := _CMP_TRUE_US
ESAC
FOR j := 0 to 31
	IF k1[j]
		k[j] := (a.fp16[j] OP b.fp16[j]) ? 1 : 0
	ELSE
		k[j] := 0
	FI
ENDFOR
k[MAX:32] := 0

AVX512_FP16
immintrin.h
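With a zeromask, inactive lanes are forced to 0 in the result mask rather than skipped. A sketch extending the previous model (illustrative only):

    fn cmp_eq_oq_mask_z(k1: u32, a: &[f32; 32], b: &[f32; 32]) -> u32 {
        let mut k = 0u32;
        for j in 0..32 {
            if (k1 >> j) & 1 == 1 && a[j] == b[j] {
                k |= 1 << j;
            }
        }
        k
    }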
Compare
Compare packed half-precision (16-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". [sae_note]

CASE (imm8[4:0]) OF
0: OP := _CMP_EQ_OQ
1: OP := _CMP_LT_OS
2: OP := _CMP_LE_OS
3: OP := _CMP_UNORD_Q
4: OP := _CMP_NEQ_UQ
5: OP := _CMP_NLT_US
6: OP := _CMP_NLE_US
7: OP := _CMP_ORD_Q
8: OP := _CMP_EQ_UQ
9: OP := _CMP_NGE_US
10: OP := _CMP_NGT_US
11: OP := _CMP_FALSE_OQ
12: OP := _CMP_NEQ_OQ
13: OP := _CMP_GE_OS
14: OP := _CMP_GT_OS
15: OP := _CMP_TRUE_UQ
16: OP := _CMP_EQ_OS
17: OP := _CMP_LT_OQ
18: OP := _CMP_LE_OQ
19: OP := _CMP_UNORD_S
20: OP := _CMP_NEQ_US
21: OP := _CMP_NLT_UQ
22: OP := _CMP_NLE_UQ
23: OP := _CMP_ORD_S
24: OP := _CMP_EQ_US
25: OP := _CMP_NGE_UQ
26: OP := _CMP_NGT_UQ
27: OP := _CMP_FALSE_OS
28: OP := _CMP_NEQ_OS
29: OP := _CMP_GE_OQ
30: OP := _CMP_GT_OQ
31: OP := _CMP_TRUE_US
ESAC
FOR j := 0 to 31
	k[j] := (a.fp16[j] OP b.fp16[j]) ? 1 : 0
ENDFOR
k[MAX:32] := 0

AVX512_FP16
immintrin.h
Compare
Compare packed half-precision (16-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). [sae_note]

CASE (imm8[4:0]) OF
0: OP := _CMP_EQ_OQ
1: OP := _CMP_LT_OS
2: OP := _CMP_LE_OS
3: OP := _CMP_UNORD_Q
4: OP := _CMP_NEQ_UQ
5: OP := _CMP_NLT_US
6: OP := _CMP_NLE_US
7: OP := _CMP_ORD_Q
8: OP := _CMP_EQ_UQ
9: OP := _CMP_NGE_US
10: OP := _CMP_NGT_US
11: OP := _CMP_FALSE_OQ
12: OP := _CMP_NEQ_OQ
13: OP := _CMP_GE_OS
14: OP := _CMP_GT_OS
15: OP := _CMP_TRUE_UQ
16: OP := _CMP_EQ_OS
17: OP := _CMP_LT_OQ
18: OP := _CMP_LE_OQ
19: OP := _CMP_UNORD_S
20: OP := _CMP_NEQ_US
21: OP := _CMP_NLT_UQ
22: OP := _CMP_NLE_UQ
23: OP := _CMP_ORD_S
24: OP := _CMP_EQ_US
25: OP := _CMP_NGE_UQ
26: OP := _CMP_NGT_UQ
27: OP := _CMP_FALSE_OS
28: OP := _CMP_NEQ_OS
29: OP := _CMP_GE_OQ
30: OP := _CMP_GT_OQ
31: OP := _CMP_TRUE_US
ESAC
FOR j := 0 to 31
	IF k1[j]
		k[j] := (a.fp16[j] OP b.fp16[j]) ? 1 : 0
	ELSE
		k[j] := 0
	FI
ENDFOR
k[MAX:32] := 0

AVX512_FP16
immintrin.h
Compare
Compare the lower half-precision (16-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the result in mask vector "k".

CASE (imm8[4:0]) OF
0: OP := _CMP_EQ_OQ
1: OP := _CMP_LT_OS
2: OP := _CMP_LE_OS
3: OP := _CMP_UNORD_Q
4: OP := _CMP_NEQ_UQ
5: OP := _CMP_NLT_US
6: OP := _CMP_NLE_US
7: OP := _CMP_ORD_Q
8: OP := _CMP_EQ_UQ
9: OP := _CMP_NGE_US
10: OP := _CMP_NGT_US
11: OP := _CMP_FALSE_OQ
12: OP := _CMP_NEQ_OQ
13: OP := _CMP_GE_OS
14: OP := _CMP_GT_OS
15: OP := _CMP_TRUE_UQ
16: OP := _CMP_EQ_OS
17: OP := _CMP_LT_OQ
18: OP := _CMP_LE_OQ
19: OP := _CMP_UNORD_S
20: OP := _CMP_NEQ_US
21: OP := _CMP_NLT_UQ
22: OP := _CMP_NLE_UQ
23: OP := _CMP_ORD_S
24: OP := _CMP_EQ_US
25: OP := _CMP_NGE_UQ
26: OP := _CMP_NGT_UQ
27: OP := _CMP_FALSE_OS
28: OP := _CMP_NEQ_OS
29: OP := _CMP_GE_OQ
30: OP := _CMP_GT_OQ
31: OP := _CMP_TRUE_US
ESAC
k[0] := (a.fp16[0] OP b.fp16[0]) ? 1 : 0
k[MAX:1] := 0

AVX512_FP16
immintrin.h
Compare
Compare the lower half-precision (16-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the result in mask vector "k". [sae_note]

CASE (imm8[4:0]) OF
0: OP := _CMP_EQ_OQ
1: OP := _CMP_LT_OS
2: OP := _CMP_LE_OS
3: OP := _CMP_UNORD_Q
4: OP := _CMP_NEQ_UQ
5: OP := _CMP_NLT_US
6: OP := _CMP_NLE_US
7: OP := _CMP_ORD_Q
8: OP := _CMP_EQ_UQ
9: OP := _CMP_NGE_US
10: OP := _CMP_NGT_US
11: OP := _CMP_FALSE_OQ
12: OP := _CMP_NEQ_OQ
13: OP := _CMP_GE_OS
14: OP := _CMP_GT_OS
15: OP := _CMP_TRUE_UQ
16: OP := _CMP_EQ_OS
17: OP := _CMP_LT_OQ
18: OP := _CMP_LE_OQ
19: OP := _CMP_UNORD_S
20: OP := _CMP_NEQ_US
21: OP := _CMP_NLT_UQ
22: OP := _CMP_NLE_UQ
23: OP := _CMP_ORD_S
24: OP := _CMP_EQ_US
25: OP := _CMP_NGE_UQ
26: OP := _CMP_NGT_UQ
27: OP := _CMP_FALSE_OS
28: OP := _CMP_NEQ_OS
29: OP := _CMP_GE_OQ
30: OP := _CMP_GT_OQ
31: OP := _CMP_TRUE_US
ESAC
k[0] := (a.fp16[0] OP b.fp16[0]) ? 1 : 0
k[MAX:1] := 0

AVX512_FP16
immintrin.h
Compare
Compare the lower half-precision (16-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the result in mask vector "k" using zeromask "k1" (the element is zeroed out when mask bit 0 is not set).

CASE (imm8[4:0]) OF
0: OP := _CMP_EQ_OQ
1: OP := _CMP_LT_OS
2: OP := _CMP_LE_OS
3: OP := _CMP_UNORD_Q
4: OP := _CMP_NEQ_UQ
5: OP := _CMP_NLT_US
6: OP := _CMP_NLE_US
7: OP := _CMP_ORD_Q
8: OP := _CMP_EQ_UQ
9: OP := _CMP_NGE_US
10: OP := _CMP_NGT_US
11: OP := _CMP_FALSE_OQ
12: OP := _CMP_NEQ_OQ
13: OP := _CMP_GE_OS
14: OP := _CMP_GT_OS
15: OP := _CMP_TRUE_UQ
16: OP := _CMP_EQ_OS
17: OP := _CMP_LT_OQ
18: OP := _CMP_LE_OQ
19: OP := _CMP_UNORD_S
20: OP := _CMP_NEQ_US
21: OP := _CMP_NLT_UQ
22: OP := _CMP_NLE_UQ
23: OP := _CMP_ORD_S
24: OP := _CMP_EQ_US
25: OP := _CMP_NGE_UQ
26: OP := _CMP_NGT_UQ
27: OP := _CMP_FALSE_OS
28: OP := _CMP_NEQ_OS
29: OP := _CMP_GE_OQ
30: OP := _CMP_GT_OQ
31: OP := _CMP_TRUE_US
ESAC
IF k1[0]
	k[0] := (a.fp16[0] OP b.fp16[0]) ? 1 : 0
ELSE
	k[0] := 0
FI
k[MAX:1] := 0

AVX512_FP16
immintrin.h
Compare
Compare the lower half-precision (16-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the result in mask vector "k" using zeromask "k1" (the element is zeroed out when mask bit 0 is not set). [sae_note]

CASE (imm8[4:0]) OF
0: OP := _CMP_EQ_OQ
1: OP := _CMP_LT_OS
2: OP := _CMP_LE_OS
3: OP := _CMP_UNORD_Q
4: OP := _CMP_NEQ_UQ
5: OP := _CMP_NLT_US
6: OP := _CMP_NLE_US
7: OP := _CMP_ORD_Q
8: OP := _CMP_EQ_UQ
9: OP := _CMP_NGE_US
10: OP := _CMP_NGT_US
11: OP := _CMP_FALSE_OQ
12: OP := _CMP_NEQ_OQ
13: OP := _CMP_GE_OS
14: OP := _CMP_GT_OS
15: OP := _CMP_TRUE_UQ
16: OP := _CMP_EQ_OS
17: OP := _CMP_LT_OQ
18: OP := _CMP_LE_OQ
19: OP := _CMP_UNORD_S
20: OP := _CMP_NEQ_US
21: OP := _CMP_NLT_UQ
22: OP := _CMP_NLE_UQ
23: OP := _CMP_ORD_S
24: OP := _CMP_EQ_US
25: OP := _CMP_NGE_UQ
26: OP := _CMP_NGT_UQ
27: OP := _CMP_FALSE_OS
28: OP := _CMP_NEQ_OS
29: OP := _CMP_GE_OQ
30: OP := _CMP_GT_OQ
31: OP := _CMP_TRUE_US
ESAC
IF k1[0]
	k[0] := (a.fp16[0] OP b.fp16[0]) ? 1 : 0
ELSE
	k[0] := 0
FI
k[MAX:1] := 0

AVX512_FP16
immintrin.h
+ Compare
- - - - - Compare the lower half-precision (16-bit) floating-point elements in "a" and - "b" based on the comparison operand specified by "imm8", and return the boolean result - (0 or 1). - CASE (imm8[4:0]) OF - 0: OP := _CMP_EQ_OQ - 1: OP := _CMP_LT_OS - 2: OP := _CMP_LE_OS - 3: OP := _CMP_UNORD_Q - 4: OP := _CMP_NEQ_UQ - 5: OP := _CMP_NLT_US - 6: OP := _CMP_NLE_US - 7: OP := _CMP_ORD_Q - 8: OP := _CMP_EQ_UQ - 9: OP := _CMP_NGE_US - 10: OP := _CMP_NGT_US - 11: OP := _CMP_FALSE_OQ - 12: OP := _CMP_NEQ_OQ - 13: OP := _CMP_GE_OS - 14: OP := _CMP_GT_OS - 15: OP := _CMP_TRUE_UQ - 16: OP := _CMP_EQ_OS - 17: OP := _CMP_LT_OQ - 18: OP := _CMP_LE_OQ - 19: OP := _CMP_UNORD_S - 20: OP := _CMP_NEQ_US - 21: OP := _CMP_NLT_UQ - 22: OP := _CMP_NLE_UQ - 23: OP := _CMP_ORD_S - 24: OP := _CMP_EQ_US - 25: OP := _CMP_NGE_UQ - 26: OP := _CMP_NGT_UQ - 27: OP := _CMP_FALSE_OS - 28: OP := _CMP_NEQ_OS - 29: OP := _CMP_GE_OQ - 30: OP := _CMP_GT_OQ - 31: OP := _CMP_TRUE_US - ESAC - RETURN ( a.fp16[0] OP b.fp16[0] ) ? 1 : 0 - - - AVX512_FP16 -
immintrin.h
- Compare + + + + + Compare the lower half-precision (16-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and return the boolean result (0 or 1). + CASE (imm8[4:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC +RETURN ( a.fp16[0] OP b.fp16[0] ) ? 1 : 0 + + + AVX512_FP16 +
immintrin.h
+ Compare
- - - - - - Compare the lower half-precision (16-bit) floating-point elements in "a" and - "b" based on the comparison operand specified by "imm8", and return the boolean result - (0 or 1). [sae_note] - CASE (imm8[4:0]) OF - 0: OP := _CMP_EQ_OQ - 1: OP := _CMP_LT_OS - 2: OP := _CMP_LE_OS - 3: OP := _CMP_UNORD_Q - 4: OP := _CMP_NEQ_UQ - 5: OP := _CMP_NLT_US - 6: OP := _CMP_NLE_US - 7: OP := _CMP_ORD_Q - 8: OP := _CMP_EQ_UQ - 9: OP := _CMP_NGE_US - 10: OP := _CMP_NGT_US - 11: OP := _CMP_FALSE_OQ - 12: OP := _CMP_NEQ_OQ - 13: OP := _CMP_GE_OS - 14: OP := _CMP_GT_OS - 15: OP := _CMP_TRUE_UQ - 16: OP := _CMP_EQ_OS - 17: OP := _CMP_LT_OQ - 18: OP := _CMP_LE_OQ - 19: OP := _CMP_UNORD_S - 20: OP := _CMP_NEQ_US - 21: OP := _CMP_NLT_UQ - 22: OP := _CMP_NLE_UQ - 23: OP := _CMP_ORD_S - 24: OP := _CMP_EQ_US - 25: OP := _CMP_NGE_UQ - 26: OP := _CMP_NGT_UQ - 27: OP := _CMP_FALSE_OS - 28: OP := _CMP_NEQ_OS - 29: OP := _CMP_GE_OQ - 30: OP := _CMP_GT_OQ - 31: OP := _CMP_TRUE_US - ESAC - RETURN ( a.fp16[0] OP b.fp16[0] ) ? 1 : 0 - - - AVX512_FP16 -
immintrin.h
- Compare + + + + + + Compare the lower half-precision (16-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and return the boolean result (0 or 1). [sae_note] + CASE (imm8[4:0]) OF +0: OP := _CMP_EQ_OQ +1: OP := _CMP_LT_OS +2: OP := _CMP_LE_OS +3: OP := _CMP_UNORD_Q +4: OP := _CMP_NEQ_UQ +5: OP := _CMP_NLT_US +6: OP := _CMP_NLE_US +7: OP := _CMP_ORD_Q +8: OP := _CMP_EQ_UQ +9: OP := _CMP_NGE_US +10: OP := _CMP_NGT_US +11: OP := _CMP_FALSE_OQ +12: OP := _CMP_NEQ_OQ +13: OP := _CMP_GE_OS +14: OP := _CMP_GT_OS +15: OP := _CMP_TRUE_UQ +16: OP := _CMP_EQ_OS +17: OP := _CMP_LT_OQ +18: OP := _CMP_LE_OQ +19: OP := _CMP_UNORD_S +20: OP := _CMP_NEQ_US +21: OP := _CMP_NLT_UQ +22: OP := _CMP_NLE_UQ +23: OP := _CMP_ORD_S +24: OP := _CMP_EQ_US +25: OP := _CMP_NGE_UQ +26: OP := _CMP_NGT_UQ +27: OP := _CMP_FALSE_OS +28: OP := _CMP_NEQ_OS +29: OP := _CMP_GE_OQ +30: OP := _CMP_GT_OQ +31: OP := _CMP_TRUE_US +ESAC +RETURN ( a.fp16[0] OP b.fp16[0] ) ? 1 : 0 + + + AVX512_FP16 +
immintrin.h
+ Compare
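The imm8[4:0] table above is the same predicate encoding that AVX introduced for `_mm_cmp_ps` and friends, so the `_CMP_*` constants already exported (stable) by `core::arch` can be reused verbatim by the FP16 comparisons. A minimal sketch, assuming an x86_64 target and using only constants that stdarch already defines; nothing here is part of this patch:

    use core::arch::x86_64::{
        _CMP_EQ_OQ, _CMP_LT_OS, _CMP_NEQ_UQ, _CMP_ORD_Q, _CMP_TRUE_US, _CMP_UNORD_Q,
    };

    fn main() {
        // Only imm8[4:0] selects the predicate, exactly as in `CASE (imm8[4:0]) OF`.
        for imm8 in [_CMP_EQ_OQ, _CMP_LT_OS, _CMP_UNORD_Q, _CMP_NEQ_UQ, _CMP_ORD_Q, _CMP_TRUE_US] {
            assert_eq!(imm8 & !0x1f, 0, "every predicate fits in imm8[4:0]");
        }
        // The table index is the constant's value: 0 is _CMP_EQ_OQ, 31 is _CMP_TRUE_US.
        // The suffix encodes NaN behaviour: O = ordered, U = unordered,
        // Q = quiet, S = signaling.
        assert_eq!(_CMP_EQ_OQ, 0x00);
        assert_eq!(_CMP_TRUE_US, 0x1f);
    }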
+ Compare the lower half-precision (16-bit) floating-point elements in "a" and "b" for equality, and return the boolean result (0 or 1).
+ RETURN ( a.fp16[0] !=NaN AND b.fp16[0] !=NaN AND a.fp16[0] == b.fp16[0] ) ? 1 : 0
+ AVX512_FP16
+ immintrin.h
+ Compare
+
+ Compare the lower half-precision (16-bit) floating-point elements in "a" and "b" for less-than, and return the boolean result (0 or 1).
+ RETURN ( a.fp16[0] !=NaN AND b.fp16[0] !=NaN AND a.fp16[0] < b.fp16[0] ) ? 1 : 0
+ AVX512_FP16
+ immintrin.h
+ Compare
+
+ Compare the lower half-precision (16-bit) floating-point elements in "a" and "b" for less-than-or-equal, and return the boolean result (0 or 1).
+ RETURN ( a.fp16[0] !=NaN AND b.fp16[0] !=NaN AND a.fp16[0] <= b.fp16[0] ) ? 1 : 0
+ AVX512_FP16
+ immintrin.h
+ Compare
+
+ Compare the lower half-precision (16-bit) floating-point elements in "a" and "b" for greater-than, and return the boolean result (0 or 1).
+ RETURN ( a.fp16[0] !=NaN AND b.fp16[0] !=NaN AND a.fp16[0] > b.fp16[0] ) ? 1 : 0
+ AVX512_FP16
+ immintrin.h
+ Compare
+
+ Compare the lower half-precision (16-bit) floating-point elements in "a" and "b" for greater-than-or-equal, and return the boolean result (0 or 1).
+ RETURN ( a.fp16[0] !=NaN AND b.fp16[0] !=NaN AND a.fp16[0] >= b.fp16[0] ) ? 1 : 0
+ AVX512_FP16
+ immintrin.h
+ Compare
+
+ Compare the lower half-precision (16-bit) floating-point elements in "a" and "b" for not-equal, and return the boolean result (0 or 1).
+ RETURN ( a.fp16[0] ==NaN OR b.fp16[0] ==NaN OR a.fp16[0] != b.fp16[0] ) ? 1 : 0
+ AVX512_FP16
+ immintrin.h
+ Compare
+
+ Compare the lower half-precision (16-bit) floating-point elements in "a" and "b" for equality, and return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs.
+ RETURN ( a.fp16[0] !=NaN AND b.fp16[0] !=NaN AND a.fp16[0] == b.fp16[0] ) ? 1 : 0
+ AVX512_FP16
+ immintrin.h
+ Compare
+
+ Compare the lower half-precision (16-bit) floating-point elements in "a" and "b" for less-than, and return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs.
+ RETURN ( a.fp16[0] !=NaN AND b.fp16[0] !=NaN AND a.fp16[0] < b.fp16[0] ) ? 1 : 0
+ AVX512_FP16
+ immintrin.h
+ Compare
+
+ Compare the lower half-precision (16-bit) floating-point elements in "a" and "b" for less-than-or-equal, and return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs.
+ RETURN ( a.fp16[0] !=NaN AND b.fp16[0] !=NaN AND a.fp16[0] <= b.fp16[0] ) ? 1 : 0
+ AVX512_FP16
+ immintrin.h
+ Compare
+
+ Compare the lower half-precision (16-bit) floating-point elements in "a" and "b" for greater-than, and return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs.
+ RETURN ( a.fp16[0] !=NaN AND b.fp16[0] !=NaN AND a.fp16[0] > b.fp16[0] ) ? 1 : 0
+ AVX512_FP16
+ immintrin.h
+ Compare
+
+ Compare the lower half-precision (16-bit) floating-point elements in "a" and "b" for greater-than-or-equal, and return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs.
+ RETURN ( a.fp16[0] !=NaN AND b.fp16[0] !=NaN AND a.fp16[0] >= b.fp16[0] ) ? 1 : 0
+ AVX512_FP16
+ immintrin.h
+ Compare
+
+ Compare the lower half-precision (16-bit) floating-point elements in "a" and "b" for not-equal, and return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs.
+ RETURN ( a.fp16[0] ==NaN OR b.fp16[0] ==NaN OR a.fp16[0] != b.fp16[0] ) ? 1 : 0
+ AVX512_FP16
+ immintrin.h
+ Compare
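Each signaling/quiet pair above differs only in whether a QNaN operand raises an invalid-operation exception; the ordered/unordered split is what decides the returned bit when a NaN is present. Plain Rust float comparisons model that split exactly, which is useful when writing tests later in this series. A small illustration, using `f32` as a stand-in for the still-unstable `f16` (not part of the patch):

    fn main() {
        let nan = f32::NAN;
        // ( a !=NaN AND b !=NaN AND a == b ): Rust's `==` on floats is this
        // ordered equality; any NaN operand makes it false.
        assert!(!(nan == nan));
        assert!(!(nan < 1.0) && !(nan >= 1.0));
        // ( a ==NaN OR b ==NaN OR a != b ): Rust's `!=` is the unordered
        // not-equal; a NaN operand makes it true.
        assert!(nan != nan);
    }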
+ Convert packed signed 16-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst".
+ FOR j := 0 TO 31
+   dst.fp16[j] := Convert_Int16_To_FP16(a.word[j])
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert packed signed 16-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst".
+ [round_note]
+ FOR j := 0 TO 31
+   dst.fp16[j] := Convert_Int16_To_FP16(a.word[j])
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert packed signed 16-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ FOR j := 0 TO 31
+   IF k[j]
+     dst.fp16[j] := Convert_Int16_To_FP16(a.word[j])
+   ELSE
+     dst.fp16[j] := src.fp16[j]
+   FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert packed signed 16-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]
+ FOR j := 0 TO 31
+   IF k[j]
+     dst.fp16[j] := Convert_Int16_To_FP16(a.word[j])
+   ELSE
+     dst.fp16[j] := src.fp16[j]
+   FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert packed signed 16-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ FOR j := 0 TO 31
+   IF k[j]
+     dst.fp16[j] := Convert_Int16_To_FP16(a.word[j])
+   ELSE
+     dst.fp16[j] := 0
+   FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert packed signed 16-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note]
+ FOR j := 0 TO 31
+   IF k[j]
+     dst.fp16[j] := Convert_Int16_To_FP16(a.word[j])
+   ELSE
+     dst.fp16[j] := 0
+   FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
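The writemask/zeromask pattern above repeats for every masked conversion in this file: lane `j` takes the converted value when bit `j` of `k` is set, and otherwise either keeps `src` (writemask) or becomes zero (zeromask). A behavioural sketch in plain scalar Rust, assuming `f32` as a stand-in for `f16` and arrays for the 512-bit registers; the helper names are hypothetical and not part of the patch:

    fn mask_cvtepi16(src: &[f32; 32], k: u32, a: &[i16; 32]) -> [f32; 32] {
        let mut dst = [0.0f32; 32];
        for j in 0..32 {
            // Writemask: unselected lanes keep the corresponding `src` lane.
            dst[j] = if (k >> j) & 1 == 1 { a[j] as f32 } else { src[j] };
        }
        dst
    }

    fn maskz_cvtepi16(k: u32, a: &[i16; 32]) -> [f32; 32] {
        let mut dst = [0.0f32; 32];
        for j in 0..32 {
            // Zeromask: unselected lanes are zeroed.
            dst[j] = if (k >> j) & 1 == 1 { a[j] as f32 } else { 0.0 };
        }
        dst
    }

    fn main() {
        let a = [7i16; 32];
        let src = [1.0f32; 32];
        let k = 0b1010;
        assert_eq!(mask_cvtepi16(&src, k, &a)[..4], [1.0, 7.0, 1.0, 7.0]);
        assert_eq!(maskz_cvtepi16(k, &a)[..4], [0.0, 7.0, 0.0, 7.0]);
    }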
+ Convert packed unsigned 16-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst".
+ FOR j := 0 TO 31
+   dst.fp16[j] := Convert_Int16_To_FP16(a.word[j])
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert packed unsigned 16-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst".
+ [round_note]
+ FOR j := 0 TO 31
+   dst.fp16[j] := Convert_Int16_To_FP16(a.word[j])
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert packed unsigned 16-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ FOR j := 0 TO 31
+   IF k[j]
+     dst.fp16[j] := Convert_Int16_To_FP16(a.word[j])
+   ELSE
+     dst.fp16[j] := src.fp16[j]
+   FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert packed unsigned 16-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]
+ FOR j := 0 TO 31
+   IF k[j]
+     dst.fp16[j] := Convert_Int16_To_FP16(a.word[j])
+   ELSE
+     dst.fp16[j] := src.fp16[j]
+   FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert packed unsigned 16-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ FOR j := 0 TO 31
+   IF k[j]
+     dst.fp16[j] := Convert_Int16_To_FP16(a.word[j])
+   ELSE
+     dst.fp16[j] := 0
+   FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert packed unsigned 16-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note]
+ FOR j := 0 TO 31
+   IF k[j]
+     dst.fp16[j] := Convert_Int16_To_FP16(a.word[j])
+   ELSE
+     dst.fp16[j] := 0
+   FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+ Convert packed signed 32-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst".
+ FOR j := 0 TO 15
+   dst.fp16[j] := Convert_Int32_To_FP16(a.dword[j])
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert packed signed 32-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst".
+ [round_note]
+ FOR j := 0 TO 15
+   dst.fp16[j] := Convert_Int32_To_FP16(a.dword[j])
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert packed signed 32-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ FOR j := 0 TO 15
+   IF k[j]
+     dst.fp16[j] := Convert_Int32_To_FP16(a.dword[j])
+   ELSE
+     dst.fp16[j] := src.fp16[j]
+   FI
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert packed signed 32-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]
+ FOR j := 0 TO 15
+   IF k[j]
+     dst.fp16[j] := Convert_Int32_To_FP16(a.dword[j])
+   ELSE
+     dst.fp16[j] := src.fp16[j]
+   FI
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert packed signed 32-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ FOR j := 0 TO 15
+   IF k[j]
+     dst.fp16[j] := Convert_Int32_To_FP16(a.dword[j])
+   ELSE
+     dst.fp16[j] := 0
+   FI
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert packed signed 32-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note]
+ FOR j := 0 TO 15
+   IF k[j]
+     dst.fp16[j] := Convert_Int32_To_FP16(a.dword[j])
+   ELSE
+     dst.fp16[j] := 0
+   FI
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
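Note the narrowing in the 32-bit conversions above: the source register is 512 bits wide, but the sixteen converted half floats only occupy 256 bits, which is why the pseudocode zeroes `dst[MAX:256]` and the result is a `__m256h`. A size sanity-check, assuming `u16` as a layout stand-in for the still-unstable `f16` (not part of the patch):

    fn main() {
        // Sixteen 32-bit lanes fill the 512-bit source register...
        assert_eq!(16 * 32, 512);
        // ...but sixteen 16-bit half floats need only 256 bits,
        // hence `dst[MAX:256] := 0` and a 256-bit destination.
        assert_eq!(16 * 16, 256);
        assert_eq!(core::mem::size_of::<[u16; 16]>() * 8, 256);
    }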
+ Convert packed unsigned 32-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst".
+ FOR j := 0 TO 15
+   dst.fp16[j] := Convert_Int32_To_FP16(a.dword[j])
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert packed unsigned 32-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst".
+ [round_note]
+ FOR j := 0 TO 15
+   dst.fp16[j] := Convert_Int32_To_FP16(a.dword[j])
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert packed unsigned 32-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ FOR j := 0 TO 15
+   IF k[j]
+     dst.fp16[j] := Convert_Int32_To_FP16(a.dword[j])
+   ELSE
+     dst.fp16[j] := src.fp16[j]
+   FI
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert packed unsigned 32-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]
+ FOR j := 0 TO 15
+   IF k[j]
+     dst.fp16[j] := Convert_Int32_To_FP16(a.dword[j])
+   ELSE
+     dst.fp16[j] := src.fp16[j]
+   FI
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert packed unsigned 32-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ FOR j := 0 TO 15
+   IF k[j]
+     dst.fp16[j] := Convert_Int32_To_FP16(a.dword[j])
+   ELSE
+     dst.fp16[j] := 0
+   FI
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert packed unsigned 32-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note]
+ FOR j := 0 TO 15
+   IF k[j]
+     dst.fp16[j] := Convert_Int32_To_FP16(a.dword[j])
+   ELSE
+     dst.fp16[j] := 0
+   FI
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+ Convert packed signed 64-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst".
+ FOR j := 0 TO 7
+   dst.fp16[j] := Convert_Int64_To_FP16(a.qword[j])
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert packed signed 64-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst".
+ [round_note]
+ FOR j := 0 TO 7
+   dst.fp16[j] := Convert_Int64_To_FP16(a.qword[j])
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert packed signed 64-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ FOR j := 0 TO 7
+   IF k[j]
+     dst.fp16[j] := Convert_Int64_To_FP16(a.qword[j])
+   ELSE
+     dst.fp16[j] := src.fp16[j]
+   FI
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert packed signed 64-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]
+ FOR j := 0 TO 7
+   IF k[j]
+     dst.fp16[j] := Convert_Int64_To_FP16(a.qword[j])
+   ELSE
+     dst.fp16[j] := src.fp16[j]
+   FI
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert packed signed 64-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ FOR j := 0 TO 7
+   IF k[j]
+     dst.fp16[j] := Convert_Int64_To_FP16(a.qword[j])
+   ELSE
+     dst.fp16[j] := 0
+   FI
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert packed signed 64-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note]
+ FOR j := 0 TO 7
+   IF k[j]
+     dst.fp16[j] := Convert_Int64_To_FP16(a.qword[j])
+   ELSE
+     dst.fp16[j] := 0
+   FI
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+ Convert packed unsigned 64-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst".
+ FOR j := 0 TO 7
+   dst.fp16[j] := Convert_Int64_To_FP16(a.qword[j])
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert packed unsigned 64-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst".
+ [round_note]
+ FOR j := 0 TO 7
+   dst.fp16[j] := Convert_Int64_To_FP16(a.qword[j])
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert packed unsigned 64-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ FOR j := 0 TO 7
+   IF k[j]
+     dst.fp16[j] := Convert_Int64_To_FP16(a.qword[j])
+   ELSE
+     dst.fp16[j] := src.fp16[j]
+   FI
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert packed unsigned 64-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]
+ FOR j := 0 TO 7
+   IF k[j]
+     dst.fp16[j] := Convert_Int64_To_FP16(a.qword[j])
+   ELSE
+     dst.fp16[j] := src.fp16[j]
+   FI
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert packed unsigned 64-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ FOR j := 0 TO 7
+   IF k[j]
+     dst.fp16[j] := Convert_Int64_To_FP16(a.qword[j])
+   ELSE
+     dst.fp16[j] := 0
+   FI
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert packed unsigned 64-bit integers in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note]
+ FOR j := 0 TO 7
+   IF k[j]
+     dst.fp16[j] := Convert_Int64_To_FP16(a.qword[j])
+   ELSE
+     dst.fp16[j] := 0
+   FI
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+ Convert packed double-precision (64-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst".
+ FOR j := 0 TO 7
+   dst.fp16[j] := Convert_FP64_To_FP16(a.fp64[j])
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert packed double-precision (64-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst".
+ [round_note]
+ FOR j := 0 TO 7
+   dst.fp16[j] := Convert_FP64_To_FP16(a.fp64[j])
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert packed double-precision (64-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ FOR j := 0 TO 7
+   IF k[j]
+     dst.fp16[j] := Convert_FP64_To_FP16(a.fp64[j])
+   ELSE
+     dst.fp16[j] := src.fp16[j]
+   FI
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert packed double-precision (64-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]
+ FOR j := 0 TO 7
+   IF k[j]
+     dst.fp16[j] := Convert_FP64_To_FP16(a.fp64[j])
+   ELSE
+     dst.fp16[j] := src.fp16[j]
+   FI
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert packed double-precision (64-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ FOR j := 0 TO 7
+   IF k[j]
+     dst.fp16[j] := Convert_FP64_To_FP16(a.fp64[j])
+   ELSE
+     dst.fp16[j] := 0
+   FI
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert packed double-precision (64-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note]
+ FOR j := 0 TO 7
+   IF k[j]
+     dst.fp16[j] := Convert_FP64_To_FP16(a.fp64[j])
+   ELSE
+     dst.fp16[j] := 0
+   FI
+ ENDFOR
+ dst[MAX:128] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+ Convert the lower double-precision (64-bit) floating-point element in "b" to a half-precision (16-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst".
+ dst.fp16[0] := Convert_FP64_To_FP16(b.fp64[0])
+ dst[127:16] := a[127:16]
+ dst[MAX:128] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert the lower double-precision (64-bit) floating-point element in "b" to a half-precision (16-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst".
+ [round_note]
+ dst.fp16[0] := Convert_FP64_To_FP16(b.fp64[0])
+ dst[127:16] := a[127:16]
+ dst[MAX:128] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert the lower double-precision (64-bit) floating-point element in "b" to a half-precision (16-bit) floating-point element, store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst".
+ IF k[0]
+   dst.fp16[0] := Convert_FP64_To_FP16(b.fp64[0])
+ ELSE
+   dst.fp16[0] := src.fp16[0]
+ FI
+ dst[127:16] := a[127:16]
+ dst[MAX:128] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert the lower double-precision (64-bit) floating-point element in "b" to a half-precision (16-bit) floating-point element, store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst".
+ [round_note]
+ IF k[0]
+   dst.fp16[0] := Convert_FP64_To_FP16(b.fp64[0])
+ ELSE
+   dst.fp16[0] := src.fp16[0]
+ FI
+ dst[127:16] := a[127:16]
+ dst[MAX:128] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert the lower double-precision (64-bit) floating-point element in "b" to a half-precision (16-bit) floating-point element, store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst".
+ IF k[0]
+   dst.fp16[0] := Convert_FP64_To_FP16(b.fp64[0])
+ ELSE
+   dst.fp16[0] := 0
+ FI
+ dst[127:16] := a[127:16]
+ dst[MAX:128] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert the lower double-precision (64-bit) floating-point element in "b" to a half-precision (16-bit) floating-point element, store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst".
+ [round_note]
+ IF k[0]
+   dst.fp16[0] := Convert_FP64_To_FP16(b.fp64[0])
+ ELSE
+   dst.fp16[0] := 0
+ FI
+ dst[127:16] := a[127:16]
+ dst[MAX:128] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
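The scalar conversions above only touch lane 0; the remaining seven 16-bit lanes pass through from "a". A scalar model of that merge, assuming `f32` lanes as stand-ins for `f16` and an array for the 128-bit vector; the helper name is hypothetical and not part of the patch:

    fn mask_cvtsd_sh(src: &[f32; 8], k: u8, a: &[f32; 8], b: f64) -> [f32; 8] {
        // The upper 7 elements always come from `a` (dst[127:16] := a[127:16]).
        let mut dst = *a;
        // Lane 0 is converted, or taken from `src` when mask bit 0 is clear.
        dst[0] = if k & 1 == 1 { b as f32 } else { src[0] };
        dst
    }

    fn main() {
        let a = [9.0f32; 8];
        let src = [5.0f32; 8];
        assert_eq!(mask_cvtsd_sh(&src, 0b0, &a, 2.5)[0], 5.0);
        assert_eq!(
            mask_cvtsd_sh(&src, 0b1, &a, 2.5),
            [2.5, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0]
        );
    }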
+ Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst".
+ FOR j := 0 to 15
+   dst.fp16[j] := Convert_FP32_To_FP16(a.fp32[j])
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst".
+ [round_note]
+ FOR j := 0 to 15
+   dst.fp16[j] := Convert_FP32_To_FP16(a.fp32[j])
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ FOR j := 0 to 15
+   IF k[j]
+     dst.fp16[j] := Convert_FP32_To_FP16(a.fp32[j])
+   ELSE
+     dst.fp16[j] := src.fp16[j]
+   FI
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]
+ FOR j := 0 to 15
+   IF k[j]
+     dst.fp16[j] := Convert_FP32_To_FP16(a.fp32[j])
+   ELSE
+     dst.fp16[j] := src.fp16[j]
+   FI
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ FOR j := 0 to 15
+   IF k[j]
+     dst.fp16[j] := Convert_FP32_To_FP16(a.fp32[j])
+   ELSE
+     dst.fp16[j] := 0
+   FI
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note]
+ FOR j := 0 to 15
+   IF k[j]
+     dst.fp16[j] := Convert_FP32_To_FP16(a.fp32[j])
+   ELSE
+     dst.fp16[j] := 0
+   FI
+ ENDFOR
+ dst[MAX:256] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
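The `[round_note]` placeholders throughout these entries refer to an explicit rounding-mode operand on the `_round_` intrinsic variants. A sketch of how such an operand is typically composed from the existing, stable `_MM_FROUND_*` constants in `core::arch` (x86_64 assumed; the FP16 intrinsics themselves are not implemented at this point in the series):

    use core::arch::x86_64::{
        _MM_FROUND_CUR_DIRECTION, _MM_FROUND_NO_EXC, _MM_FROUND_TO_NEAREST_INT,
        _MM_FROUND_TO_ZERO,
    };

    fn main() {
        // Embedded rounding must also suppress exceptions, hence `| NO_EXC`.
        let round_nearest = _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC;
        let round_trunc = _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC;
        assert_ne!(round_nearest, round_trunc);
        // Alternatively, use whatever rounding mode MXCSR currently selects.
        let _ = _MM_FROUND_CUR_DIRECTION;
    }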
+ Convert the lower single-precision (32-bit) floating-point element in "b" to a half-precision (16-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst".
+ dst.fp16[0] := Convert_FP32_To_FP16(b.fp32[0])
+ dst[127:16] := a[127:16]
+ dst[MAX:128] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert the lower single-precision (32-bit) floating-point element in "b" to a half-precision (16-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst".
+ [round_note]
+ dst.fp16[0] := Convert_FP32_To_FP16(b.fp32[0])
+ dst[127:16] := a[127:16]
+ dst[MAX:128] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert the lower single-precision (32-bit) floating-point element in "b" to a half-precision (16-bit) floating-point element, store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst".
+ IF k[0]
+   dst.fp16[0] := Convert_FP32_To_FP16(b.fp32[0])
+ ELSE
+   dst.fp16[0] := src.fp16[0]
+ FI
+ dst[127:16] := a[127:16]
+ dst[MAX:128] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert the lower single-precision (32-bit) floating-point element in "b" to a half-precision (16-bit) floating-point element, store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst".
+ [round_note]
+ IF k[0]
+   dst.fp16[0] := Convert_FP32_To_FP16(b.fp32[0])
+ ELSE
+   dst.fp16[0] := src.fp16[0]
+ FI
+ dst[127:16] := a[127:16]
+ dst[MAX:128] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert the lower single-precision (32-bit) floating-point element in "b" to a half-precision (16-bit) floating-point element, store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst".
+ IF k[0]
+   dst.fp16[0] := Convert_FP32_To_FP16(b.fp32[0])
+ ELSE
+   dst.fp16[0] := 0
+ FI
+ dst[127:16] := a[127:16]
+ dst[MAX:128] := 0
+ AVX512_FP16
+ immintrin.h
+ Convert
+
+ Convert the lower single-precision (32-bit) floating-point element in "b" to a half-precision (16-bit) floating-point element, store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst".
+ [round_note]
+ IF k[0]
+   dst.fp16[0] := Convert_FP32_To_FP16(b.fp32[0])
+ ELSE
+   dst.fp16[0] := 0
+ FI
+ dst[127:16] := a[127:16]
+ dst[MAX:128] := 0
+ AVX512_FP16
immintrin.h
- Convert + + + + + + Convert the lower single-precision (32-bit) floating-point element in "b" to a half-precision (16-bit) floating-point elements, store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". + [round_note] + +IF k[0] + dst.fp16[0] := Convert_FP32_To_FP16(b.fp32[0]) +ELSE + dst.fp16[0] := 0 +FI +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
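The scalar variants differ from the packed ones in that only lane 0 is computed while the remaining lanes are passed through from "a". A hedged Rust model of that merge, with `f32` again standing in for `f16` and an invented helper name:

    // Model of the scalar convert: lane 0 comes from `b`, lanes 1..8 from `a`.
    // Mirrors dst.fp16[0] := Convert_FP32_To_FP16(b.fp32[0]); dst[127:16] := a[127:16].
    fn cvt_scalar(a: [f32; 8], b_low: f32) -> [f32; 8] {
        let mut dst = a; // copy the upper 7 packed elements from `a`
        dst[0] = b_low;  // converted lower element (conversion elided in this model)
        dst
    }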
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst".
+ FOR j := 0 TO 15
+     dst.dword[j] := Convert_FP16_To_Int32(a.fp16[j])
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst". [round_note]
+ FOR j := 0 TO 15
+     dst.dword[j] := Convert_FP16_To_Int32(a.fp16[j])
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ FOR j := 0 TO 15
+     IF k[j]
+         dst.dword[j] := Convert_FP16_To_Int32(a.fp16[j])
+     ELSE
+         dst.dword[j] := src.dword[j]
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_note]
+ FOR j := 0 TO 15
+     IF k[j]
+         dst.dword[j] := Convert_FP16_To_Int32(a.fp16[j])
+     ELSE
+         dst.dword[j] := src.dword[j]
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ FOR j := 0 TO 15
+     IF k[j]
+         dst.dword[j] := Convert_FP16_To_Int32(a.fp16[j])
+     ELSE
+         dst.dword[j] := 0
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_note]
+ FOR j := 0 TO 15
+     IF k[j]
+         dst.dword[j] := Convert_FP16_To_Int32(a.fp16[j])
+     ELSE
+         dst.dword[j] := 0
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
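Convert_FP16_To_Int32 rounds under the current rounding mode (or the one supplied explicitly in the [round_note] variants); the default is round to nearest with ties to even. Assuming that default, the per-element behaviour can be modeled in stable Rust with `round_ties_even`; this sketch deliberately ignores out-of-range inputs, which the hardware handles by returning an integer-indefinite value instead.

    // Model of Convert_FP16_To_Int32 under the default rounding mode
    // (round to nearest, ties to even). `f32` stands in for `f16`.
    fn convert_fp16_to_int32(x: f32) -> i32 {
        x.round_ties_even() as i32
    }

    fn main() {
        assert_eq!(convert_fp16_to_int32(2.5), 2); // ties go to the even integer
        assert_eq!(convert_fp16_to_int32(3.5), 4);
    }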
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst".
+ FOR j := 0 TO 15
+     dst.dword[j] := Convert_FP16_To_Int32_Truncate(a.fp16[j])
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst". [sae_note]
+ FOR j := 0 TO 15
+     dst.dword[j] := Convert_FP16_To_Int32_Truncate(a.fp16[j])
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ FOR j := 0 TO 15
+     IF k[j]
+         dst.dword[j] := Convert_FP16_To_Int32_Truncate(a.fp16[j])
+     ELSE
+         dst.dword[j] := src.dword[j]
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [sae_note]
+ FOR j := 0 TO 15
+     IF k[j]
+         dst.dword[j] := Convert_FP16_To_Int32_Truncate(a.fp16[j])
+     ELSE
+         dst.dword[j] := src.dword[j]
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ FOR j := 0 TO 15
+     IF k[j]
+         dst.dword[j] := Convert_FP16_To_Int32_Truncate(a.fp16[j])
+     ELSE
+         dst.dword[j] := 0
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note]
+ FOR j := 0 TO 15
+     IF k[j]
+         dst.dword[j] := Convert_FP16_To_Int32_Truncate(a.fp16[j])
+     ELSE
+         dst.dword[j] := 0
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
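The _Truncate family always rounds toward zero; the [sae_note] variants only suppress floating-point exceptions rather than change the rounding. Rust's float-to-int `as` cast also truncates toward zero (though it saturates on overflow, which the hardware does not), so a one-line model is enough to show the contrast with the rounding family above:

    // Model of Convert_FP16_To_Int32_Truncate: round toward zero.
    // `f32` stands in for `f16`; overflow behaviour differs from hardware.
    fn convert_fp16_to_int32_truncate(x: f32) -> i32 {
        x as i32 // `as` truncates toward zero
    }

    fn main() {
        assert_eq!(convert_fp16_to_int32_truncate(2.9), 2);
        assert_eq!(convert_fp16_to_int32_truncate(-2.9), -2);
    }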
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst".
+ FOR j := 0 TO 15
+     dst.dword[j] := Convert_FP16_To_UInt32(a.fp16[j])
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst". [round_note]
+ FOR j := 0 TO 15
+     dst.dword[j] := Convert_FP16_To_UInt32(a.fp16[j])
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ FOR j := 0 TO 15
+     IF k[j]
+         dst.dword[j] := Convert_FP16_To_UInt32(a.fp16[j])
+     ELSE
+         dst.dword[j] := src.dword[j]
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_note]
+ FOR j := 0 TO 15
+     IF k[j]
+         dst.dword[j] := Convert_FP16_To_UInt32(a.fp16[j])
+     ELSE
+         dst.dword[j] := src.dword[j]
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ FOR j := 0 TO 15
+     IF k[j]
+         dst.dword[j] := Convert_FP16_To_UInt32(a.fp16[j])
+     ELSE
+         dst.dword[j] := 0
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_note]
+ FOR j := 0 TO 15
+     IF k[j]
+         dst.dword[j] := Convert_FP16_To_UInt32(a.fp16[j])
+     ELSE
+         dst.dword[j] := 0
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst".
+ FOR j := 0 TO 15
+     dst.dword[j] := Convert_FP16_To_UInt32_Truncate(a.fp16[j])
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst". [sae_note]
+ FOR j := 0 TO 15
+     dst.dword[j] := Convert_FP16_To_UInt32_Truncate(a.fp16[j])
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ FOR j := 0 TO 15
+     IF k[j]
+         dst.dword[j] := Convert_FP16_To_UInt32_Truncate(a.fp16[j])
+     ELSE
+         dst.dword[j] := src.dword[j]
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [sae_note]
+ FOR j := 0 TO 15
+     IF k[j]
+         dst.dword[j] := Convert_FP16_To_UInt32_Truncate(a.fp16[j])
+     ELSE
+         dst.dword[j] := src.dword[j]
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ FOR j := 0 TO 15
+     IF k[j]
+         dst.dword[j] := Convert_FP16_To_UInt32_Truncate(a.fp16[j])
+     ELSE
+         dst.dword[j] := 0
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note]
+ FOR j := 0 TO 15
+     IF k[j]
+         dst.dword[j] := Convert_FP16_To_UInt32_Truncate(a.fp16[j])
+     ELSE
+         dst.dword[j] := 0
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst".
+ FOR j := 0 TO 7
+     dst.qword[j] := Convert_FP16_To_Int64(a.fp16[j])
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst". [round_note]
+ FOR j := 0 TO 7
+     dst.qword[j] := Convert_FP16_To_Int64(a.fp16[j])
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ FOR j := 0 TO 7
+     IF k[j]
+         dst.qword[j] := Convert_FP16_To_Int64(a.fp16[j])
+     ELSE
+         dst.qword[j] := src.qword[j]
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_note]
+ FOR j := 0 TO 7
+     IF k[j]
+         dst.qword[j] := Convert_FP16_To_Int64(a.fp16[j])
+     ELSE
+         dst.qword[j] := src.qword[j]
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ FOR j := 0 TO 7
+     IF k[j]
+         dst.qword[j] := Convert_FP16_To_Int64(a.fp16[j])
+     ELSE
+         dst.qword[j] := 0
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_note]
+ FOR j := 0 TO 7
+     IF k[j]
+         dst.qword[j] := Convert_FP16_To_Int64(a.fp16[j])
+     ELSE
+         dst.qword[j] := 0
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
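From here on only the lane count and element width change: the qword conversions loop j := 0 TO 7 and consume 8 mask bits, the dword ones 16 bits, and the word ones below 32 bits. A tiny illustrative helper (hypothetical, not from this patch) makes the mask indexing explicit:

    // Returns true when lane `j` of an n-lane operation is selected by mask `k`.
    // The qword conversions use n = 8, the dword ones n = 16, the word ones n = 32.
    fn lane_selected(k: u32, j: u32) -> bool {
        (k >> j) & 1 == 1
    }

    fn main() {
        let k: u32 = 0b1010_1010; // __mmask8-style mask for an 8-lane qword op
        let selected: Vec<u32> = (0..8).filter(|&j| lane_selected(k, j)).collect();
        assert_eq!(selected, [1, 3, 5, 7]);
    }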
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst".
+ FOR j := 0 TO 7
+     dst.qword[j] := Convert_FP16_To_Int64_Truncate(a.fp16[j])
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst". [sae_note]
+ FOR j := 0 TO 7
+     dst.qword[j] := Convert_FP16_To_Int64_Truncate(a.fp16[j])
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ FOR j := 0 TO 7
+     IF k[j]
+         dst.qword[j] := Convert_FP16_To_Int64_Truncate(a.fp16[j])
+     ELSE
+         dst.qword[j] := src.qword[j]
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [sae_note]
+ FOR j := 0 TO 7
+     IF k[j]
+         dst.qword[j] := Convert_FP16_To_Int64_Truncate(a.fp16[j])
+     ELSE
+         dst.qword[j] := src.qword[j]
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ FOR j := 0 TO 7
+     IF k[j]
+         dst.qword[j] := Convert_FP16_To_Int64_Truncate(a.fp16[j])
+     ELSE
+         dst.qword[j] := 0
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note]
+ FOR j := 0 TO 7
+     IF k[j]
+         dst.qword[j] := Convert_FP16_To_Int64_Truncate(a.fp16[j])
+     ELSE
+         dst.qword[j] := 0
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst".
+ FOR j := 0 TO 7
+     dst.qword[j] := Convert_FP16_To_UInt64(a.fp16[j])
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst". [round_note]
+ FOR j := 0 TO 7
+     dst.qword[j] := Convert_FP16_To_UInt64(a.fp16[j])
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ FOR j := 0 TO 7
+     IF k[j]
+         dst.qword[j] := Convert_FP16_To_UInt64(a.fp16[j])
+     ELSE
+         dst.qword[j] := src.qword[j]
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_note]
+ FOR j := 0 TO 7
+     IF k[j]
+         dst.qword[j] := Convert_FP16_To_UInt64(a.fp16[j])
+     ELSE
+         dst.qword[j] := src.qword[j]
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ FOR j := 0 TO 7
+     IF k[j]
+         dst.qword[j] := Convert_FP16_To_UInt64(a.fp16[j])
+     ELSE
+         dst.qword[j] := 0
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_note]
+ FOR j := 0 TO 7
+     IF k[j]
+         dst.qword[j] := Convert_FP16_To_UInt64(a.fp16[j])
+     ELSE
+         dst.qword[j] := 0
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst".
+ FOR j := 0 TO 7
+     dst.qword[j] := Convert_FP16_To_UInt64_Truncate(a.fp16[j])
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst". [sae_note]
+ FOR j := 0 TO 7
+     dst.qword[j] := Convert_FP16_To_UInt64_Truncate(a.fp16[j])
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ FOR j := 0 TO 7
+     IF k[j]
+         dst.qword[j] := Convert_FP16_To_UInt64_Truncate(a.fp16[j])
+     ELSE
+         dst.qword[j] := src.qword[j]
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [sae_note]
+ FOR j := 0 TO 7
+     IF k[j]
+         dst.qword[j] := Convert_FP16_To_UInt64_Truncate(a.fp16[j])
+     ELSE
+         dst.qword[j] := src.qword[j]
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ FOR j := 0 TO 7
+     IF k[j]
+         dst.qword[j] := Convert_FP16_To_UInt64_Truncate(a.fp16[j])
+     ELSE
+         dst.qword[j] := 0
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note]
+ FOR j := 0 TO 7
+     IF k[j]
+         dst.qword[j] := Convert_FP16_To_UInt64_Truncate(a.fp16[j])
+     ELSE
+         dst.qword[j] := 0
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed 16-bit integers, and store the results in "dst".
+ FOR j := 0 TO 31
+     dst.word[j] := Convert_FP16_To_Int16(a.fp16[j])
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed 16-bit integers, and store the results in "dst". [round_note]
+ FOR j := 0 TO 31
+     dst.word[j] := Convert_FP16_To_Int16(a.fp16[j])
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed 16-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ FOR j := 0 TO 31
+     IF k[j]
+         dst.word[j] := Convert_FP16_To_Int16(a.fp16[j])
+     ELSE
+         dst.word[j] := src.word[j]
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed 16-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_note]
+ FOR j := 0 TO 31
+     IF k[j]
+         dst.word[j] := Convert_FP16_To_Int16(a.fp16[j])
+     ELSE
+         dst.word[j] := src.word[j]
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed 16-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ FOR j := 0 TO 31
+     IF k[j]
+         dst.word[j] := Convert_FP16_To_Int16(a.fp16[j])
+     ELSE
+         dst.word[j] := 0
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed 16-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_note]
+ FOR j := 0 TO 31
+     IF k[j]
+         dst.word[j] := Convert_FP16_To_Int16(a.fp16[j])
+     ELSE
+         dst.word[j] := 0
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
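The word-sized conversions are the only ones where source and destination lanes have the same width, so all 32 lanes of a 512-bit register are produced under a 32-bit mask. A compact Rust model of the masked loop, with `f32` and `i16` standing in for the `f16` and word lanes, and the default round-to-nearest-even mode assumed:

    // Model of the masked FP16 -> i16 loop: 32 lanes, one mask bit per lane.
    fn mask_cvtph_epi16(src: &[i16; 32], k: u32, a: &[f32; 32]) -> [i16; 32] {
        let mut dst = [0i16; 32];
        for j in 0..32 {
            dst[j] = if (k >> j) & 1 == 1 {
                a[j].round_ties_even() as i16 // stands in for Convert_FP16_To_Int16
            } else {
                src[j] // writemask behaviour; a zeromask model would use 0 here
            };
        }
        dst
    }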
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed 16-bit integers with truncation, and store the results in "dst".
+ FOR j := 0 TO 31
+     dst.word[j] := Convert_FP16_To_Int16_Truncate(a.fp16[j])
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed 16-bit integers with truncation, and store the results in "dst". [sae_note]
+ FOR j := 0 TO 31
+     dst.word[j] := Convert_FP16_To_Int16_Truncate(a.fp16[j])
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed 16-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ FOR j := 0 TO 31
+     IF k[j]
+         dst.word[j] := Convert_FP16_To_Int16_Truncate(a.fp16[j])
+     ELSE
+         dst.word[j] := src.word[j]
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed 16-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [sae_note]
+ FOR j := 0 TO 31
+     IF k[j]
+         dst.word[j] := Convert_FP16_To_Int16_Truncate(a.fp16[j])
+     ELSE
+         dst.word[j] := src.word[j]
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed 16-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ FOR j := 0 TO 31
+     IF k[j]
+         dst.word[j] := Convert_FP16_To_Int16_Truncate(a.fp16[j])
+     ELSE
+         dst.word[j] := 0
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed 16-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note]
+ FOR j := 0 TO 31
+     IF k[j]
+         dst.word[j] := Convert_FP16_To_Int16_Truncate(a.fp16[j])
+     ELSE
+         dst.word[j] := 0
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 16-bit integers, and store the results in "dst".
+ FOR j := 0 TO 31
+     dst.word[j] := Convert_FP16_To_UInt16(a.fp16[j])
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 16-bit integers, and store the results in "dst". [sae_note]
+ FOR j := 0 TO 31
+     dst.word[j] := Convert_FP16_To_UInt16(a.fp16[j])
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 16-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ FOR j := 0 TO 31
+     IF k[j]
+         dst.word[j] := Convert_FP16_To_UInt16(a.fp16[j])
+     ELSE
+         dst.word[j] := src.word[j]
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 16-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [sae_note]
+ FOR j := 0 TO 31
+     IF k[j]
+         dst.word[j] := Convert_FP16_To_UInt16(a.fp16[j])
+     ELSE
+         dst.word[j] := src.word[j]
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 16-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ FOR j := 0 TO 31
+     IF k[j]
+         dst.word[j] := Convert_FP16_To_UInt16(a.fp16[j])
+     ELSE
+         dst.word[j] := 0
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 16-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note]
+ FOR j := 0 TO 31
+     IF k[j]
+         dst.word[j] := Convert_FP16_To_UInt16(a.fp16[j])
+     ELSE
+         dst.word[j] := 0
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 16-bit integers with truncation, and store the results in "dst".
+ FOR j := 0 TO 31
+     dst.word[j] := Convert_FP16_To_UInt16_Truncate(a.fp16[j])
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 16-bit integers with truncation, and store the results in "dst". [sae_note]
+ FOR j := 0 TO 31
+     dst.word[j] := Convert_FP16_To_UInt16_Truncate(a.fp16[j])
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 16-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ FOR j := 0 TO 31
+     IF k[j]
+         dst.word[j] := Convert_FP16_To_UInt16_Truncate(a.fp16[j])
+     ELSE
+         dst.word[j] := src.word[j]
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 16-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [sae_note]
+ FOR j := 0 TO 31
+     IF k[j]
+         dst.word[j] := Convert_FP16_To_UInt16_Truncate(a.fp16[j])
+     ELSE
+         dst.word[j] := src.word[j]
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 16-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ FOR j := 0 TO 31
+     IF k[j]
+         dst.word[j] := Convert_FP16_To_UInt16_Truncate(a.fp16[j])
+     ELSE
+         dst.word[j] := 0
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
+ Convert
+ Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 16-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note]
+ FOR j := 0 TO 31
+     IF k[j]
+         dst.word[j] := Convert_FP16_To_UInt16_Truncate(a.fp16[j])
+     ELSE
+         dst.word[j] := 0
+     FI
+ ENDFOR
+ dst[MAX:512] := 0
+ AVX512_FP16
immintrin.h
- Convert + + + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed unsigned 16-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note] + +FOR j := 0 TO 31 + IF k[j] + dst.word[j] := Convert_FP16_To_UInt16_Truncate(a.fp16[j]) + ELSE + dst.word[j] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
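The _Truncate variants differ from the plain conversions only in how the fractional part is dropped: truncation always chops toward zero, while the plain forms honor the current rounding mode (round-to-nearest-even by default). A quick illustration in plain Rust, again with f32 standing in for f16:

fn main() {
    let x = 42.7_f32;
    // Convert_FP16_To_UInt16 under the default rounding mode: 42.7 -> 43.
    let rounded = x.round_ties_even() as u16;
    // Convert_FP16_To_UInt16_Truncate chops toward zero: 42.7 -> 42.
    let truncated = x.trunc() as u16;
    assert_eq!((rounded, truncated), (43, 42));
}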
+ Convert
- - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - double-precision (64-bit) floating-point elements, and store the results in "dst". - - FOR j := 0 to 7 - dst.fp64[j] := Convert_FP16_To_FP64(a.fp16[j]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Convert + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 7 + dst.fp64[j] := Convert_FP16_To_FP64(a.fp16[j]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - double-precision (64-bit) floating-point elements, and store the results in "dst". - [sae_note] - - FOR j := 0 to 7 - dst.fp64[j] := Convert_FP16_To_FP64(a.fp16[j]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Convert + + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst". [sae_note] + +FOR j := 0 to 7 + dst.fp64[j] := Convert_FP16_To_FP64(a.fp16[j]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - double-precision (64-bit) floating-point elements, and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - FOR j := 0 to 7 - IF k[j] - dst.fp64[j] := Convert_FP16_To_FP64(a.fp16[j]) - ELSE - dst.fp64[j] := src.fp64[j] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Convert + + + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + IF k[j] + dst.fp64[j] := Convert_FP16_To_FP64(a.fp16[j]) + ELSE + dst.fp64[j] := src.fp64[j] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - - - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - double-precision (64-bit) floating-point elements, and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). [sae_note] - - FOR j := 0 to 7 - IF k[j] - dst.fp64[j] := Convert_FP16_To_FP64(a.fp16[j]) - ELSE - dst.fp64[j] := src.fp64[j] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Convert + + + + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [sae_note] + +FOR j := 0 to 7 + IF k[j] + dst.fp64[j] := Convert_FP16_To_FP64(a.fp16[j]) + ELSE + dst.fp64[j] := src.fp64[j] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - double-precision (64-bit) floating-point elements, and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - IF k[j] - dst.fp64[j] := Convert_FP16_To_FP64(a.fp16[j]) - ELSE - dst.fp64[j] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Convert + + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + IF k[j] + dst.fp64[j] := Convert_FP16_To_FP64(a.fp16[j]) + ELSE + dst.fp64[j] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - double-precision (64-bit) floating-point elements, and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - [sae_note] - - FOR j := 0 to 7 - IF k[j] - dst.fp64[j] := Convert_FP16_To_FP64(a.fp16[j]) - ELSE - dst.fp64[j] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Convert + + + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note] + +FOR j := 0 to 7 + IF k[j] + dst.fp64[j] := Convert_FP16_To_FP64(a.fp16[j]) + ELSE + dst.fp64[j] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
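Every finite f16 value is exactly representable as an f64, so Convert_FP16_To_FP64 never rounds; the [sae_note] forms only suppress the invalid-operation signal a signaling-NaN input would raise. A small check of that exactness on a nightly toolchain with the same feature(f16) gate this patch enables, treating the widening as a plain `as` cast:

#![feature(f16)]

fn main() {
    // f16 has a 10-bit mantissa, so every finite value widens exactly.
    let h: f16 = 0.1; // the nearest f16 to 0.1, not 0.1 itself
    let d = h as f64; // exact: widening cannot round
    assert_eq!(d as f16, h); // and the round trip back is lossless
}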
+ Convert
- - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - single-precision (32-bit) floating-point elements, and store the results in "dst". - - FOR j := 0 to 15 - dst.fp32[j] := Convert_FP16_To_FP32(a.fp16[j]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Convert + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 15 + dst.fp32[j] := Convert_FP16_To_FP32(a.fp16[j]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - single-precision (32-bit) floating-point elements, and store the results in "dst". - [sae_note] - - FOR j := 0 to 15 - dst.fp32[j] := Convert_FP16_To_FP32(a.fp16[j]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Convert + + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". [sae_note] + +FOR j := 0 to 15 + dst.fp32[j] := Convert_FP16_To_FP32(a.fp16[j]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - single-precision (32-bit) floating-point elements, and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - FOR j := 0 to 15 - IF k[j] - dst.fp32[j] := Convert_FP16_To_FP32(a.fp16[j]) - ELSE - dst.fp32[j] := src.fp32[j] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Convert + + + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + IF k[j] + dst.fp32[j] := Convert_FP16_To_FP32(a.fp16[j]) + ELSE + dst.fp32[j] := src.fp32[j] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - - - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - single-precision (32-bit) floating-point elements, and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). [sae_note] - - FOR j := 0 to 15 - IF k[j] - dst.fp32[j] := Convert_FP16_To_FP32(a.fp16[j]) - ELSE - dst.fp32[j] := src.fp32[j] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Convert + + + + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [sae_note] + +FOR j := 0 to 15 + IF k[j] + dst.fp32[j] := Convert_FP16_To_FP32(a.fp16[j]) + ELSE + dst.fp32[j] := src.fp32[j] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - single-precision (32-bit) floating-point elements, and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 15 - IF k[j] - dst.fp32[j] := Convert_FP16_To_FP32(a.fp16[j]) - ELSE - dst.fp32[j] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Convert + + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + IF k[j] + dst.fp32[j] := Convert_FP16_To_FP32(a.fp16[j]) + ELSE + dst.fp32[j] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - single-precision (32-bit) floating-point elements, and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - [sae_note] - - FOR j := 0 to 15 - IF k[j] - dst.fp32[j] := Convert_FP16_To_FP32(a.fp16[j]) - ELSE - dst.fp32[j] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Convert + + + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note] + +FOR j := 0 to 15 + IF k[j] + dst.fp32[j] := Convert_FP16_To_FP32(a.fp16[j]) + ELSE + dst.fp32[j] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
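Convert_FP16_To_FP32 is likewise exact for every finite input. For reference, here is a bit-level sketch of what that conversion computes, decoding a raw half-precision pattern (sign, 5-bit exponent with bias 15, 10-bit fraction); this is a reference model, not the code path stdarch uses:

// Reference model of Convert_FP16_To_FP32 on a raw u16 bit pattern.
fn fp16_bits_to_f32(h: u16) -> f32 {
    let sign = if h >> 15 == 1 { -1.0_f32 } else { 1.0 };
    let exp = (h >> 10) & 0x1f;
    let frac = (h & 0x3ff) as f32;
    match exp {
        0 => sign * frac * 2f32.powi(-24),           // zero or subnormal
        0x1f if frac == 0.0 => sign * f32::INFINITY, // infinity
        0x1f => f32::NAN,                            // NaN (payload dropped here)
        _ => sign * (1.0 + frac / 1024.0) * 2f32.powi(exp as i32 - 15),
    }
}

fn main() {
    assert_eq!(fp16_bits_to_f32(0x3c00), 1.0);
    assert_eq!(fp16_bits_to_f32(0xc000), -2.0);
    assert_eq!(fp16_bits_to_f32(0x7bff), 65504.0); // largest finite f16
    assert_eq!(fp16_bits_to_f32(0x0001), 2f32.powi(-24)); // smallest subnormal
}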
+ Convert
- - - - Convert the lower half-precision (16-bit) floating-point element in "b" to a - double-precision (64-bit) floating-point element, store the result in the lower element - of "dst", and copy the upper element from "a" to the upper element of "dst". - - dst.fp64[0] := Convert_FP16_To_FP64(b.fp16[0]) - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Convert + + + + Convert the lower half-precision (16-bit) floating-point element in "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst.fp64[0] := Convert_FP16_To_FP64(b.fp16[0]) +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - - - Convert the lower half-precision (16-bit) floating-point element in "b" to a - double-precision (64-bit) floating-point element, store the result in the lower element - of "dst", and copy the upper element from "a" to the upper element of "dst". [sae_note] - - dst.fp64[0] := Convert_FP16_To_FP64(b.fp16[0]) - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Convert + + + + + Convert the lower half-precision (16-bit) floating-point element in "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". [sae_note] + +dst.fp64[0] := Convert_FP16_To_FP64(b.fp16[0]) +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - - - - Convert the lower half-precision (16-bit) floating-point element in "b" to a - double-precision (64-bit) floating-point element, store the result in the lower element - of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not - set), and copy the upper element from "a" to the upper element of "dst". - - IF k[0] - dst.fp64[0] := Convert_FP16_To_FP64(b.fp16[0]) - ELSE - dst.fp64[0] := src.fp64[0] - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Convert + + + + + + Convert the lower half-precision (16-bit) floating-point element in "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst.fp64[0] := Convert_FP16_To_FP64(b.fp16[0]) +ELSE + dst.fp64[0] := src.fp64[0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - - - - - Convert the lower half-precision (16-bit) floating-point element in "b" to a - double-precision (64-bit) floating-point element, store the result in the lower element - of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not - set), and copy the upper element from "a" to the upper element of "dst". [sae_note] - - IF k[0] - dst.fp64[0] := Convert_FP16_To_FP64(b.fp16[0]) - ELSE - dst.fp64[0] := src.fp64[0] - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Convert + + + + + + + Convert the lower half-precision (16-bit) floating-point element in "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". [sae_note] + +IF k[0] + dst.fp64[0] := Convert_FP16_To_FP64(b.fp16[0]) +ELSE + dst.fp64[0] := src.fp64[0] +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - - - Convert the lower half-precision (16-bit) floating-point element in "b" to a - double-precision (64-bit) floating-point element, store the result in the lower element - of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and - copy the upper element from "a" to the upper element of "dst". - - IF k[0] - dst.fp64[0] := Convert_FP16_To_FP64(b.fp16[0]) - ELSE - dst.fp64[0] := 0 - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Convert + + + + + Convert the lower half-precision (16-bit) floating-point element in "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". + +IF k[0] + dst.fp64[0] := Convert_FP16_To_FP64(b.fp16[0]) +ELSE + dst.fp64[0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - - - - Convert the lower half-precision (16-bit) floating-point element in "b" to a - double-precision (64-bit) floating-point element, store the result in the lower element - of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and - copy the upper element from "a" to the upper element of "dst". [sae_note] - - IF k[0] - dst.fp64[0] := Convert_FP16_To_FP64(b.fp16[0]) - ELSE - dst.fp64[0] := 0 - FI - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Convert + + + + + + Convert the lower half-precision (16-bit) floating-point element in "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". [sae_note] + +IF k[0] + dst.fp64[0] := Convert_FP16_To_FP64(b.fp16[0]) +ELSE + dst.fp64[0] := 0 +FI +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
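All of the scalar sh/sd entries share one shape: only lane 0 is computed, the remaining lanes are copied from the first operand, and the masked forms select between the converted value, src, or zero for lane 0 alone. A scalar sketch of that selection logic, with the two f64 lanes of an __m128d modeled as a plain array and the already-widened lane passed in as an ordinary f64:

// Model of the masked scalar FP16 -> FP64 convert above.
fn cvt_sh_sd(src: [f64; 2], k: u8, a: [f64; 2], b_lane0: f64, zero_masked: bool) -> [f64; 2] {
    let lane0 = if k & 1 != 0 {
        b_lane0 // Convert_FP16_To_FP64(b.fp16[0]), widened by the caller here
    } else if zero_masked {
        0.0
    } else {
        src[0]
    };
    [lane0, a[1]] // dst[127:64] := a[127:64]
}

fn main() {
    assert_eq!(cvt_sh_sd([9.0, 9.0], 0b0, [5.0, 7.0], 1.5, false), [9.0, 7.0]);
    assert_eq!(cvt_sh_sd([9.0, 9.0], 0b1, [5.0, 7.0], 1.5, false), [1.5, 7.0]);
}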
+ Convert
- - - - Convert the lower half-precision (16-bit) floating-point element in "b" to a - single-precision (32-bit) floating-point element, store the result in the lower element - of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". - - dst.fp32[0] := Convert_FP16_To_FP32(b.fp16[0]) - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Convert + + + + Convert the lower half-precision (16-bit) floating-point element in "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst.fp32[0] := Convert_FP16_To_FP32(b.fp16[0]) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - - - Convert the lower half-precision (16-bit) floating-point element in "b" to a - single-precision (32-bit) floating-point element, store the result in the lower element - of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". - [sae_note] - - dst.fp32[0] := Convert_FP16_To_FP32(b.fp16[0]) - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Convert + + + + + Convert the lower half-precision (16-bit) floating-point element in "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". [sae_note] + +dst.fp32[0] := Convert_FP16_To_FP32(b.fp16[0]) +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - - - - Convert the lower half-precision (16-bit) floating-point element in "b" to a - single-precision (32-bit) floating-point element, store the result in the lower element - of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not - set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". - - IF k[0] - dst.fp32[0] := Convert_FP16_To_FP32(b.fp16[0]) - ELSE - dst.fp32[0] := src.fp32[0] - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Convert + + + + + + Convert the lower half-precision (16-bit) floating-point element in "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst.fp32[0] := Convert_FP16_To_FP32(b.fp16[0]) +ELSE + dst.fp32[0] := src.fp32[0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - - - - - Convert the lower half-precision (16-bit) floating-point element in "b" to a - single-precision (32-bit) floating-point element, store the result in the lower element - of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not - set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". - [sae_note] - - IF k[0] - dst.fp32[0] := Convert_FP16_To_FP32(b.fp16[0]) - ELSE - dst.fp32[0] := src.fp32[0] - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Convert + + + + + + + Convert the lower half-precision (16-bit) floating-point element in "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". [sae_note] + +IF k[0] + dst.fp32[0] := Convert_FP16_To_FP32(b.fp16[0]) +ELSE + dst.fp32[0] := src.fp32[0] +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - - - Convert the lower half-precision (16-bit) floating-point element in "b" to a - single-precision (32-bit) floating-point element, store the result in the lower element - of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and - copy the upper 3 packed elements from "a" to the upper elements of "dst". - - IF k[0] - dst.fp32[0] := Convert_FP16_To_FP32(b.fp16[0]) - ELSE - dst.fp32[0] := 0 - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Convert + + + + + Convert the lower half-precision (16-bit) floating-point element in "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst.fp32[0] := Convert_FP16_To_FP32(b.fp16[0]) +ELSE + dst.fp32[0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - - - - Convert the lower half-precision (16-bit) floating-point element in "b" to a - single-precision (32-bit) floating-point element, store the result in the lower element - of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and - copy the upper 3 packed elements from "a" to the upper elements of "dst". [sae_note] - - IF k[0] - dst.fp32[0] := Convert_FP16_To_FP32(b.fp16[0]) - ELSE - dst.fp32[0] := 0 - FI - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Convert + + + + + + Convert the lower half-precision (16-bit) floating-point element in "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". [sae_note] + +IF k[0] + dst.fp32[0] := Convert_FP16_To_FP32(b.fp16[0]) +ELSE + dst.fp32[0] := 0 +FI +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - Convert the lower half-precision (16-bit) floating-point element in "a" to a - 32-bit integer, and store the result in "dst". - - dst.dword := Convert_FP16_To_Int32(a.fp16[0]) - - - AVX512_FP16 -
immintrin.h
- Convert + + + Convert the lower half-precision (16-bit) floating-point element in "a" to a 32-bit integer, and store the result in "dst". + +dst.dword := Convert_FP16_To_Int32(a.fp16[0]) + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - - Convert the lower half-precision (16-bit) floating-point element in "a" to a - 32-bit integer, and store the result in "dst". - [round_note] - - dst.dword := Convert_FP16_To_Int32(a.fp16[0]) - - - AVX512_FP16 -
immintrin.h
- Convert + + + + Convert the lower half-precision (16-bit) floating-point element in "a" to a 32-bit integer, and store the result in "dst". + [round_note] + +dst.dword := Convert_FP16_To_Int32(a.fp16[0]) + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - Convert the lower half-precision (16-bit) floating-point element in "a" to a - 64-bit integer, and store the result in "dst". - - dst.qword := Convert_FP16_To_Int64(a.fp16[0]) - - - AVX512_FP16 -
immintrin.h
- Convert + + + Convert the lower half-precision (16-bit) floating-point element in "a" to a 64-bit integer, and store the result in "dst". + +dst.qword := Convert_FP16_To_Int64(a.fp16[0]) + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - - Convert the lower half-precision (16-bit) floating-point element in "a" to a - 64-bit integer, and store the result in "dst". - [round_note] - - dst.qword := Convert_FP16_To_Int64(a.fp16[0]) - - - AVX512_FP16 -
immintrin.h
- Convert + + + + Convert the lower half-precision (16-bit) floating-point element in "a" to a 64-bit integer, and store the result in "dst". + [round_note] + +dst.qword := Convert_FP16_To_Int64(a.fp16[0]) + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - Convert the lower half-precision (16-bit) floating-point element in "a" to a - 32-bit integer with truncation, and store the result in "dst". - - dst.dword := Convert_FP16_To_Int32_Truncate(a.fp16[0]) - - - AVX512_FP16 -
immintrin.h
- Convert + + + Convert the lower half-precision (16-bit) floating-point element in "a" to a 32-bit integer with truncation, and store the result in "dst". + +dst.dword := Convert_FP16_To_Int32_Truncate(a.fp16[0]) + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - - Convert the lower half-precision (16-bit) floating-point element in "a" to a - 32-bit integer with truncation, and store the result in "dst". [sae_note] - - dst.dword := Convert_FP16_To_Int32_Truncate(a.fp16[0]) - - - AVX512_FP16 -
immintrin.h
- Convert + + + + Convert the lower half-precision (16-bit) floating-point element in "a" to a 32-bit integer with truncation, and store the result in "dst". [sae_note] + +dst.dword := Convert_FP16_To_Int32_Truncate(a.fp16[0]) + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - Convert the lower half-precision (16-bit) floating-point element in "a" to a - 64-bit integer with truncation, and store the result in "dst". - - dst.qword := Convert_FP16_To_Int64_Truncate(a.fp16[0]) - - - AVX512_FP16 -
immintrin.h
- Convert + + + Convert the lower half-precision (16-bit) floating-point element in "a" to a 64-bit integer with truncation, and store the result in "dst". + +dst.qword := Convert_FP16_To_Int64_Truncate(a.fp16[0]) + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - - Convert the lower half-precision (16-bit) floating-point element in "a" to a - 64-bit integer with truncation, and store the result in "dst". [sae_note] - - dst.qword := Convert_FP16_To_Int64_Truncate(a.fp16[0]) - - - AVX512_FP16 -
immintrin.h
- Convert + + + + Convert the lower half-precision (16-bit) floating-point element in "a" to a 64-bit integer with truncation, and store the result in "dst". [sae_note] + +dst.qword := Convert_FP16_To_Int64_Truncate(a.fp16[0]) + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - Convert the lower half-precision (16-bit) floating-point element in "a" to an - unsigned 32-bit integer, and store the result in "dst". - - dst.dword := Convert_FP16_To_UInt32(a.fp16[0]) - - - AVX512_FP16 -
immintrin.h
- Convert + + + Convert the lower half-precision (16-bit) floating-point element in "a" to an unsigned 32-bit integer, and store the result in "dst". + +dst.dword := Convert_FP16_To_UInt32(a.fp16[0]) + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - - Convert the lower half-precision (16-bit) floating-point element in "a" to an - unsigned 32-bit integer, and store the result in "dst". [sae_note] - - dst.dword := Convert_FP16_To_UInt32(a.fp16[0]) - - - AVX512_FP16 -
immintrin.h
- Convert + + + + Convert the lower half-precision (16-bit) floating-point element in "a" to an unsigned 32-bit integer, and store the result in "dst". [round_note] + +dst.dword := Convert_FP16_To_UInt32(a.fp16[0]) + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - Convert the lower half-precision (16-bit) floating-point element in "a" to an - unsigned 64-bit integer, and store the result in "dst". - - dst.qword := Convert_FP16_To_UInt64(a.fp16[0]) - - - AVX512_FP16 -
immintrin.h
- Convert + + + Convert the lower half-precision (16-bit) floating-point element in "a" to an unsigned 64-bit integer, and store the result in "dst". + +dst.qword := Convert_FP16_To_UInt64(a.fp16[0]) + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - - Convert the lower half-precision (16-bit) floating-point element in "a" to an - unsigned 64-bit integer, and store the result in "dst". [round_note] - - dst.qword := Convert_FP16_To_UInt64(a.fp16[0]) - - - AVX512_FP16 -
immintrin.h
- Convert + + + + Convert the lower half-precision (16-bit) floating-point element in "a" to an unsigned 64-bit integer, and store the result in "dst". [round_note] + +dst.qword := Convert_FP16_To_UInt64(a.fp16[0]) + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - Convert the lower half-precision (16-bit) floating-point element in "a" to an - unsigned 32-bit integer with truncation, and store the result in "dst". - - dst.dword := Convert_FP16_To_UInt32_Truncate(a.fp16[0]) - - - AVX512_FP16 -
immintrin.h
- Convert + + + Convert the lower half-precision (16-bit) floating-point element in "a" to an unsigned 32-bit integer with truncation, and store the result in "dst". + +dst.dword := Convert_FP16_To_UInt32_Truncate(a.fp16[0]) + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - - Convert the lower half-precision (16-bit) floating-point element in "a" to an - unsigned 32-bit integer with truncation, and store the result in "dst". [sae_note] - - dst.dword := Convert_FP16_To_UInt32_Truncate(a.fp16[0]) - - - AVX512_FP16 -
immintrin.h
- Convert + + + + Convert the lower half-precision (16-bit) floating-point element in "a" to an unsigned 32-bit integer with truncation, and store the result in "dst". [sae_note] + +dst.dword := Convert_FP16_To_UInt32_Truncate(a.fp16[0]) + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - Convert the lower half-precision (16-bit) floating-point element in "a" to an - unsigned 64-bit integer with truncation, and store the result in "dst". - - dst.qword := Convert_FP16_To_UInt64_Truncate(a.fp16[0]) - - - AVX512_FP16 -
immintrin.h
- Convert + + + Convert the lower half-precision (16-bit) floating-point element in "a" to an unsigned 64-bit integer with truncation, and store the result in "dst". + +dst.qword := Convert_FP16_To_UInt64_Truncate(a.fp16[0]) + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - - Convert the lower half-precision (16-bit) floating-point element in "a" to an - unsigned 64-bit integer with truncation, and store the result in "dst". [sae_note] - - dst.qword := Convert_FP16_To_UInt64_Truncate(a.fp16[0]) - - - AVX512_FP16 -
immintrin.h
- Convert + + + + Convert the lower half-precision (16-bit) floating-point element in "a" to an unsigned 64-bit integer with truncation, and store the result in "dst". [sae_note] + +dst.qword := Convert_FP16_To_UInt64_Truncate(a.fp16[0]) + + + AVX512_FP16 +
immintrin.h
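One caveat when wrapping these scalar FP16-to-integer conversions in Rust: on x86, an input outside the destination range converts to the integer-indefinite value (0x80000000 for the signed 32-bit forms, all-ones for the unsigned forms), while Rust's `as` casts saturate. A small demonstration of the difference, in plain Rust with no intrinsics:

fn main() {
    let too_big = 3.0e9_f32; // larger than any i32 value
    // Rust float-to-int casts saturate at the destination's bounds...
    assert_eq!(too_big as i32, i32::MAX);
    assert_eq!((-1.0_f32) as u32, 0);
    // ...whereas a VCVTSH2SI-style conversion would return the integer
    // indefinite value (i32::MIN) for the same out-of-range input.
}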
+ Convert
- - - - Convert the signed 32-bit integer "b" to a half-precision (16-bit) - floating-point element, store the result in the lower element of "dst", and copy the - upper 7 packed elements from "a" to the upper elements of "dst". - - dst.fp16[0] := Convert_Int32_To_FP16(b.fp32[0]) - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Convert + + + + Convert the signed 32-bit integer "b" to a half-precision (16-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst". + +dst.fp16[0] := Convert_Int32_To_FP16(b[31:0]) +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - - - Convert the signed 32-bit integer "b" to a half-precision (16-bit) - floating-point element, store the result in the lower element of "dst", and copy the - upper 7 packed elements from "a" to the upper elements of "dst". - [round_note] - - dst.fp16[0] := Convert_Int32_To_FP16(b.fp32[0]) - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Convert + + + + + Convert the signed 32-bit integer "b" to a half-precision (16-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst". + [round_note] + +dst.fp16[0] := Convert_Int32_To_FP16(b[31:0]) +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - - Convert the unsigned 32-bit integer "b" to a half-precision (16-bit) - floating-point element, store the result in the lower element of "dst", and copy the - upper 7 packed elements from "a" to the upper elements of "dst". - - dst.fp16[0] := Convert_Int32_To_FP16(b.fp32[0]) - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Convert + + + + Convert the unsigned 32-bit integer "b" to a half-precision (16-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst". + +dst.fp16[0] := Convert_UInt32_To_FP16(b[31:0]) +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - - - Convert the unsigned 32-bit integer "b" to a half-precision (16-bit) - floating-point element, store the result in the lower element of "dst", and copy the - upper 7 packed elements from "a" to the upper elements of "dst". - [round_note] - - dst.fp16[0] := Convert_Int32_To_FP16(b.fp32[0]) - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Convert + + + + + Convert the unsigned 32-bit integer "b" to a half-precision (16-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst". + [round_note] + +dst.fp16[0] := Convert_UInt32_To_FP16(b[31:0]) +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - - Convert the signed 64-bit integer "b" to a half-precision (16-bit) - floating-point element, store the result in the lower element of "dst", and copy the - upper 7 packed elements from "a" to the upper elements of "dst". - - dst.fp16[0] := Convert_Int64_To_FP16(b.fp64[0]) - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Convert + + + + Convert the signed 64-bit integer "b" to a half-precision (16-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst". + +dst.fp16[0] := Convert_Int64_To_FP16(b[63:0]) +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - - - Convert the signed 64-bit integer "b" to a half-precision (16-bit) - floating-point element, store the result in the lower element of "dst", and copy the - upper 7 packed elements from "a" to the upper elements of "dst". - [round_note] - - dst.fp16[0] := Convert_Int64_To_FP16(b.fp64[0]) - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Convert + + + + + Convert the signed 64-bit integer "b" to a half-precision (16-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst". + [round_note] + +dst.fp16[0] := Convert_Int64_To_FP16(b[63:0]) +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - - Convert the unsigned 64-bit integer "b" to a half-precision (16-bit) - floating-point element, store the result in the lower element of "dst", and copy the - upper 7 packed elements from "a" to the upper elements of "dst". - - dst.fp16[0] := Convert_Int64_To_FP16(b.fp64[0]) - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Convert + + + + Convert the unsigned 64-bit integer "b" to a half-precision (16-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst". + +dst.fp16[0] := Convert_UInt64_To_FP16(b[63:0]) +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - - - Convert the unsigned 64-bit integer "b" to a half-precision (16-bit) - floating-point element, store the result in the lower element of "dst", and copy the - upper 7 packed elements from "a" to the upper elements of "dst". - [round_note] - - dst.fp16[0] := Convert_Int64_To_FP16(b.fp64[0]) - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Convert + + + + + Convert the unsigned 64-bit integer "b" to a half-precision (16-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst". + [round_note] + +dst.fp16[0] := Convert_UInt64_To_FP16(b[63:0]) +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
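Going the other way, most integers are not exactly representable in half precision (an 11-bit significand, largest finite value 65504), which is why the integer-to-sh entries above carry a rounding mode. A nightly sketch under the same feature(f16) gate, relying on Rust's documented cast behavior (round to nearest, ties to even, overflow to infinity):

#![feature(f16)]

fn main() {
    // 2049 needs 12 significand bits, so it is a tie between 2048 and 2050
    // at f16 precision; nearest-even picks 2048.
    let x = 2049_i32 as f16;
    assert_eq!(x, 2048.0);
    // Anything beyond the largest finite f16 overflows to infinity.
    assert!((70_000_i32 as f16).is_infinite());
}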
+ Convert
- - - Copy 16-bit integer "a" to the lower elements of "dst", and zero the upper - elements of "dst". - - dst.fp16[0] := a.fp16[0] - dst[MAX:16] := 0 - - - AVX512_FP16 -
immintrin.h
- Convert + + + Copy 16-bit integer "a" to the lower element of "dst", and zero the upper elements of "dst". + +dst.fp16[0] := a.fp16[0] +dst[MAX:16] := 0 + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - Copy the lower 16-bit integer in "a" to "dst". - - dst.fp16[0] := a.fp16[0] - dst[MAX:16] := 0 - - - AVX512_FP16 -
immintrin.h
- Convert + + + Copy the lower 16-bit integer in "a" to "dst". + +dst.fp16[0] := a.fp16[0] +dst[MAX:16] := 0 + + + AVX512_FP16 +
immintrin.h
+ Convert
- - - Copy the lower half-precision (16-bit) floating-point element of "a" to "dst". - - dst[15:0] := a.fp16[0] - - AVX512_FP16 -
immintrin.h
- Convert + + + Copy the lower half-precision (16-bit) floating-point element of "a" to "dst". + +dst[15:0] := a.fp16[0] + + AVX512_FP16 +
immintrin.h
+ Convert
- - - Copy the lower half-precision (16-bit) floating-point element of "a" to "dst". - - dst[15:0] := a.fp16[0] - - AVX512_FP16 -
immintrin.h
- Convert + + + Copy the lower half-precision (16-bit) floating-point element of "a" to "dst". + +dst[15:0] := a.fp16[0] + + AVX512_FP16 +
immintrin.h
+ Convert
- - - Copy the lower half-precision (16-bit) floating-point element of "a" to "dst". - - dst[15:0] := a.fp16[0] - - AVX512_FP16 -
immintrin.h
- Convert + + + Copy the lower half-precision (16-bit) floating-point element of "a" to "dst". + +dst[15:0] := a.fp16[0] + + AVX512_FP16 +
immintrin.h
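These last few entries are pure moves: the low 16 bits travel and nothing is converted. That is easiest to see with a 128-bit register modeled as eight raw 16-bit lanes, the same view the stdarch tests use when they transmute vectors for comparison; a minimal sketch:

// Model of `dst.fp16[0] := a.fp16[0]; dst[MAX:16] := 0` above.
fn mov_sh(a: [u16; 8]) -> [u16; 8] {
    let mut dst = [0u16; 8];
    dst[0] = a[0]; // only the low half-precision lane survives
    dst
}

fn main() {
    assert_eq!(mov_sh([0x3c00; 8]), [0x3c00, 0, 0, 0, 0, 0, 0, 0]);
}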
+ Convert
- - - - Compare packed half-precision (16-bit) floating-point elements in "a" and "b", - and store packed maximum values in "dst". [max_float_note] - - FOR j := 0 to 31 - dst.fp16[j] := (a.fp16[j] > b.fp16[j] ? a.fp16[j] : b.fp16[j]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Special Math Functions + + + + Compare packed half-precision (16-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst". [max_float_note] + +FOR j := 0 to 31 + dst.fp16[j] := (a.fp16[j] > b.fp16[j] ? a.fp16[j] : b.fp16[j]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Special Math Functions
- - - - - - Compare packed half-precision (16-bit) floating-point elements in "a" and "b", - and store packed maximum values in "dst" using writemask "k" (elements are copied from - "src" when the corresponding mask bit is not set). [max_float_note] - - FOR j := 0 to 31 - IF k[j] - dst.fp16[j] := (a.fp16[j] > b.fp16[j] ? a.fp16[j] : b.fp16[j]) - ELSE - dst.fp16[j] := src.fp16[j] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Special Math Functions + + + + + + Compare packed half-precision (16-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [max_float_note] + +FOR j := 0 to 31 + IF k[j] + dst.fp16[j] := (a.fp16[j] > b.fp16[j] ? a.fp16[j] : b.fp16[j]) + ELSE + dst.fp16[j] := src.fp16[j] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Special Math Functions
- - - - - Compare packed half-precision (16-bit) floating-point elements in "a" and "b", - and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out - when the corresponding mask bit is not set). [max_float_note] - - FOR j := 0 to 31 - IF k[j] - dst.fp16[j] := (a.fp16[j] > b.fp16[j] ? a.fp16[j] : b.fp16[j]) - ELSE - dst.fp16[j] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Special Math Functions + + + + + Compare packed half-precision (16-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [max_float_note] + +FOR j := 0 to 31 + IF k[j] + dst.fp16[j] := (a.fp16[j] > b.fp16[j] ? a.fp16[j] : b.fp16[j]) + ELSE + dst.fp16[j] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Special Math Functions
- - - - - Compare packed half-precision (16-bit) floating-point elements in "a" and "b", - and store packed maximum values in "dst". [sae_note][max_float_note] - - FOR j := 0 to 31 - dst.fp16[j] := (a.fp16[j] > b.fp16[j] ? a.fp16[j] : b.fp16[j]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Special Math Functions + + + + + Compare packed half-precision (16-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst". [sae_note][max_float_note] + +FOR j := 0 to 31 + dst.fp16[j] := (a.fp16[j] > b.fp16[j] ? a.fp16[j] : b.fp16[j]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Special Math Functions
- - - - - - - Compare packed half-precision (16-bit) floating-point elements in "a" and "b", - and store packed maximum values in "dst" using writemask "k" (elements are copied from - "src" when the corresponding mask bit is not set). [sae_note][max_float_note] - - FOR j := 0 to 31 - IF k[j] - dst.fp16[j] := (a.fp16[j] > b.fp16[j] ? a.fp16[j] : b.fp16[j]) - ELSE - dst.fp16[j] := src.fp16[j] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Special Math Functions + + + + + + + Compare packed half-precision (16-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [sae_note][max_float_note] + +FOR j := 0 to 31 + IF k[j] + dst.fp16[j] := (a.fp16[j] > b.fp16[j] ? a.fp16[j] : b.fp16[j]) + ELSE + dst.fp16[j] := src.fp16[j] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Special Math Functions
- - - - - - Compare packed half-precision (16-bit) floating-point elements in "a" and "b", - and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out - when the corresponding mask bit is not set). [sae_note][max_float_note] - - FOR j := 0 to 31 - IF k[j] - dst.fp16[j] := (a.fp16[j] > b.fp16[j] ? a.fp16[j] : b.fp16[j]) - ELSE - dst.fp16[j] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Special Math Functions + + + + + + Compare packed half-precision (16-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note][max_float_note] + +FOR j := 0 to 31 + IF k[j] + dst.fp16[j] := (a.fp16[j] > b.fp16[j] ? a.fp16[j] : b.fp16[j]) + ELSE + dst.fp16[j] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Special Math Functions
- - - - Compare packed half-precision (16-bit) floating-point elements in "a" and "b", - and store packed minimum values in "dst". [min_float_note] - - FOR j := 0 to 31 - dst.fp16[j] := (a.fp16[j] < b.fp16[j] ? a.fp16[j] : b.fp16[j]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Special Math Functions -
- - - - - - - Compare packed half-precision (16-bit) floating-point elements in "a" and "b", - and store packed minimum values in "dst" using writemask "k" (elements are copied from - "src" when the corresponding mask bit is not set). [min_float_note] - - FOR j := 0 to 31 - IF k[j] - dst.fp16[j] := (a.fp16[j] < b.fp16[j] ? a.fp16[j] : b.fp16[j]) - ELSE - dst.fp16[j] := src.fp16[j] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Special Math Functions + + + + Compare packed half-precision (16-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst". [min_float_note] + +FOR j := 0 to 31 + dst.fp16[j] := (a.fp16[j] < b.fp16[j] ? a.fp16[j] : b.fp16[j]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Special Math Functions
- - - - - - Compare packed half-precision (16-bit) floating-point elements in "a" and "b", - and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out - when the corresponding mask bit is not set). [min_float_note] - - FOR j := 0 to 31 - IF k[j] - dst.fp16[j] := (a.fp16[j] < b.fp16[j] ? a.fp16[j] : b.fp16[j]) - ELSE - dst.fp16[j] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Special Math Functions + + + + + + + Compare packed half-precision (16-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [min_float_note] + +FOR j := 0 to 31 + IF k[j] + dst.fp16[j] := (a.fp16[j] < b.fp16[j] ? a.fp16[j] : b.fp16[j]) + ELSE + dst.fp16[j] := src.fp16[j] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Special Math Functions +
+ + + + + + Compare packed half-precision (16-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [min_float_note] + +FOR j := 0 to 31 + IF k[j] + dst.fp16[j] := (a.fp16[j] < b.fp16[j] ? a.fp16[j] : b.fp16[j]) + ELSE + dst.fp16[j] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Special Math Functions
- - - - - Compare packed half-precision (16-bit) floating-point elements in "a" and "b", - and store packed minimum values in "dst". [sae_note] [min_float_note] - - FOR j := 0 to 31 - dst.fp16[j] := (a.fp16[j] < b.fp16[j] ? a.fp16[j] : b.fp16[j]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Special Math Functions + + + + + Compare packed half-precision (16-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst". [sae_note] [min_float_note] + +FOR j := 0 to 31 + dst.fp16[j] := (a.fp16[j] < b.fp16[j] ? a.fp16[j] : b.fp16[j]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Special Math Functions
- - - - - - - Compare packed half-precision (16-bit) floating-point elements in "a" and "b", - and store packed minimum values in "dst" using writemask "k" (elements are copied from - "src" when the corresponding mask bit is not set). [sae_note][min_float_note] - - FOR j := 0 to 31 - IF k[j] - dst.fp16[j] := (a.fp16[j] < b.fp16[j] ? a.fp16[j] : b.fp16[j]) - ELSE - dst.fp16[j] := src.fp16[j] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Special Math Functions + + + + + + + Compare packed half-precision (16-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [sae_note][min_float_note] + +FOR j := 0 to 31 + IF k[j] + dst.fp16[j] := (a.fp16[j] < b.fp16[j] ? a.fp16[j] : b.fp16[j]) + ELSE + dst.fp16[j] := src.fp16[j] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Special Math Functions
- - - - - - Compare packed half-precision (16-bit) floating-point elements in "a" and "b", - and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out - when the corresponding mask bit is not set). [sae_note][min_float_note] - - FOR j := 0 to 31 - IF k[j] - dst.fp16[j] := (a.fp16[j] < b.fp16[j] ? a.fp16[j] : b.fp16[j]) - ELSE - dst.fp16[j] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Special Math Functions + + + + + + Compare packed half-precision (16-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note][min_float_note] + +FOR j := 0 to 31 + IF k[j] + dst.fp16[j] := (a.fp16[j] < b.fp16[j] ? a.fp16[j] : b.fp16[j]) + ELSE + dst.fp16[j] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
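The [max_float_note]/[min_float_note] markers matter: because the pseudocode is literally "a > b ? a : b", a NaN in either operand (every comparison with NaN is false) and the +0.0/-0.0 tie both make the operation return the second source, unlike IEEE maxNum, which Rust's f32::max follows by returning the non-NaN operand. The difference is easy to model, again with f32 standing in for f16:

// x86-style max: the literal `a > b ? a : b` from the pseudocode above.
fn max_x86(a: f32, b: f32) -> f32 {
    if a > b { a } else { b }
}

fn main() {
    assert_eq!(max_x86(f32::NAN, 1.0), 1.0);  // NaN > 1.0 is false: b wins
    assert!(max_x86(1.0, f32::NAN).is_nan()); // 1.0 > NaN is false: b wins
    assert!(max_x86(0.0, -0.0).is_sign_negative()); // zeros tie: b wins
}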
+ Special Math Functions
- - - - - Extract the reduced argument of the lower half-precision (16-bit) - floating-point element in "b" by the number of bits specified by "imm8", store the - result in the lower element of "dst", and copy the upper 7 packed elements from "a" to - the upper elements of "dst". [round_imm_note] - - DEFINE ReduceArgumentFP16(src[15:0], imm8[7:0]) { - m[15:0] := FP16(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[15:0] := POW(2.0, FP16(-m)) * ROUND(POW(2.0, FP16(m)) * src[15:0], imm8[3:0]) - tmp[15:0] := src[15:0] - tmp[15:0] - IF IsInf(tmp[15:0]) - tmp[15:0] := FP16(0.0) - FI - RETURN tmp[15:0] - } - dst.fp16[0] := ReduceArgumentFP16(b.fp16[0], imm8) - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Special Math Functions + + + + + Extract the reduced argument of the lower half-precision (16-bit) floating-point element in "b" by the number of bits specified by "imm8", store the result in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst". [round_imm_note] + +DEFINE ReduceArgumentFP16(src[15:0], imm8[7:0]) { + m[15:0] := FP16(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[15:0] := POW(2.0, FP16(-m)) * ROUND(POW(2.0, FP16(m)) * src[15:0], imm8[3:0]) + tmp[15:0] := src[15:0] - tmp[15:0] + IF IsInf(tmp[15:0]) + tmp[15:0] := FP16(0.0) + FI + RETURN tmp[15:0] +} +dst.fp16[0] := ReduceArgumentFP16(b.fp16[0], imm8) +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Special Math Functions
- - - - - - Extract the reduced argument of the lower half-precision (16-bit) - floating-point element in "b" by the number of bits specified by "imm8", store the - result in the lower element of "dst", and copy the upper 7 packed elements from "a" to - the upper elements of "dst". [round_imm_note][sae_note] - - DEFINE ReduceArgumentFP16(src[15:0], imm8[7:0]) { - m[15:0] := FP16(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[15:0] := POW(2.0, FP16(-m)) * ROUND(POW(2.0, FP16(m)) * src[15:0], imm8[3:0]) - tmp[15:0] := src[15:0] - tmp[15:0] - IF IsInf(tmp[15:0]) - tmp[15:0] := FP16(0.0) - FI - RETURN tmp[15:0] - } - dst.fp16[0] := ReduceArgumentFP16(b.fp16[0], imm8) - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Special Math Functions + + + + + + Extract the reduced argument of the lower half-precision (16-bit) floating-point element in "b" by the number of bits specified by "imm8", store the result in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst". [round_imm_note][sae_note] + +DEFINE ReduceArgumentFP16(src[15:0], imm8[7:0]) { + m[15:0] := FP16(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[15:0] := POW(2.0, FP16(-m)) * ROUND(POW(2.0, FP16(m)) * src[15:0], imm8[3:0]) + tmp[15:0] := src[15:0] - tmp[15:0] + IF IsInf(tmp[15:0]) + tmp[15:0] := FP16(0.0) + FI + RETURN tmp[15:0] +} +dst.fp16[0] := ReduceArgumentFP16(b.fp16[0], imm8) +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Special Math Functions
- - - - - - - Extract the reduced argument of the lower half-precision (16-bit) - floating-point element in "b" by the number of bits specified by "imm8", store the - result in the lower element of "dst" using writemask "k" (the element is copied from - "src" when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the - upper elements of "dst". [round_imm_note] - - DEFINE ReduceArgumentFP16(src[15:0], imm8[7:0]) { - m[15:0] := FP16(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[15:0] := POW(2.0, FP16(-m)) * ROUND(POW(2.0, FP16(m)) * src[15:0], imm8[3:0]) - tmp[15:0] := src[15:0] - tmp[15:0] - IF IsInf(tmp[15:0]) - tmp[15:0] := FP16(0.0) - FI - RETURN tmp[15:0] - } - IF k[0] - dst.fp16[0] := ReduceArgumentFP16(b.fp16[0], imm8) - ELSE - dst.fp16[0] := src.fp16[0] - FI - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Special Math Functions + + + + + + + Extract the reduced argument of the lower half-precision (16-bit) floating-point element in "b" by the number of bits specified by "imm8", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". [round_imm_note] + +DEFINE ReduceArgumentFP16(src[15:0], imm8[7:0]) { + m[15:0] := FP16(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[15:0] := POW(2.0, FP16(-m)) * ROUND(POW(2.0, FP16(m)) * src[15:0], imm8[3:0]) + tmp[15:0] := src[15:0] - tmp[15:0] + IF IsInf(tmp[15:0]) + tmp[15:0] := FP16(0.0) + FI + RETURN tmp[15:0] +} +IF k[0] + dst.fp16[0] := ReduceArgumentFP16(b.fp16[0], imm8) +ELSE + dst.fp16[0] := src.fp16[0] +FI +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Special Math Functions
- - - - - - - - Extract the reduced argument of the lower half-precision (16-bit) - floating-point element in "b" by the number of bits specified by "imm8", store the - result in the lower element of "dst" using writemask "k" (the element is copied from - "src" when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the - upper elements of "dst". [round_imm_note][sae_note] - - DEFINE ReduceArgumentFP16(src[15:0], imm8[7:0]) { - m[15:0] := FP16(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[15:0] := POW(2.0, FP16(-m)) * ROUND(POW(2.0, FP16(m)) * src[15:0], imm8[3:0]) - tmp[15:0] := src[15:0] - tmp[15:0] - IF IsInf(tmp[15:0]) - tmp[15:0] := FP16(0.0) - FI - RETURN tmp[15:0] - } - IF k[0] - dst.fp16[0] := ReduceArgumentFP16(b.fp16[0], imm8) - ELSE - dst.fp16[0] := src.fp16[0] - FI - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Special Math Functions + + + + + + + + Extract the reduced argument of the lower half-precision (16-bit) floating-point element in "b" by the number of bits specified by "imm8", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". [round_imm_note][sae_note] + +DEFINE ReduceArgumentFP16(src[15:0], imm8[7:0]) { + m[15:0] := FP16(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[15:0] := POW(2.0, FP16(-m)) * ROUND(POW(2.0, FP16(m)) * src[15:0], imm8[3:0]) + tmp[15:0] := src[15:0] - tmp[15:0] + IF IsInf(tmp[15:0]) + tmp[15:0] := FP16(0.0) + FI + RETURN tmp[15:0] +} +IF k[0] + dst.fp16[0] := ReduceArgumentFP16(b.fp16[0], imm8) +ELSE + dst.fp16[0] := src.fp16[0] +FI +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Special Math Functions
- - - - - - Extract the reduced argument of the lower half-precision (16-bit) - floating-point element in "b" by the number of bits specified by "imm8", store the - result in the lower element of "dst" using zeromask "k" (the element is zeroed out when - mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper - elements of "dst". [round_imm_note] - - DEFINE ReduceArgumentFP16(src[15:0], imm8[7:0]) { - m[15:0] := FP16(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[15:0] := POW(2.0, FP16(-m)) * ROUND(POW(2.0, FP16(m)) * src[15:0], imm8[3:0]) - tmp[15:0] := src[15:0] - tmp[15:0] - IF IsInf(tmp[15:0]) - tmp[15:0] := FP16(0.0) - FI - RETURN tmp[15:0] - } - IF k[0] - dst.fp16[0] := ReduceArgumentFP16(b.fp16[0], imm8) - ELSE - dst.fp16[0] := 0 - FI - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Special Math Functions + + + + + + Extract the reduced argument of the lower half-precision (16-bit) floating-point element in "b" by the number of bits specified by "imm8", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". [round_imm_note] + +DEFINE ReduceArgumentFP16(src[15:0], imm8[7:0]) { + m[15:0] := FP16(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[15:0] := POW(2.0, FP16(-m)) * ROUND(POW(2.0, FP16(m)) * src[15:0], imm8[3:0]) + tmp[15:0] := src[15:0] - tmp[15:0] + IF IsInf(tmp[15:0]) + tmp[15:0] := FP16(0.0) + FI + RETURN tmp[15:0] +} +IF k[0] + dst.fp16[0] := ReduceArgumentFP16(b.fp16[0], imm8) +ELSE + dst.fp16[0] := 0 +FI +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Special Math Functions
- - - - - - - Extract the reduced argument of the lower half-precision (16-bit) - floating-point element in "b" by the number of bits specified by "imm8", store the - result in the lower element of "dst" using zeromask "k" (the element is zeroed out when - mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper - elements of "dst". [round_imm_note][sae_note] - - DEFINE ReduceArgumentFP16(src[15:0], imm8[7:0]) { - m[15:0] := FP16(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[15:0] := POW(2.0, FP16(-m)) * ROUND(POW(2.0, FP16(m)) * src[15:0], imm8[3:0]) - tmp[15:0] := src[15:0] - tmp[15:0] - IF IsInf(tmp[15:0]) - tmp[15:0] := FP16(0.0) - FI - RETURN tmp[15:0] - } - IF k[0] - dst.fp16[0] := ReduceArgumentFP16(b.fp16[0], imm8) - ELSE - dst.fp16[0] := 0 - FI - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Special Math Functions + + + + + + + Extract the reduced argument of the lower half-precision (16-bit) floating-point element in "b" by the number of bits specified by "imm8", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". [round_imm_note][sae_note] + +DEFINE ReduceArgumentFP16(src[15:0], imm8[7:0]) { + m[15:0] := FP16(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[15:0] := POW(2.0, FP16(-m)) * ROUND(POW(2.0, FP16(m)) * src[15:0], imm8[3:0]) + tmp[15:0] := src[15:0] - tmp[15:0] + IF IsInf(tmp[15:0]) + tmp[15:0] := FP16(0.0) + FI + RETURN tmp[15:0] +} +IF k[0] + dst.fp16[0] := ReduceArgumentFP16(b.fp16[0], imm8) +ELSE + dst.fp16[0] := 0 +FI +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Special Math Functions
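Note: the writemask and zeromask variants above differ only in what lands in lane 0 when mask bit 0 is clear; the reduction itself is unchanged. A sketch of that selection (illustrative helpers, f32 standing in for f16):

// Writemask: a clear mask bit keeps the corresponding lane of `src`.
fn mask_lane0(k: u8, computed: f32, src: f32) -> f32 {
    if k & 1 != 0 { computed } else { src }
}

// Zeromask: a clear mask bit zeroes the lane instead.
fn maskz_lane0(k: u8, computed: f32) -> f32 {
    if k & 1 != 0 { computed } else { 0.0 }
}

The same two patterns recur throughout the masked load, store, move, roundscale, getexp, and getmant entries below.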
- - - Load a half-precision (16-bit) floating-point element from memory into the - lower element of "dst", and zero the upper elements. - - dst.fp16[0] := MEM[mem_addr].fp16[0] - dst[MAX:16] := 0 - - - AVX512_FP16 -
immintrin.h
- Load + + + Load a half-precision (16-bit) floating-point element from memory into the lower element of "dst", and zero the upper elements. + +dst.fp16[0] := MEM[mem_addr].fp16[0] +dst[MAX:16] := 0 + + + AVX512_FP16 +
immintrin.h
+ Load
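Note: the scalar load above reads only the low 16 bits from memory and zeroes every other lane of the destination. A sketch over raw bit patterns (u16 stands in for the f16 payload; the helper name is illustrative):

use core::ptr;

// dst.fp16[0] := MEM[mem_addr].fp16[0]; dst[MAX:16] := 0
unsafe fn load_sh_model(mem_addr: *const u16) -> [u16; 8] {
    let mut dst = [0u16; 8];
    dst[0] = ptr::read_unaligned(mem_addr);
    dst
}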
- - - - - Load a half-precision (16-bit) floating-point element from memory into the - lower element of "dst" using writemask "k" (the element is copied from "src" when mask - bit 0 is not set), and set the upper elements of "dst" to zero. - - IF k[0] - dst.fp16[0] := MEM[mem_addr].fp16[0] - ELSE - dst.fp16[0] := src.fp16[0] - FI - dst[MAX:16] := 0 - - - AVX512_FP16 -
immintrin.h
- Load + + + + + Load a half-precision (16-bit) floating-point element from memory into the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and set the upper elements of "dst" to zero. + +IF k[0] + dst.fp16[0] := MEM[mem_addr].fp16[0] +ELSE + dst.fp16[0] := src.fp16[0] +FI +dst[MAX:16] := 0 + + + AVX512_FP16 +
immintrin.h
+ Load
- - - - Load a half-precision (16-bit) floating-point element from memory into the - lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is - not set), and set the upper elements of "dst" to zero. - - IF k[0] - dst.fp16[0] := MEM[mem_addr].fp16[0] - ELSE - dst.fp16[0] := 0 - FI - dst[MAX:16] := 0 - - - AVX512_FP16 -
immintrin.h
- Load + + + + Load a half-precision (16-bit) floating-point element from memory into the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and set the upper elements of "dst" to zero. + +IF k[0] + dst.fp16[0] := MEM[mem_addr].fp16[0] +ELSE + dst.fp16[0] := 0 +FI +dst[MAX:16] := 0 + + + AVX512_FP16 +
immintrin.h
+ Load
- - - Load 512-bits (composed of 32 packed half-precision (16-bit) floating-point - elements) from memory into "dst". - "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may - be generated. - - dst[511:0] := MEM[mem_addr+511:mem_addr] - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Load + + + Load 512-bits (composed of 32 packed half-precision (16-bit) floating-point elements) from memory into "dst". + "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated. + +dst[511:0] := MEM[mem_addr+511:mem_addr] +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Load
- - - Load 512-bits (composed of 32 packed half-precision (16-bit) floating-point - elements) from memory into "dst". - "mem_addr" does not need to be aligned on any particular boundary. - - dst[511:0] := MEM[mem_addr+511:mem_addr] - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Load + + + Load 512-bits (composed of 32 packed half-precision (16-bit) floating-point elements) from memory into "dst". + "mem_addr" does not need to be aligned on any particular boundary. + +dst[511:0] := MEM[mem_addr+511:mem_addr] +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Load
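Note: the two 512-bit loads above differ only in their alignment contract; the aligned form may raise a general-protection exception on a misaligned address, the unaligned form may not. A sketch of that distinction (illustrative names, u16 as the f16 bit pattern):

use core::ptr;

// Aligned form: the caller promises a 64-byte-aligned address.
unsafe fn load_ph_model(mem_addr: *const [u16; 32]) -> [u16; 32] {
    debug_assert_eq!(mem_addr as usize % 64, 0, "mem_addr must be 64-byte aligned");
    ptr::read(mem_addr)
}

// Unaligned form: any address is acceptable.
unsafe fn loadu_ph_model(mem_addr: *const [u16; 32]) -> [u16; 32] {
    ptr::read_unaligned(mem_addr)
}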
- - - - Store the lower half-precision (16-bit) floating-point element from "a" into - memory. - - MEM[mem_addr].fp16[0] := a.fp16[0] - - - AVX512_FP16 -
immintrin.h
- Store + + + + Store the lower half-precision (16-bit) floating-point element from "a" into memory. + +MEM[mem_addr].fp16[0] := a.fp16[0] + + + AVX512_FP16 +
immintrin.h
+ Store
- - - - - Store the lower half-precision (16-bit) floating-point element from "a" into - memory using writemask "k". - - IF k[0] - MEM[mem_addr].fp16[0] := a.fp16[0] - FI - - - AVX512_FP16 -
immintrin.h
- Store + + + + + Store the lower half-precision (16-bit) floating-point element from "a" into memory using writemask "k". + +IF k[0] + MEM[mem_addr].fp16[0] := a.fp16[0] +FI + + + AVX512_FP16 +
immintrin.h
+ Store
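Note: the masked scalar store above is a conditional single-element write; memory is untouched when mask bit 0 is clear. Sketch (illustrative helper, u16 as the f16 bit pattern):

use core::ptr;

// IF k[0]: MEM[mem_addr].fp16[0] := a.fp16[0]
unsafe fn mask_store_sh_model(mem_addr: *mut u16, k: u8, a: [u16; 8]) {
    if k & 1 != 0 {
        ptr::write_unaligned(mem_addr, a[0]);
    }
}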
- - - - Store 512-bits (composed of 32 packed half-precision (16-bit) floating-point - elements) from "a" into memory. - "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may - be generated. - - MEM[mem_addr+511:mem_addr] := a[511:0] - - - AVX512_FP16 -
immintrin.h
- Store + + + + Store 512-bits (composed of 32 packed half-precision (16-bit) floating-point elements) from "a" into memory. + "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated. + +MEM[mem_addr+511:mem_addr] := a[511:0] + + + AVX512_FP16 +
immintrin.h
+ Store
- - - - Store 512-bits (composed of 32 packed half-precision (16-bit) floating-point - elements) from "a" into memory. - "mem_addr" does not need to be aligned on any particular boundary. - - MEM[mem_addr+511:mem_addr] := a[511:0] - - - AVX512_FP16 -
immintrin.h
- Store + + + + Store 512-bits (composed of 32 packed half-precision (16-bit) floating-point elements) from "a" into memory. + "mem_addr" does not need to be aligned on any particular boundary. + +MEM[mem_addr+511:mem_addr] := a[511:0] + + + AVX512_FP16 +
immintrin.h
+ Store
- - - - Move the lower half-precision (16-bit) floating-point element from "b" to the - lower element of "dst", and copy the upper 7 packed elements from "a" to the upper - elements of "dst". - - dst.fp16[0] := b.fp16[0] - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Move + + + + Move the lower half-precision (16-bit) floating-point element from "b" to the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst". + +dst.fp16[0] := b.fp16[0] +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Move
- - - - - - Move the lower half-precision (16-bit) floating-point element from "b" to the - lower element of "dst" using writemask "k" (the element is copied from "src" when mask - bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements - of "dst". - - IF k[0] - dst.fp16[0] := b.fp16[0] - ELSE - dst.fp16[0] := src.fp16[0] - FI - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Move + + + + + + Move the lower half-precision (16-bit) floating-point element from "b" to the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst.fp16[0] := b.fp16[0] +ELSE + dst.fp16[0] := src.fp16[0] +FI +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Move
- - - - - Move the lower half-precision (16-bit) floating-point element from "b" to the - lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is - not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". - - IF k[0] - dst.fp16[0] := b.fp16[0] - ELSE - dst.fp16[0] := 0 - FI - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Move + + + + + Move the lower half-precision (16-bit) floating-point element from "b" to the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst.fp16[0] := b.fp16[0] +ELSE + dst.fp16[0] := 0 +FI +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Move
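Note: the move family above merges two vectors: lane 0 comes from "b" (subject to the mask), lanes 1 through 7 come from "a". A sketch of the unmasked and zeromask forms (illustrative names, u16 as the f16 bit pattern):

// dst.fp16[0] := b.fp16[0]; dst[127:16] := a[127:16]
fn move_sh_model(a: [u16; 8], b: [u16; 8]) -> [u16; 8] {
    let mut dst = a;
    dst[0] = b[0];
    dst
}

// Zeromask form: lane 0 is b's lane 0 or zero, depending on k[0].
fn maskz_move_sh_model(k: u8, a: [u16; 8], b: [u16; 8]) -> [u16; 8] {
    let mut dst = a;
    dst[0] = if k & 1 != 0 { b[0] } else { 0 };
    dst
}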
- - - - Round packed half-precision (16-bit) floating-point elements in "a" to the - number of fraction bits specified by "imm8", and store the results in "dst". - [round_imm_note] - - DEFINE RoundScaleFP16(src.fp16, imm8[7:0]) { - m.fp16 := FP16(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp.fp16 := POW(FP16(2.0), -m) * ROUND(POW(FP16(2.0), m) * src.fp16, imm8[3:0]) - RETURN tmp.fp16 - } - FOR i := 0 to 31 - dst.fp16[i] := RoundScaleFP16(a.fp16[i], imm8) - ENDFOR - dest[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + + + + Round packed half-precision (16-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst". [round_imm_note] + +DEFINE RoundScaleFP16(src.fp16, imm8[7:0]) { + m.fp16 := FP16(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp.fp16 := POW(FP16(2.0), -m) * ROUND(POW(FP16(2.0), m) * src.fp16, imm8[3:0]) + RETURN tmp.fp16 +} +FOR i := 0 to 31 + dst.fp16[i] := RoundScaleFP16(a.fp16[i], imm8) +ENDFOR +dest[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
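Note: RoundScaleFP16 above is the quantization step of ReduceArgumentFP16 without the final subtraction: it rounds a value to M = imm8[7:4] fraction bits. A minimal f32-based sketch under the same assumptions as the reduce model earlier in this section:

// Round `src` to imm8[7:4] fraction bits; imm8[1:0] picks the direction.
fn round_scale_fp16(src: f32, imm8: u8) -> f32 {
    let m = (imm8 >> 4) as i32;
    let scaled = src * 2f32.powi(m);
    let rounded = match imm8 & 0x3 {
        0 => scaled.round_ties_even(),
        1 => scaled.floor(),
        2 => scaled.ceil(),
        _ => scaled.trunc(),
    };
    rounded * 2f32.powi(-m)
}

For example, round_scale_fp16(1.75, 0x13) keeps one fraction bit and truncates, giving 1.5.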
- - - - - Round packed half-precision (16-bit) floating-point elements in "a" to the - number of fraction bits specified by "imm8", and store the results in "dst". - [round_imm_note][sae_note] - - DEFINE RoundScaleFP16(src.fp16, imm8[7:0]) { - m.fp16 := FP16(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp.fp16 := POW(FP16(2.0), -m) * ROUND(POW(FP16(2.0), m) * src.fp16, imm8[3:0]) - RETURN tmp.fp16 - } - FOR i := 0 to 31 - dst.fp16[i] := RoundScaleFP16(a.fp16[i], imm8) - ENDFOR - dest[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + + + + + Round packed half-precision (16-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst". [round_imm_note][sae_note] + +DEFINE RoundScaleFP16(src.fp16, imm8[7:0]) { + m.fp16 := FP16(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp.fp16 := POW(FP16(2.0), -m) * ROUND(POW(FP16(2.0), m) * src.fp16, imm8[3:0]) + RETURN tmp.fp16 +} +FOR i := 0 to 31 + dst.fp16[i] := RoundScaleFP16(a.fp16[i], imm8) +ENDFOR +dest[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - - Round packed half-precision (16-bit) floating-point elements in "a" to the - number of fraction bits specified by "imm8", and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). [round_imm_note] - - DEFINE RoundScaleFP16(src.fp16, imm8[7:0]) { - m.fp16 := FP16(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp.fp16 := POW(FP16(2.0), -m) * ROUND(POW(FP16(2.0), m) * src.fp16, imm8[3:0]) - RETURN tmp.fp16 - } - FOR i := 0 to 31 - IF k[i] - dst.fp16[i] := RoundScaleFP16(a.fp16[i], imm8) - ELSE - dst.fp16[i] := src.fp16[i] - FI - ENDFOR - dest[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + + + + + + Round packed half-precision (16-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note] + +DEFINE RoundScaleFP16(src.fp16, imm8[7:0]) { + m.fp16 := FP16(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp.fp16 := POW(FP16(2.0), -m) * ROUND(POW(FP16(2.0), m) * src.fp16, imm8[3:0]) + RETURN tmp.fp16 +} +FOR i := 0 to 31 + IF k[i] + dst.fp16[i] := RoundScaleFP16(a.fp16[i], imm8) + ELSE + dst.fp16[i] := src.fp16[i] + FI +ENDFOR +dest[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - - - Round packed half-precision (16-bit) floating-point elements in "a" to the - number of fraction bits specified by "imm8", and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). [round_imm_note][sae_note] - - DEFINE RoundScaleFP16(src.fp16, imm8[7:0]) { - m.fp16 := FP16(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp.fp16 := POW(FP16(2.0), -m) * ROUND(POW(FP16(2.0), m) * src.fp16, imm8[3:0]) - RETURN tmp.fp16 - } - FOR i := 0 to 31 - IF k[i] - dst.fp16[i] := RoundScaleFP16(a.fp16[i], imm8) - ELSE - dst.fp16[i] := src.fp16[i] - FI - ENDFOR - dest[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + + + + + + + Round packed half-precision (16-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note][sae_note] + +DEFINE RoundScaleFP16(src.fp16, imm8[7:0]) { + m.fp16 := FP16(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp.fp16 := POW(FP16(2.0), -m) * ROUND(POW(FP16(2.0), m) * src.fp16, imm8[3:0]) + RETURN tmp.fp16 +} +FOR i := 0 to 31 + IF k[i] + dst.fp16[i] := RoundScaleFP16(a.fp16[i], imm8) + ELSE + dst.fp16[i] := src.fp16[i] + FI +ENDFOR +dest[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - Round packed half-precision (16-bit) floating-point elements in "a" to the - number of fraction bits specified by "imm8", and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - [round_imm_note] - - DEFINE RoundScaleFP16(src.fp16, imm8[7:0]) { - m.fp16 := FP16(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp.fp16 := POW(FP16(2.0), -m) * ROUND(POW(FP16(2.0), m) * src.fp16, imm8[3:0]) - RETURN tmp.fp16 - } - FOR i := 0 to 31 - IF k[i] - dst.fp16[i] := RoundScaleFP16(a.fp16[i], imm8) - ELSE - dst.fp16[i] := 0 - FI - ENDFOR - dest[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + + + + + Round packed half-precision (16-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note] + +DEFINE RoundScaleFP16(src.fp16, imm8[7:0]) { + m.fp16 := FP16(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp.fp16 := POW(FP16(2.0), -m) * ROUND(POW(FP16(2.0), m) * src.fp16, imm8[3:0]) + RETURN tmp.fp16 +} +FOR i := 0 to 31 + IF k[i] + dst.fp16[i] := RoundScaleFP16(a.fp16[i], imm8) + ELSE + dst.fp16[i] := 0 + FI +ENDFOR +dest[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - - Round packed half-precision (16-bit) floating-point elements in "a" to the - number of fraction bits specified by "imm8", and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - [round_imm_note][sae_note] - - DEFINE RoundScaleFP16(src.fp16, imm8[7:0]) { - m.fp16 := FP16(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp.fp16 := POW(FP16(2.0), -m) * ROUND(POW(FP16(2.0), m) * src.fp16, imm8[3:0]) - RETURN tmp.fp16 - } - FOR i := 0 to 31 - IF k[i] - dst.fp16[i] := RoundScaleFP16(a.fp16[i], imm8) - ELSE - dst.fp16[i] := 0 - FI - ENDFOR - dest[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + + + + + + Round packed half-precision (16-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note][sae_note] + +DEFINE RoundScaleFP16(src.fp16, imm8[7:0]) { + m.fp16 := FP16(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp.fp16 := POW(FP16(2.0), -m) * ROUND(POW(FP16(2.0), m) * src.fp16, imm8[3:0]) + RETURN tmp.fp16 +} +FOR i := 0 to 31 + IF k[i] + dst.fp16[i] := RoundScaleFP16(a.fp16[i], imm8) + ELSE + dst.fp16[i] := 0 + FI +ENDFOR +dest[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - Round the lower half-precision (16-bit) floating-point element in "b" to the - number of fraction bits specified by "imm8", store the result in the lower element of - "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst". - [round_imm_note] - - DEFINE RoundScaleFP16(src.fp16, imm8[7:0]) { - m.fp16 := FP16(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp.fp16 := POW(FP16(2.0), -m) * ROUND(POW(FP16(2.0), m) * src.fp16, imm8[3:0]) - RETURN tmp.fp16 - } - dst.fp16[0] := RoundScaleFP16(b.fp16[0], imm8) - dst[127:16] := a[127:16] - dest[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + + + + + Round the lower half-precision (16-bit) floating-point element in "b" to the number of fraction bits specified by "imm8", store the result in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst". [round_imm_note] + +DEFINE RoundScaleFP16(src.fp16, imm8[7:0]) { + m.fp16 := FP16(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp.fp16 := POW(FP16(2.0), -m) * ROUND(POW(FP16(2.0), m) * src.fp16, imm8[3:0]) + RETURN tmp.fp16 +} +dst.fp16[0] := RoundScaleFP16(b.fp16[0], imm8) +dst[127:16] := a[127:16] +dest[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - - Round the lower half-precision (16-bit) floating-point element in "b" to the - number of fraction bits specified by "imm8", store the result in the lower element of - "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst". - [round_imm_note][sae_note] - - DEFINE RoundScaleFP16(src.fp16, imm8[7:0]) { - m.fp16 := FP16(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp.fp16 := POW(FP16(2.0), -m) * ROUND(POW(FP16(2.0), m) * src.fp16, imm8[3:0]) - RETURN tmp.fp16 - } - dst.fp16[0] := RoundScaleFP16(b.fp16[0], imm8) - dst[127:16] := a[127:16] - dest[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + + + + + + Round the lower half-precision (16-bit) floating-point element in "b" to the number of fraction bits specified by "imm8", store the result in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst". [round_imm_note][sae_note] + +DEFINE RoundScaleFP16(src.fp16, imm8[7:0]) { + m.fp16 := FP16(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp.fp16 := POW(FP16(2.0), -m) * ROUND(POW(FP16(2.0), m) * src.fp16, imm8[3:0]) + RETURN tmp.fp16 +} +dst.fp16[0] := RoundScaleFP16(b.fp16[0], imm8) +dst[127:16] := a[127:16] +dest[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - - - Round the lower half-precision (16-bit) floating-point element in "b" to the - number of fraction bits specified by "imm8", store the result in the lower element of - "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), - and copy the upper 7 packed elements from "a" to the upper elements of "dst". - [round_imm_note] - - DEFINE RoundScaleFP16(src.fp16, imm8[7:0]) { - m.fp16 := FP16(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp.fp16 := POW(FP16(2.0), -m) * ROUND(POW(FP16(2.0), m) * src.fp16, imm8[3:0]) - RETURN tmp.fp16 - } - IF k[0] - dst.fp16[0] := RoundScaleFP16(b.fp16[0], imm8) - ELSE - dst.fp16[0] := src.fp16[0] - FI - dst[127:16] := a[127:16] - dest[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + + + + + + + Round the lower half-precision (16-bit) floating-point element in "b" to the number of fraction bits specified by "imm8", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". [round_imm_note] + +DEFINE RoundScaleFP16(src.fp16, imm8[7:0]) { + m.fp16 := FP16(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp.fp16 := POW(FP16(2.0), -m) * ROUND(POW(FP16(2.0), m) * src.fp16, imm8[3:0]) + RETURN tmp.fp16 +} +IF k[0] + dst.fp16[0] := RoundScaleFP16(b.fp16[0], imm8) +ELSE + dst.fp16[0] := src.fp16[0] +FI +dst[127:16] := a[127:16] +dest[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - - - - Round the lower half-precision (16-bit) floating-point element in "b" to the - number of fraction bits specified by "imm8", store the result in the lower element of - "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), - and copy the upper 7 packed elements from "a" to the upper elements of "dst". - [round_imm_note][sae_note] - - DEFINE RoundScaleFP16(src.fp16, imm8[7:0]) { - m.fp16 := FP16(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp.fp16 := POW(FP16(2.0), -m) * ROUND(POW(FP16(2.0), m) * src.fp16, imm8[3:0]) - RETURN tmp.fp16 - } - IF k[0] - dst.fp16[0] := RoundScaleFP16(b.fp16[0], imm8) - ELSE - dst.fp16[0] := src.fp16[0] - FI - dst[127:16] := a[127:16] - dest[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + + + + + + + + Round the lower half-precision (16-bit) floating-point element in "b" to the number of fraction bits specified by "imm8", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". [round_imm_note][sae_note] + +DEFINE RoundScaleFP16(src.fp16, imm8[7:0]) { + m.fp16 := FP16(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp.fp16 := POW(FP16(2.0), -m) * ROUND(POW(FP16(2.0), m) * src.fp16, imm8[3:0]) + RETURN tmp.fp16 +} +IF k[0] + dst.fp16[0] := RoundScaleFP16(b.fp16[0], imm8) +ELSE + dst.fp16[0] := src.fp16[0] +FI +dst[127:16] := a[127:16] +dest[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - - Round the lower half-precision (16-bit) floating-point element in "b" to the - number of fraction bits specified by "imm8", store the result in the lower element of - "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and - copy the upper 7 packed elements from "a" to the upper elements of "dst". - [round_imm_note] - - DEFINE RoundScaleFP16(src.fp16, imm8[7:0]) { - m.fp16 := FP16(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp.fp16 := POW(FP16(2.0), -m) * ROUND(POW(FP16(2.0), m) * src.fp16, imm8[3:0]) - RETURN tmp.fp16 - } - IF k[0] - dst.fp16[0] := RoundScaleFP16(b.fp16[0], imm8) - ELSE - dst.fp16[0] := 0 - FI - dst[127:16] := a[127:16] - dest[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + + + + + + Round the lower half-precision (16-bit) floating-point element in "b" to the number of fraction bits specified by "imm8", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". [round_imm_note] + +DEFINE RoundScaleFP16(src.fp16, imm8[7:0]) { + m.fp16 := FP16(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp.fp16 := POW(FP16(2.0), -m) * ROUND(POW(FP16(2.0), m) * src.fp16, imm8[3:0]) + RETURN tmp.fp16 +} +IF k[0] + dst.fp16[0] := RoundScaleFP16(b.fp16[0], imm8) +ELSE + dst.fp16[0] := 0 +FI +dst[127:16] := a[127:16] +dest[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - - - Round the lower half-precision (16-bit) floating-point element in "b" to the - number of fraction bits specified by "imm8", store the result in the lower element of - "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and - copy the upper 7 packed elements from "a" to the upper elements of "dst". - [round_imm_note][sae_note] - - DEFINE RoundScaleFP16(src.fp16, imm8[7:0]) { - m.fp16 := FP16(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp.fp16 := POW(FP16(2.0), -m) * ROUND(POW(FP16(2.0), m) * src.fp16, imm8[3:0]) - RETURN tmp.fp16 - } - IF k[0] - dst.fp16[0] := RoundScaleFP16(b.fp16[0], imm8) - ELSE - dst.fp16[0] := 0 - FI - dst[127:16] := a[127:16] - dest[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + + + + + + + Round the lower half-precision (16-bit) floating-point element in "b" to the number of fraction bits specified by "imm8", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". [round_imm_note][sae_note] + +DEFINE RoundScaleFP16(src.fp16, imm8[7:0]) { + m.fp16 := FP16(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp.fp16 := POW(FP16(2.0), -m) * ROUND(POW(FP16(2.0), m) * src.fp16, imm8[3:0]) + RETURN tmp.fp16 +} +IF k[0] + dst.fp16[0] := RoundScaleFP16(b.fp16[0], imm8) +ELSE + dst.fp16[0] := 0 +FI +dst[127:16] := a[127:16] +dest[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - Convert the exponent of each packed half-precision (16-bit) floating-point - element in "a" to a half-precision (16-bit) floating-point number representing the - integer exponent, and store the results in "dst". This intrinsic essentially calculates - "floor(log2(x))" for each element. - FOR i := 0 to 31 - dst.fp16[i] := ConvertExpFP16(a.fp16[i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + + + Convert the exponent of each packed half-precision (16-bit) floating-point element in "a" to a half-precision (16-bit) floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element. + FOR i := 0 to 31 + dst.fp16[i] := ConvertExpFP16(a.fp16[i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
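Note: ConvertExpFP16 above extracts the exponent of its argument as a floating-point value, i.e. floor(log2(|x|)). A sketch of the finite, non-zero case; the special cases for zero, infinities, and NaN follow the usual IEEE-754 conventions and are omitted here (f32 stands in for f16, name illustrative):

// floor(log2(|x|)); e.g. get_exp_model(5.0) == 2.0, get_exp_model(0.25) == -2.0.
fn get_exp_model(x: f32) -> f32 {
    x.abs().log2().floor()
}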
- - - - Convert the exponent of each packed half-precision (16-bit) floating-point - element in "a" to a half-precision (16-bit) floating-point number representing the - integer exponent, and store the results in "dst". This intrinsic essentially calculates - "floor(log2(x))" for each element. [sae_note] - FOR i := 0 to 31 - dst.fp16[i] := ConvertExpFP16(a.fp16[i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + + + + Convert the exponent of each packed half-precision (16-bit) floating-point element in "a" to a half-precision (16-bit) floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element. [sae_note] + FOR i := 0 to 31 + dst.fp16[i] := ConvertExpFP16(a.fp16[i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - Convert the exponent of each packed half-precision (16-bit) floating-point - element in "a" to a half-precision (16-bit) floating-point number representing the - integer exponent, and store the results in "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). This intrinsic - essentially calculates "floor(log2(x))" for each element. - FOR i := 0 to 31 - IF k[i] - dst.fp16[i] := ConvertExpFP16(a.fp16[i]) - ELSE - dst.fp16[i] := src.fp16[i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + + + + + Convert the exponent of each packed half-precision (16-bit) floating-point element in "a" to a half-precision (16-bit) floating-point number representing the integer exponent, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element. + FOR i := 0 to 31 + IF k[i] + dst.fp16[i] := ConvertExpFP16(a.fp16[i]) + ELSE + dst.fp16[i] := src.fp16[i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - - Convert the exponent of each packed half-precision (16-bit) floating-point - element in "a" to a half-precision (16-bit) floating-point number representing the - integer exponent, and store the results in "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). This intrinsic - essentially calculates "floor(log2(x))" for each element. [sae_note] - FOR i := 0 to 31 - IF k[i] - dst.fp16[i] := ConvertExpFP16(a.fp16[i]) - ELSE - dst.fp16[i] := src.fp16[i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + + + + + + Convert the exponent of each packed half-precision (16-bit) floating-point element in "a" to a half-precision (16-bit) floating-point number representing the integer exponent, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element. [sae_note] + FOR i := 0 to 31 + IF k[i] + dst.fp16[i] := ConvertExpFP16(a.fp16[i]) + ELSE + dst.fp16[i] := src.fp16[i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - Convert the exponent of each packed half-precision (16-bit) floating-point - element in "a" to a half-precision (16-bit) floating-point number representing the - integer exponent, and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). This intrinsic essentially calculates - "floor(log2(x))" for each element. - FOR i := 0 to 31 - IF k[i] - dst.fp16[i] := ConvertExpFP16(a.fp16[i]) - ELSE - dst.fp16[i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + + + + Convert the exponent of each packed half-precision (16-bit) floating-point element in "a" to a half-precision (16-bit) floating-point number representing the integer exponent, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element. + FOR i := 0 to 31 + IF k[i] + dst.fp16[i] := ConvertExpFP16(a.fp16[i]) + ELSE + dst.fp16[i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - Convert the exponent of each packed half-precision (16-bit) floating-point - element in "a" to a half-precision (16-bit) floating-point number representing the - integer exponent, and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). This intrinsic essentially calculates - "floor(log2(x))" for each element. [sae_note] - FOR i := 0 to 31 - IF k[i] - dst.fp16[i] := ConvertExpFP16(a.fp16[i]) - ELSE - dst.fp16[i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + + + + + Convert the exponent of each packed half-precision (16-bit) floating-point element in "a" to a half-precision (16-bit) floating-point number representing the integer exponent, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element. [sae_note] + FOR i := 0 to 31 + IF k[i] + dst.fp16[i] := ConvertExpFP16(a.fp16[i]) + ELSE + dst.fp16[i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - Convert the exponent of the lower half-precision (16-bit) floating-point - element in "b" to a half-precision (16-bit) floating-point number representing the - integer exponent, store the result in the lower element of "dst", and copy the upper 7 - packed elements from "a" to the upper elements of "dst". This intrinsic essentially - calculates "floor(log2(x))" for the lower element. - dst.fp16[0] := ConvertExpFP16(b.fp16[0]) - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + + + + Convert the exponent of the lower half-precision (16-bit) floating-point element in "b" to a half-precision (16-bit) floating-point number representing the integer exponent, store the result in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst". This intrinsic essentially calculates "floor(log2(x))" for the lower element. + dst.fp16[0] := ConvertExpFP16(b.fp16[0]) +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - Convert the exponent of the lower half-precision (16-bit) floating-point - element in "b" to a half-precision (16-bit) floating-point number representing the - integer exponent, store the result in the lower element of "dst", and copy the upper 7 - packed elements from "a" to the upper elements of "dst". This intrinsic essentially - calculates "floor(log2(x))" for the lower element. [sae_note] - dst.fp16[0] := ConvertExpFP16(b.fp16[0]) - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + + + + + Convert the exponent of the lower half-precision (16-bit) floating-point element in "b" to a half-precision (16-bit) floating-point number representing the integer exponent, store the result in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst". This intrinsic essentially calculates "floor(log2(x))" for the lower element. [sae_note] + dst.fp16[0] := ConvertExpFP16(b.fp16[0]) +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - - Convert the exponent of the lower half-precision (16-bit) floating-point - element in "b" to a half-precision (16-bit) floating-point number representing the - integer exponent, store the result in the lower element of "dst" using writemask "k" - (the element is copied from "src" when mask bit 0 is not set), and copy the upper 7 - packed elements from "a" to the upper elements of "dst". This intrinsic essentially - calculates "floor(log2(x))" for the lower element. - IF k[0] - dst.fp16[0] := ConvertExpFP16(b.fp16[0]) - ELSE - dst.fp16[0] := src.fp16[0] - FI - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + + + + + + Convert the exponent of the lower half-precision (16-bit) floating-point element in "b" to a half-precision (16-bit) floating-point number representing the integer exponent, store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". This intrinsic essentially calculates "floor(log2(x))" for the lower element. + IF k[0] + dst.fp16[0] := ConvertExpFP16(b.fp16[0]) +ELSE + dst.fp16[0] := src.fp16[0] +FI +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - - - Convert the exponent of the lower half-precision (16-bit) floating-point - element in "b" to a half-precision (16-bit) floating-point number representing the - integer exponent, store the result in the lower element of "dst" using writemask "k" - (the element is copied from "src" when mask bit 0 is not set), and copy the upper 7 - packed elements from "a" to the upper elements of "dst". This intrinsic essentially - calculates "floor(log2(x))" for the lower element. [sae_note] - IF k[0] - dst.fp16[0] := ConvertExpFP16(b.fp16[0]) - ELSE - dst.fp16[0] := src.fp16[0] - FI - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + + + + + + + Convert the exponent of the lower half-precision (16-bit) floating-point element in "b" to a half-precision (16-bit) floating-point number representing the integer exponent, store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". This intrinsic essentially calculates "floor(log2(x))" for the lower element. [sae_note] + IF k[0] + dst.fp16[0] := ConvertExpFP16(b.fp16[0]) +ELSE + dst.fp16[0] := src.fp16[0] +FI +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - Convert the exponent of the lower half-precision (16-bit) floating-point - element in "b" to a half-precision (16-bit) floating-point number representing the - integer exponent, store the result in the lower element of "dst" using zeromask "k" (the - element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements - from "a" to the upper elements of "dst". This intrinsic essentially calculates - "floor(log2(x))" for the lower element. - IF k[0] - dst.fp16[0] := ConvertExpFP16(b.fp16[0]) - ELSE - dst.fp16[0] := 0 - FI - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + + + + + Convert the exponent of the lower half-precision (16-bit) floating-point element in "b" to a half-precision (16-bit) floating-point number representing the integer exponent, store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". This intrinsic essentially calculates "floor(log2(x))" for the lower element. + IF k[0] + dst.fp16[0] := ConvertExpFP16(b.fp16[0]) +ELSE + dst.fp16[0] := 0 +FI +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - - Convert the exponent of the lower half-precision (16-bit) floating-point - element in "b" to a half-precision (16-bit) floating-point number representing the - integer exponent, store the result in the lower element of "dst" using zeromask "k" (the - element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements - from "a" to the upper elements of "dst". This intrinsic essentially calculates - "floor(log2(x))" for the lower element. [sae_note] - IF k[0] - dst.fp16[0] := ConvertExpFP16(b.fp16[0]) - ELSE - dst.fp16[0] := 0 - FI - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + + + + + + Convert the exponent of the lower half-precision (16-bit) floating-point element in "b" to a half-precision (16-bit) floating-point number representing the integer exponent, store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". This intrinsic essentially calculates "floor(log2(x))" for the lower element. [sae_note] + IF k[0] + dst.fp16[0] := ConvertExpFP16(b.fp16[0]) +ELSE + dst.fp16[0] := 0 +FI +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - Normalize the mantissas of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst". This intrinsic essentially calculates - "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "norm" - and the sign depends on "sign" and the source sign. + + + + + Normalize the mantissas of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "norm" and the sign depends on "sign" and the source sign. [getmant_note] - FOR i := 0 TO 31 - dst.fp16[i] := GetNormalizedMantissaFP16(a.fp16[i], norm, sign) - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + FOR i := 0 TO 31 + dst.fp16[i] := GetNormalizedMantissaFP16(a.fp16[i], norm, sign) +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
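Note: GetNormalizedMantissaFP16 above strips the exponent, leaving a significand whose interval and sign handling are selected by "norm" and "sign" (see [getmant_note]). The following hedged sketch models only the default case, |result| in [1, 2) with the source sign; the other norm/sign encodings are omitted:

// Default getmant behaviour: 12.0 = 1.5 * 2^3, so get_mant_model(12.0) == 1.5.
fn get_mant_model(x: f32) -> f32 {
    let k = x.abs().log2().floor() as i32;
    x / 2f32.powi(k)
}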
- - - - - - Normalize the mantissas of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst". This intrinsic essentially calculates - "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "norm" - and the sign depends on "sign" and the source sign. + + + + + + Normalize the mantissas of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "norm" and the sign depends on "sign" and the source sign. [getmant_note][sae_note] - FOR i := 0 TO 31 - dst.fp16[i] := GetNormalizedMantissaFP16(a.fp16[i], norm, sign) - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + FOR i := 0 TO 31 + dst.fp16[i] := GetNormalizedMantissaFP16(a.fp16[i], norm, sign) +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - - - Normalize the mantissas of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). This intrinsic essentially - calculates "±(2^k)*|x.significand|", where "k" depends on the interval range - defined by "norm" and the sign depends on "sign" and the source sign. + + + + + + + Normalize the mantissas of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "norm" and the sign depends on "sign" and the source sign. [getmant_note] - FOR i := 0 TO 31 - IF k[i] - dst.fp16[i] := GetNormalizedMantissaFP16(a.fp16[i], norm, sign) - ELSE - dst.fp16[i] := src.fp16[i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + FOR i := 0 TO 31 + IF k[i] + dst.fp16[i] := GetNormalizedMantissaFP16(a.fp16[i], norm, sign) + ELSE + dst.fp16[i] := src.fp16[i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - - - - Normalize the mantissas of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). This intrinsic essentially - calculates "±(2^k)*|x.significand|", where "k" depends on the interval range - defined by "norm" and the sign depends on "sign" and the source sign. + + + + + + + + Normalize the mantissas of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "norm" and the sign depends on "sign" and the source sign. [getmant_note][sae_note] - FOR i := 0 TO 31 - IF k[i] - dst.fp16[i] := GetNormalizedMantissaFP16(a.fp16[i], norm, sign) - ELSE - dst.fp16[i] := src.fp16[i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + FOR i := 0 TO 31 + IF k[i] + dst.fp16[i] := GetNormalizedMantissaFP16(a.fp16[i], norm, sign) + ELSE + dst.fp16[i] := src.fp16[i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - - Normalize the mantissas of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). This intrinsic essentially calculates - "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "norm" - and the sign depends on "sign" and the source sign. + + + + + + Normalize the mantissas of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "norm" and the sign depends on "sign" and the source sign. [getmant_note] - FOR i := 0 TO 31 - IF k[i] - dst.fp16[i] := GetNormalizedMantissaFP16(a.fp16[i], norm, sign) - ELSE - dst.fp16[i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + FOR i := 0 TO 31 + IF k[i] + dst.fp16[i] := GetNormalizedMantissaFP16(a.fp16[i], norm, sign) + ELSE + dst.fp16[i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - - - Normalize the mantissas of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). This intrinsic essentially calculates - "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "norm" - and the sign depends on "sign" and the source sign. + + + + + + + Normalize the mantissas of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "norm" and the sign depends on "sign" and the source sign. [getmant_note][sae_note] - FOR i := 0 TO 31 - IF k[i] - dst.fp16[i] := GetNormalizedMantissaFP16(a.fp16[i], norm, sign) - ELSE - dst.fp16[i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + FOR i := 0 TO 31 + IF k[i] + dst.fp16[i] := GetNormalizedMantissaFP16(a.fp16[i], norm, sign) + ELSE + dst.fp16[i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - - Normalize the mantissas of the lower half-precision (16-bit) floating-point - element in "b", store the result in the lower element of "dst", and copy the upper 7 - packed elements from "a" to the upper elements of "dst". This intrinsic essentially - calculates "±(2^k)*|x.significand|", where "k" depends on the interval range - defined by "norm" and the sign depends on "sign" and the source sign. - [getmant_note] - dst.fp16[0] := GetNormalizedMantissaFP16(b.fp16[0], norm, sign) - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + + + + + + Normalize the mantissas of the lower half-precision (16-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "norm" and the sign depends on "sign" and the source sign. + [getmant_note] + dst.fp16[0] := GetNormalizedMantissaFP16(b.fp16[0], norm, sign) +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - - - Normalize the mantissas of the lower half-precision (16-bit) floating-point - element in "b", store the result in the lower element of "dst", and copy the upper 7 - packed elements from "a" to the upper elements of "dst". This intrinsic essentially - calculates "±(2^k)*|x.significand|", where "k" depends on the interval range - defined by "norm" and the sign depends on "sign" and the source sign. - [getmant_note][sae_note] - dst.fp16[0] := GetNormalizedMantissaFP16(b.fp16[0], norm, sign) - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + + + + + + + Normalize the mantissas of the lower half-precision (16-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "norm" and the sign depends on "sign" and the source sign. + [getmant_note][sae_note] + dst.fp16[0] := GetNormalizedMantissaFP16(b.fp16[0], norm, sign) +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - - - - Normalize the mantissas of the lower half-precision (16-bit) floating-point - element in "b", store the result in the lower element of "dst" using writemask "k" (the - element is copied from "src" when mask bit 0 is not set), and copy the upper 7 packed - elements from "a" to the upper elements of "dst". This intrinsic essentially calculates - "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "norm" - and the sign depends on "sign" and the source sign. - [getmant_note] - IF k[0] - dst.fp16[0] := GetNormalizedMantissaFP16(b.fp16[0], norm, sign) - ELSE - dst.fp16[0] := src.fp16[0] - FI - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + + + + + + + + Normalize the mantissas of the lower half-precision (16-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "norm" and the sign depends on "sign" and the source sign. + [getmant_note] + IF k[0] + dst.fp16[0] := GetNormalizedMantissaFP16(b.fp16[0], norm, sign) +ELSE + dst.fp16[0] := src.fp16[0] +FI +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - - - - - Normalize the mantissas of the lower half-precision (16-bit) floating-point - element in "b", store the result in the lower element of "dst" using writemask "k" (the - element is copied from "src" when mask bit 0 is not set), and copy the upper 7 packed - elements from "a" to the upper elements of "dst". This intrinsic essentially calculates - "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "norm" - and the sign depends on "sign" and the source sign. - [getmant_note][sae_note] - IF k[0] - dst.fp16[0] := GetNormalizedMantissaFP16(b.fp16[0], norm, sign) - ELSE - dst.fp16[0] := src.fp16[0] - FI - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + + + + + + + + + Normalize the mantissas of the lower half-precision (16-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "norm" and the sign depends on "sign" and the source sign. + [getmant_note][sae_note] + IF k[0] + dst.fp16[0] := GetNormalizedMantissaFP16(b.fp16[0], norm, sign) +ELSE + dst.fp16[0] := src.fp16[0] +FI +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - - - Normalize the mantissas of the lower half-precision (16-bit) floating-point - element in "b", store the result in the lower element of "dst" using zeromask "k" (the - element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements - from "a" to the upper elements of "dst". This intrinsic essentially calculates - "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "norm" - and the sign depends on "sign" and the source sign. - [getmant_note] - IF k[0] - dst.fp16[0] := GetNormalizedMantissaFP16(b.fp16[0], norm, sign) - ELSE - dst.fp16[0] := 0 - FI - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + + + + + + + Normalize the mantissas of the lower half-precision (16-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "norm" and the sign depends on "sign" and the source sign. + [getmant_note] + IF k[0] + dst.fp16[0] := GetNormalizedMantissaFP16(b.fp16[0], norm, sign) +ELSE + dst.fp16[0] := 0 +FI +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - - - - Normalize the mantissas of the lower half-precision (16-bit) floating-point - element in "b", store the result in the lower element of "dst" using zeromask "k" (the - element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements - from "a" to the upper elements of "dst". This intrinsic essentially calculates - "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "norm" - and the sign depends on "sign" and the source sign. - [getmant_note][sae_note] - IF k[0] - dst.fp16[0] := GetNormalizedMantissaFP16(b.fp16[0], norm, sign) - ELSE - dst.fp16[0] := 0 - FI - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + + + + + + + + Normalize the mantissas of the lower half-precision (16-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "norm" and the sign depends on "sign" and the source sign. + [getmant_note][sae_note] + IF k[0] + dst.fp16[0] := GetNormalizedMantissaFP16(b.fp16[0], norm, sign) +ELSE + dst.fp16[0] := 0 +FI +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
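The GetNormalizedMantissaFP16 helper above maps a value to "±(2^k)*|x.significand|" for the interval selected by "norm". A minimal scalar sketch of the default [1, 2) interval in Rust, using f32 as a stand-in because stable Rust has no f16 type yet (the function name is illustrative, not a stdarch API):

// Keep the sign and mantissa bits and force the exponent field to the bias,
// so the magnitude lands in [1, 2). Zeros, subnormals, NaNs and infinities
// are passed through untouched in this simplified model.
fn getmant_interval_1_2(x: f32) -> f32 {
    if !x.is_normal() {
        return x;
    }
    f32::from_bits((x.to_bits() & 0x807F_FFFF) | (127u32 << 23))
}

fn main() {
    // -12.0 = -1.5 * 2^3, so the normalized mantissa is -1.5 (sign kept).
    assert_eq!(getmant_interval_1_2(-12.0), -1.5);
}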
- - - - Extract the reduced argument of packed half-precision (16-bit) floating-point - elements in "a" by the number of bits specified by "imm8", and store the results in - "dst". [round_imm_note] - - DEFINE ReduceArgumentFP16(src[15:0], imm8[7:0]) { - m[15:0] := FP16(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[15:0] := POW(2.0, FP16(-m)) * ROUND(POW(2.0, FP16(m)) * src[15:0], imm8[3:0]) - tmp[15:0] := src[15:0] - tmp[15:0] - IF IsInf(tmp[15:0]) - tmp[15:0] := FP16(0.0) - FI - RETURN tmp[15:0] - } - FOR i := 0 to 31 - dst.fp16[i] := ReduceArgumentFP16(a.fp16[i], imm8) - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + + + + Extract the reduced argument of packed half-precision (16-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst". [round_imm_note] + +DEFINE ReduceArgumentFP16(src[15:0], imm8[7:0]) { + m[15:0] := FP16(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[15:0] := POW(2.0, FP16(-m)) * ROUND(POW(2.0, FP16(m)) * src[15:0], imm8[3:0]) + tmp[15:0] := src[15:0] - tmp[15:0] + IF IsInf(tmp[15:0]) + tmp[15:0] := FP16(0.0) + FI + RETURN tmp[15:0] +} +FOR i := 0 to 31 + dst.fp16[i] := ReduceArgumentFP16(a.fp16[i], imm8) +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - Extract the reduced argument of packed half-precision (16-bit) floating-point - elements in "a" by the number of bits specified by "imm8", and store the results in - "dst". [round_imm_note][sae_note] - - DEFINE ReduceArgumentFP16(src[15:0], imm8[7:0]) { - m[15:0] := FP16(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[15:0] := POW(2.0, FP16(-m)) * ROUND(POW(2.0, FP16(m)) * src[15:0], imm8[3:0]) - tmp[15:0] := src[15:0] - tmp[15:0] - IF IsInf(tmp[15:0]) - tmp[15:0] := FP16(0.0) - FI - RETURN tmp[15:0] - } - FOR i := 0 to 31 - dst.fp16[i] := ReduceArgumentFP16(a.fp16[i], imm8) - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + + + + + Extract the reduced argument of packed half-precision (16-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst". [round_imm_note][sae_note] + +DEFINE ReduceArgumentFP16(src[15:0], imm8[7:0]) { + m[15:0] := FP16(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[15:0] := POW(2.0, FP16(-m)) * ROUND(POW(2.0, FP16(m)) * src[15:0], imm8[3:0]) + tmp[15:0] := src[15:0] - tmp[15:0] + IF IsInf(tmp[15:0]) + tmp[15:0] := FP16(0.0) + FI + RETURN tmp[15:0] +} +FOR i := 0 to 31 + dst.fp16[i] := ReduceArgumentFP16(a.fp16[i], imm8) +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - - Extract the reduced argument of packed half-precision (16-bit) floating-point - elements in "a" by the number of bits specified by "imm8", and store the results in - "dst" using writemask "k" (elements are copied from "src" when the corresponding mask - bit is not set). [round_imm_note] - - DEFINE ReduceArgumentFP16(src[15:0], imm8[7:0]) { - m[15:0] := FP16(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[15:0] := POW(2.0, FP16(-m)) * ROUND(POW(2.0, FP16(m)) * src[15:0], imm8[3:0]) - tmp[15:0] := src[15:0] - tmp[15:0] - IF IsInf(tmp[15:0]) - tmp[15:0] := FP16(0.0) - FI - RETURN tmp[15:0] - } - FOR i := 0 to 31 - IF k[i] - dst.fp16[i] := ReduceArgumentFP16(a.fp16[i], imm8) - ELSE - dst.fp16[i] := src.fp16[i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + + + + + + Extract the reduced argument of packed half-precision (16-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note] + +DEFINE ReduceArgumentFP16(src[15:0], imm8[7:0]) { + m[15:0] := FP16(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[15:0] := POW(2.0, FP16(-m)) * ROUND(POW(2.0, FP16(m)) * src[15:0], imm8[3:0]) + tmp[15:0] := src[15:0] - tmp[15:0] + IF IsInf(tmp[15:0]) + tmp[15:0] := FP16(0.0) + FI + RETURN tmp[15:0] +} +FOR i := 0 to 31 + IF k[i] + dst.fp16[i] := ReduceArgumentFP16(a.fp16[i], imm8) + ELSE + dst.fp16[i] := src.fp16[i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - - - Extract the reduced argument of packed half-precision (16-bit) floating-point - elements in "a" by the number of bits specified by "imm8", and store the results in - "dst" using writemask "k" (elements are copied from "src" when the corresponding mask - bit is not set). [round_imm_note][sae_note] - - DEFINE ReduceArgumentFP16(src[15:0], imm8[7:0]) { - m[15:0] := FP16(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[15:0] := POW(2.0, FP16(-m)) * ROUND(POW(2.0, FP16(m)) * src[15:0], imm8[3:0]) - tmp[15:0] := src[15:0] - tmp[15:0] - IF IsInf(tmp[15:0]) - tmp[15:0] := FP16(0.0) - FI - RETURN tmp[15:0] - } - FOR i := 0 to 31 - IF k[i] - dst.fp16[i] := ReduceArgumentFP16(a.fp16[i], imm8) - ELSE - dst.fp16[i] := src.fp16[i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + + + + + + + Extract the reduced argument of packed half-precision (16-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note][sae_note] + +DEFINE ReduceArgumentFP16(src[15:0], imm8[7:0]) { + m[15:0] := FP16(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[15:0] := POW(2.0, FP16(-m)) * ROUND(POW(2.0, FP16(m)) * src[15:0], imm8[3:0]) + tmp[15:0] := src[15:0] - tmp[15:0] + IF IsInf(tmp[15:0]) + tmp[15:0] := FP16(0.0) + FI + RETURN tmp[15:0] +} +FOR i := 0 to 31 + IF k[i] + dst.fp16[i] := ReduceArgumentFP16(a.fp16[i], imm8) + ELSE + dst.fp16[i] := src.fp16[i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - Extract the reduced argument of packed half-precision (16-bit) floating-point - elements in "a" by the number of bits specified by "imm8", and store the results in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). [round_imm_note] - - DEFINE ReduceArgumentFP16(src[15:0], imm8[7:0]) { - m[15:0] := FP16(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[15:0] := POW(2.0, FP16(-m)) * ROUND(POW(2.0, FP16(m)) * src[15:0], imm8[3:0]) - tmp[15:0] := src[15:0] - tmp[15:0] - IF IsInf(tmp[15:0]) - tmp[15:0] := FP16(0.0) - FI - RETURN tmp[15:0] - } - FOR i := 0 to 31 - IF k[i] - dst.fp16[i] := ReduceArgumentFP16(a.fp16[i], imm8) - ELSE - dst.fp16[i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + + + + + Extract the reduced argument of packed half-precision (16-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note] + +DEFINE ReduceArgumentFP16(src[15:0], imm8[7:0]) { + m[15:0] := FP16(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[15:0] := POW(2.0, FP16(-m)) * ROUND(POW(2.0, FP16(m)) * src[15:0], imm8[3:0]) + tmp[15:0] := src[15:0] - tmp[15:0] + IF IsInf(tmp[15:0]) + tmp[15:0] := FP16(0.0) + FI + RETURN tmp[15:0] +} +FOR i := 0 to 31 + IF k[i] + dst.fp16[i] := ReduceArgumentFP16(a.fp16[i], imm8) + ELSE + dst.fp16[i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - - Extract the reduced argument of packed half-precision (16-bit) floating-point - elements in "a" by the number of bits specified by "imm8", and store the results in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). [round_imm_note][sae_note] - - DEFINE ReduceArgumentFP16(src[15:0], imm8[7:0]) { - m[15:0] := FP16(imm8[7:4]) // number of fraction bits after the binary point to be - preserved - tmp[15:0] := POW(2.0, FP16(-m)) * ROUND(POW(2.0, FP16(m)) * src[15:0], imm8[3:0]) - tmp[15:0] := src[15:0] - tmp[15:0] - IF IsInf(tmp[15:0]) - tmp[15:0] := FP16(0.0) - FI - RETURN tmp[15:0] - } - FOR i := 0 to 31 - IF k[i] - dst.fp16[i] := ReduceArgumentFP16(a.fp16[i], imm8) - ELSE - dst.fp16[i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + + + + + + Extract the reduced argument of packed half-precision (16-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note][sae_note] + +DEFINE ReduceArgumentFP16(src[15:0], imm8[7:0]) { + m[15:0] := FP16(imm8[7:4]) // number of fraction bits after the binary point to be preserved + tmp[15:0] := POW(2.0, FP16(-m)) * ROUND(POW(2.0, FP16(m)) * src[15:0], imm8[3:0]) + tmp[15:0] := src[15:0] - tmp[15:0] + IF IsInf(tmp[15:0]) + tmp[15:0] := FP16(0.0) + FI + RETURN tmp[15:0] +} +FOR i := 0 to 31 + IF k[i] + dst.fp16[i] := ReduceArgumentFP16(a.fp16[i], imm8) + ELSE + dst.fp16[i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
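ReduceArgumentFP16 above keeps imm8[7:4] fraction bits and returns the rounding residue. A scalar sketch, again with f32 standing in for f16 and round-to-nearest-even standing in for the imm8[3:0] rounding control (both simplifications, not the full imm8 semantics):

// Round `x` to `m` fraction bits, then return the residue x - round(x),
// mirroring tmp := 2^-m * ROUND(2^m * x) and src - tmp in the pseudocode.
fn reduce_arg(x: f32, m: u32) -> f32 {
    let scale = (1u32 << m) as f32; // 2^m
    let rounded = (x * scale).round_ties_even() / scale;
    let r = x - rounded;
    // The IsInf clamp from the pseudocode (NaN is also mapped to 0 here).
    if r.is_finite() { r } else { 0.0 }
}

fn main() {
    // With one fraction bit, 1.75 rounds to 2.0, so the residue is -0.25.
    assert_eq!(reduce_arg(1.75, 1), -0.25);
}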
- - - - Scale the packed half-precision (16-bit) floating-point elements in "a" using - values from "b", and store the results in "dst". - DEFINE ScaleFP16(src1, src2) { - denormal1 := (a.exp == 0) and (a.fraction != 0) - denormal2 := (b.exp == 0) and (b.fraction != 0) - tmp1 := src1 - tmp2 := src2 - IF MXCSR.DAZ - IF denormal1 + + + + Scale the packed half-precision (16-bit) floating-point elements in "a" using values from "b", and store the results in "dst". + DEFINE ScaleFP16(src1, src2) { + denormal1 := (a.exp == 0) and (a.fraction != 0) + denormal2 := (b.exp == 0) and (b.fraction != 0) + tmp1 := src1 + tmp2 := src2 + IF MXCSR.DAZ + IF denormal1 tmp1 := 0 - FI - IF denormal2 + FI + IF denormal2 tmp2 := 0 - FI - FI - RETURN tmp1 * POW(2.0, FLOOR(tmp2)) - } - FOR i := 0 to 15 - dst.fp16[i] := ScaleFP16(a.fp16[i], b.fp16[i]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + FI + FI + RETURN tmp1 * POW(2.0, FLOOR(tmp2)) +} +FOR i := 0 to 15 + dst.fp16[i] := ScaleFP16(a.fp16[i], b.fp16[i]) +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - Scale the packed half-precision (16-bit) floating-point elements in "a" using - values from "b", and store the results in "dst". - [round_note] - DEFINE ScaleFP16(src1, src2) { - denormal1 := (a.exp == 0) and (a.fraction != 0) - denormal2 := (b.exp == 0) and (b.fraction != 0) - tmp1 := src1 - tmp2 := src2 - IF MXCSR.DAZ - IF denormal1 + + + + + Scale the packed half-precision (16-bit) floating-point elements in "a" using values from "b", and store the results in "dst". + [round_note] + DEFINE ScaleFP16(src1, src2) { + denormal1 := (a.exp == 0) and (a.fraction != 0) + denormal2 := (b.exp == 0) and (b.fraction != 0) + tmp1 := src1 + tmp2 := src2 + IF MXCSR.DAZ + IF denormal1 tmp1 := 0 - FI - IF denormal2 + FI + IF denormal2 tmp2 := 0 - FI - FI - RETURN tmp1 * POW(2.0, FLOOR(tmp2)) - } - FOR i := 0 to 15 - dst.fp16[i] := ScaleFP16(a.fp16[i], b.fp16[i]) - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + FI + FI + RETURN tmp1 * POW(2.0, FLOOR(tmp2)) +} +FOR i := 0 to 15 + dst.fp16[i] := ScaleFP16(a.fp16[i], b.fp16[i]) +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - - Scale the packed half-precision (16-bit) floating-point elements in "a" using - values from "b", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - DEFINE ScaleFP16(src1, src2) { - denormal1 := (a.exp == 0) and (a.fraction != 0) - denormal2 := (b.exp == 0) and (b.fraction != 0) - tmp1 := src1 - tmp2 := src2 - IF MXCSR.DAZ - IF denormal1 + + + + + + Scale the packed half-precision (16-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + DEFINE ScaleFP16(src1, src2) { + denormal1 := (a.exp == 0) and (a.fraction != 0) + denormal2 := (b.exp == 0) and (b.fraction != 0) + tmp1 := src1 + tmp2 := src2 + IF MXCSR.DAZ + IF denormal1 tmp1 := 0 - FI - IF denormal2 + FI + IF denormal2 tmp2 := 0 - FI - FI - RETURN tmp1 * POW(2.0, FLOOR(tmp2)) - } - FOR i := 0 to 15 - IF k[i] - dst.fp16[i] := ScaleFP16(a.fp16[i], b.fp16[i]) - ELSE - dst.fp16[i] := src.fp16[i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + FI + FI + RETURN tmp1 * POW(2.0, FLOOR(tmp2)) +} +FOR i := 0 to 15 + IF k[i] + dst.fp16[i] := ScaleFP16(a.fp16[i], b.fp16[i]) + ELSE + dst.fp16[i] := src.fp16[i] + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - - - Scale the packed half-precision (16-bit) floating-point elements in "a" using - values from "b", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - [round_note] - DEFINE ScaleFP16(src1, src2) { - denormal1 := (a.exp == 0) and (a.fraction != 0) - denormal2 := (b.exp == 0) and (b.fraction != 0) - tmp1 := src1 - tmp2 := src2 - IF MXCSR.DAZ - IF denormal1 + + + + + + + Scale the packed half-precision (16-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + DEFINE ScaleFP16(src1, src2) { + denormal1 := (a.exp == 0) and (a.fraction != 0) + denormal2 := (b.exp == 0) and (b.fraction != 0) + tmp1 := src1 + tmp2 := src2 + IF MXCSR.DAZ + IF denormal1 tmp1 := 0 - FI - IF denormal2 + FI + IF denormal2 tmp2 := 0 - FI - FI - RETURN tmp1 * POW(2.0, FLOOR(tmp2)) - } - FOR i := 0 to 15 - IF k[i] - dst.fp16[i] := ScaleFP16(a.fp16[i], b.fp16[i]) - ELSE - dst.fp16[i] := src.fp16[i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + FI + FI + RETURN tmp1 * POW(2.0, FLOOR(tmp2)) +} +FOR i := 0 to 15 + IF k[i] + dst.fp16[i] := ScaleFP16(a.fp16[i], b.fp16[i]) + ELSE + dst.fp16[i] := src.fp16[i] + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - Scale the packed half-precision (16-bit) floating-point elements in "a" using - values from "b", and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - DEFINE ScaleFP16(src1, src2) { - denormal1 := (a.exp == 0) and (a.fraction != 0) - denormal2 := (b.exp == 0) and (b.fraction != 0) - tmp1 := src1 - tmp2 := src2 - IF MXCSR.DAZ - IF denormal1 + + + + + Scale the packed half-precision (16-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + DEFINE ScaleFP16(src1, src2) { + denormal1 := (a.exp == 0) and (a.fraction != 0) + denormal2 := (b.exp == 0) and (b.fraction != 0) + tmp1 := src1 + tmp2 := src2 + IF MXCSR.DAZ + IF denormal1 tmp1 := 0 - FI - IF denormal2 + FI + IF denormal2 tmp2 := 0 - FI - FI - RETURN tmp1 * POW(2.0, FLOOR(tmp2)) - } - FOR i := 0 to 15 - IF k[i] - dst.fp16[i] := ScaleFP16(a.fp16[i], b.fp16[i]) - ELSE - dst.fp16[i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + FI + FI + RETURN tmp1 * POW(2.0, FLOOR(tmp2)) +} +FOR i := 0 to 15 + IF k[i] + dst.fp16[i] := ScaleFP16(a.fp16[i], b.fp16[i]) + ELSE + dst.fp16[i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - - Scale the packed half-precision (16-bit) floating-point elements in "a" using - values from "b", and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - [round_note] - DEFINE ScaleFP16(src1, src2) { - denormal1 := (a.exp == 0) and (a.fraction != 0) - denormal2 := (b.exp == 0) and (b.fraction != 0) - tmp1 := src1 - tmp2 := src2 - IF MXCSR.DAZ - IF denormal1 + + + + + + Scale the packed half-precision (16-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + DEFINE ScaleFP16(src1, src2) { + denormal1 := (a.exp == 0) and (a.fraction != 0) + denormal2 := (b.exp == 0) and (b.fraction != 0) + tmp1 := src1 + tmp2 := src2 + IF MXCSR.DAZ + IF denormal1 tmp1 := 0 - FI - IF denormal2 + FI + IF denormal2 tmp2 := 0 - FI - FI - RETURN tmp1 * POW(2.0, FLOOR(tmp2)) - } - FOR i := 0 to 15 - IF k[i] - dst.fp16[i] := ScaleFP16(a.fp16[i], b.fp16[i]) - ELSE - dst.fp16[i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + FI + FI + RETURN tmp1 * POW(2.0, FLOOR(tmp2)) +} +FOR i := 0 to 15 + IF k[i] + dst.fp16[i] := ScaleFP16(a.fp16[i], b.fp16[i]) + ELSE + dst.fp16[i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - Scale the packed half-precision (16-bit) floating-point elements in "a" using - values from "b", store the result in the lower element of "dst", and copy the upper 7 - packed elements from "a" to the upper elements of "dst". - DEFINE ScaleFP16(src1, src2) { - denormal1 := (a.exp == 0) and (a.fraction != 0) - denormal2 := (b.exp == 0) and (b.fraction != 0) - tmp1 := src1 - tmp2 := src2 - IF MXCSR.DAZ - IF denormal1 + + + + Scale the packed half-precision (16-bit) floating-point elements in "a" using values from "b", store the result in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst". + DEFINE ScaleFP16(src1, src2) { + denormal1 := (a.exp == 0) and (a.fraction != 0) + denormal2 := (b.exp == 0) and (b.fraction != 0) + tmp1 := src1 + tmp2 := src2 + IF MXCSR.DAZ + IF denormal1 tmp1 := 0 - FI - IF denormal2 + FI + IF denormal2 tmp2 := 0 - FI - FI - RETURN tmp1 * POW(2.0, FLOOR(tmp2)) - } - dst.fp16[0] := ScaleFP16(a.fp16[0], b.fp16[0]) - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + FI + FI + RETURN tmp1 * POW(2.0, FLOOR(tmp2)) +} +dst.fp16[0] := ScaleFP16(a.fp16[0], b.fp16[0]) +dst[127:16] := a[127:16] +dst[MAX:128] := 0 +
+ + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - Scale the packed half-precision (16-bit) floating-point elements in "a" using - values from "b", store the result in the lower element of "dst", and copy the upper 7 - packed elements from "a" to the upper elements of "dst". - [round_note] - DEFINE ScaleFP16(src1, src2) { - denormal1 := (a.exp == 0) and (a.fraction != 0) - denormal2 := (b.exp == 0) and (b.fraction != 0) - tmp1 := src1 - tmp2 := src2 - IF MXCSR.DAZ - IF denormal1 + + + + + Scale the packed half-precision (16-bit) floating-point elements in "a" using values from "b", store the result in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst". + [round_note] + DEFINE ScaleFP16(src1, src2) { + denormal1 := (a.exp == 0) and (a.fraction != 0) + denormal2 := (b.exp == 0) and (b.fraction != 0) + tmp1 := src1 + tmp2 := src2 + IF MXCSR.DAZ + IF denormal1 tmp1 := 0 - FI - IF denormal2 + FI + IF denormal2 tmp2 := 0 - FI - FI - RETURN tmp1 * POW(2.0, FLOOR(tmp2)) - } - dst.fp16[0] := ScaleFP16(a.fp16[0], b.fp16[0]) - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + FI + FI + RETURN tmp1 * POW(2.0, FLOOR(tmp2)) +} +dst.fp16[0] := ScaleFP16(a.fp16[0], b.fp16[0]) +dst[127:16] := a[127:16] +dst[MAX:128] := 0 +
+ + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - - Scale the packed half-precision (16-bit) floating-point elements in "a" using - values from "b", store the result in the lower element of "dst" using writemask "k" (the - element is copied from "src" when mask bit 0 is not set), and copy the upper 7 packed - elements from "a" to the upper elements of "dst". - DEFINE ScaleFP16(src1, src2) { - denormal1 := (a.exp == 0) and (a.fraction != 0) - denormal2 := (b.exp == 0) and (b.fraction != 0) - tmp1 := src1 - tmp2 := src2 - IF MXCSR.DAZ - IF denormal1 + + + + + + Scale the packed half-precision (16-bit) floating-point elements in "a" using values from "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". + DEFINE ScaleFP16(src1, src2) { + denormal1 := (a.exp == 0) and (a.fraction != 0) + denormal2 := (b.exp == 0) and (b.fraction != 0) + tmp1 := src1 + tmp2 := src2 + IF MXCSR.DAZ + IF denormal1 tmp1 := 0 - FI - IF denormal2 + FI + IF denormal2 tmp2 := 0 - FI - FI - RETURN tmp1 * POW(2.0, FLOOR(tmp2)) - } - IF k[0] - dst.fp16[0] := ScaleFP16(a.fp16[0], b.fp16[0]) - ELSE - dst.fp16[0] := src.fp16[0] - FI - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + FI + FI + RETURN tmp1 * POW(2.0, FLOOR(tmp2)) +} +IF k[0] + dst.fp16[0] := ScaleFP16(a.fp16[0], b.fp16[0]) +ELSE + dst.fp16[0] := src.fp16[0] +FI +dst[127:16] := a[127:16] +dst[MAX:128] := 0 +
+ + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - - - Scale the packed half-precision (16-bit) floating-point elements in "a" using - values from "b", store the result in the lower element of "dst" using writemask "k" (the - element is copied from "src" when mask bit 0 is not set), and copy the upper 7 packed - elements from "a" to the upper elements of "dst". - [round_note] - DEFINE ScaleFP16(src1, src2) { - denormal1 := (a.exp == 0) and (a.fraction != 0) - denormal2 := (b.exp == 0) and (b.fraction != 0) - tmp1 := src1 - tmp2 := src2 - IF MXCSR.DAZ - IF denormal1 + + + + + + + Scale the packed half-precision (16-bit) floating-point elements in "a" using values from "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". + [round_note] + DEFINE ScaleFP16(src1, src2) { + denormal1 := (a.exp == 0) and (a.fraction != 0) + denormal2 := (b.exp == 0) and (b.fraction != 0) + tmp1 := src1 + tmp2 := src2 + IF MXCSR.DAZ + IF denormal1 tmp1 := 0 - FI - IF denormal2 + FI + IF denormal2 tmp2 := 0 - FI - FI - RETURN tmp1 * POW(2.0, FLOOR(tmp2)) - } - IF k[0] - dst.fp16[0] := ScaleFP16(a.fp16[0], b.fp16[0]) - ELSE - dst.fp16[0] := src.fp16[0] - FI - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + FI + FI + RETURN tmp1 * POW(2.0, FLOOR(tmp2)) +} +IF k[0] + dst.fp16[0] := ScaleFP16(a.fp16[0], b.fp16[0]) +ELSE + dst.fp16[0] := src.fp16[0] +FI +dst[127:16] := a[127:16] +dst[MAX:128] := 0 +
+ + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - Scale the packed half-precision (16-bit) floating-point elements in "a" using - values from "b", store the result in the lower element of "dst" using zeromask "k" (the - element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements - from "a" to the upper elements of "dst". - DEFINE ScaleFP16(src1, src2) { - denormal1 := (a.exp == 0) and (a.fraction != 0) - denormal2 := (b.exp == 0) and (b.fraction != 0) - tmp1 := src1 - tmp2 := src2 - IF MXCSR.DAZ - IF denormal1 + + + + + Scale the packed half-precision (16-bit) floating-point elements in "a" using values from "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". + DEFINE ScaleFP16(src1, src2) { + denormal1 := (a.exp == 0) and (a.fraction != 0) + denormal2 := (b.exp == 0) and (b.fraction != 0) + tmp1 := src1 + tmp2 := src2 + IF MXCSR.DAZ + IF denormal1 tmp1 := 0 - FI - IF denormal2 + FI + IF denormal2 tmp2 := 0 - FI - FI - RETURN tmp1 * POW(2.0, FLOOR(tmp2)) - } - IF k[0] - dst.fp16[0] := ScaleFP16(a.fp16[0], b.fp16[0]) - ELSE - dst.fp16[0] := 0 - FI - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + FI + FI + RETURN tmp1 * POW(2.0, FLOOR(tmp2)) +} +IF k[0] + dst.fp16[0] := ScaleFP16(a.fp16[0], b.fp16[0]) +ELSE + dst.fp16[0] := 0 +FI +dst[127:16] := a[127:16] +dst[MAX:128] := 0 +
+ + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - - Scale the packed half-precision (16-bit) floating-point elements in "a" using - values from "b", store the result in the lower element of "dst" using zeromask "k" (the - element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements - from "a" to the upper elements of "dst". - [round_note] - DEFINE ScaleFP16(src1, src2) { - denormal1 := (a.exp == 0) and (a.fraction != 0) - denormal2 := (b.exp == 0) and (b.fraction != 0) - tmp1 := src1 - tmp2 := src2 - IF MXCSR.DAZ - IF denormal1 + + + + + + Scale the packed half-precision (16-bit) floating-point elements in "a" using values from "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". + [round_note] + DEFINE ScaleFP16(src1, src2) { + denormal1 := (a.exp == 0) and (a.fraction != 0) + denormal2 := (b.exp == 0) and (b.fraction != 0) + tmp1 := src1 + tmp2 := src2 + IF MXCSR.DAZ + IF denormal1 tmp1 := 0 - FI - IF denormal2 + FI + IF denormal2 tmp2 := 0 - FI - FI - RETURN tmp1 * POW(2.0, FLOOR(tmp2)) - } - IF k[0] - dst.fp16[0] := ScaleFP16(a.fp16[0], b.fp16[0]) - ELSE - dst.fp16[0] := 0 - FI - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + FI + FI + RETURN tmp1 * POW(2.0, FLOOR(tmp2)) +} +IF k[0] + dst.fp16[0] := ScaleFP16(a.fp16[0], b.fp16[0]) +ELSE + dst.fp16[0] := 0 +FI +dst[127:16] := a[127:16] +dst[MAX:128] := 0 +
+ + AVX512_FP16 +
immintrin.h
+ Miscellaneous
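Stripped of the MXCSR.DAZ handling, ScaleFP16 reduces to src1 * 2^FLOOR(src2). A scalar Rust sketch (f32 stand-in; the denormal flushing in the pseudocode is deliberately ignored):

// dst = a * 2^floor(b). Real hardware may first flush denormal inputs to
// zero when MXCSR.DAZ is set; this model skips that step.
fn scalef(a: f32, b: f32) -> f32 {
    a * b.floor().exp2()
}

fn main() {
    assert_eq!(scalef(3.0, 1.9), 6.0);   // floor(1.9) = 1:   3 * 2^1
    assert_eq!(scalef(3.0, -1.1), 0.75); // floor(-1.1) = -2: 3 * 2^-2
}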
- - - - Test packed half-precision (16-bit) floating-point elements in "a" for special - categories specified by "imm8", and store the results in mask vector "k". - [fpclass_note] - FOR i := 0 to 31 - k[i] := CheckFPClass_FP16(a.fp16[i], imm8[7:0]) - ENDFOR - k[MAX:32] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + + + + Test packed half-precision (16-bit) floating-point elements in "a" for special categories specified by "imm8", and store the results in mask vector "k". + [fpclass_note] + FOR i := 0 to 31 + k[i] := CheckFPClass_FP16(a.fp16[i], imm8[7:0]) +ENDFOR +k[MAX:32] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - Test packed half-precision (16-bit) floating-point elements in "a" for special - categories specified by "imm8", and store the results in mask vector "k" using zeromask - "k1" (elements are zeroed out when the corresponding mask bit is not set). + + + + + Test packed half-precision (16-bit) floating-point elements in "a" for special categories specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). [fpclass_note] - FOR i := 0 to 31 - IF k1[i] - k[i] := CheckFPClass_FP16(a.fp16[i], imm8[7:0]) - ELSE - k[i] := 0 - FI - ENDFOR - k[MAX:32] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + FOR i := 0 to 31 + IF k1[i] + k[i] := CheckFPClass_FP16(a.fp16[i], imm8[7:0]) + ELSE + k[i] := 0 + FI +ENDFOR +k[MAX:32] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - Test the lower half-precision (16-bit) floating-point element in "a" for - special categories specified by "imm8", and store the result in mask vector "k". + + + + Test the lower half-precision (16-bit) floating-point element in "a" for special categories specified by "imm8", and store the result in mask vector "k". [fpclass_note] - k[0] := CheckFPClass_FP16(a.fp16[0], imm8[7:0]) - k[MAX:1] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + k[0] := CheckFPClass_FP16(a.fp16[0], imm8[7:0]) +k[MAX:1] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - Test the lower half-precision (16-bit) floating-point element in "a" for - special categories specified by "imm8", and store the result in mask vector "k" using - zeromask "k1" (the element is zeroed out when mask bit 0 is not set). - [fpclass_note] - IF k1[0] - k[0] := CheckFPClass_FP16(a.fp16[0], imm8[7:0]) - ELSE - k[0] := 0 - FI - k[MAX:1] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + + + + + Test the lower half-precision (16-bit) floating-point element in "a" for special categories specified by "imm8", and store the result in mask vector "k" using zeromask "k1" (the element is zeroed out when mask bit 0 is not set). + [fpclass_note] + IF k1[0] + k[0] := CheckFPClass_FP16(a.fp16[0], imm8[7:0]) +ELSE + k[0] := 0 +FI +k[MAX:1] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
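CheckFPClass_FP16 tests the categories selected by imm8. A scalar sketch over f32, assuming the usual VFPCLASS bit layout from the [fpclass_note] (QNaN 0x01, +0 0x02, -0 0x04, +Inf 0x08, -Inf 0x10, denormal 0x20, finite negative 0x40, SNaN 0x80); the QNaN/SNaN split is collapsed into a single NaN test here:

fn check_fp_class(x: f32, imm8: u8) -> bool {
    (imm8 & 0x01 != 0 && x.is_nan())                              // NaN (QNaN/SNaN merged)
        || (imm8 & 0x02 != 0 && x == 0.0 && x.is_sign_positive()) // +0
        || (imm8 & 0x04 != 0 && x == 0.0 && x.is_sign_negative()) // -0
        || (imm8 & 0x08 != 0 && x == f32::INFINITY)               // +Inf
        || (imm8 & 0x10 != 0 && x == f32::NEG_INFINITY)           // -Inf
        || (imm8 & 0x20 != 0 && x.is_subnormal())                 // denormal
        || (imm8 & 0x40 != 0 && x.is_finite() && x < 0.0)         // finite negative
}

fn main() {
    assert!(check_fp_class(f32::NEG_INFINITY, 0x18)); // "any infinity" mask
    assert!(!check_fp_class(1.0, 0x7F));
}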
- - - - - Shuffle half-precision (16-bit) floating-point elements in "a" and "b" across - lanes using the corresponding selector and index in "idx", and store the results in - "dst". - - FOR j := 0 to 31 - i := j*16 - off := idx[i+4:i] - dst.fp16[j] := idx[i+5] ? b.fp16[off] : a.fp16[off] - ENDFOR - dst[MAX:512] := 0 - - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + + + + + Shuffle half-precision (16-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst". + +FOR j := 0 to 31 + i := j*16 + off := idx[i+4:i] + dst.fp16[j] := idx[i+5] ? b.fp16[off] : a.fp16[off] +ENDFOR +dst[MAX:512] := 0 + + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - - Blend packed half-precision (16-bit) floating-point elements from "a" and "b" - using control mask "k", and store the results in "dst". - - FOR j := 0 to 31 - IF k[j] - dst.fp16[j] := b.fp16[j] - ELSE - dst.fp16[j] := a.fp16[j] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + + + + + Blend packed half-precision (16-bit) floating-point elements from "a" and "b" using control mask "k", and store the results in "dst". + +FOR j := 0 to 31 + IF k[j] + dst.fp16[j] := b.fp16[j] + ELSE + dst.fp16[j] := a.fp16[j] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
- - - - Shuffle half-precision (16-bit) floating-point elements in "a" across lanes - using the corresponding index in "idx", and store the results in "dst". - - FOR j := 0 to 31 - i := j*16 - id := idx[i+4:i] - dst.fp16[j] := a.fp16[id] - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Miscellaneous + + + + Shuffle half-precision (16-bit) floating-point elements in "a" across lanes using the corresponding index in "idx", and store the results in "dst". + +FOR j := 0 to 31 + i := j*16 + id := idx[i+4:i] + dst.fp16[j] := a.fp16[id] +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Miscellaneous
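In the two-source shuffle above, bit 5 of each 16-bit index lane selects between "a" and "b" and bits 4:0 select the lane. A sketch over raw 16-bit lanes (a shuffle never interprets the f16 payload, so u16 serves as the element type; the name is illustrative):

fn permutex2var(a: &[u16; 32], idx: &[u16; 32], b: &[u16; 32]) -> [u16; 32] {
    let mut dst = [0u16; 32];
    for j in 0..32 {
        let off = (idx[j] & 0x1F) as usize; // idx[i+4:i]: lane selector
        dst[j] = if idx[j] & 0x20 != 0 { b[off] } else { a[off] }; // idx[i+5]: source selector
    }
    dst
}

fn main() {
    let a: [u16; 32] = core::array::from_fn(|i| i as u16);
    let b: [u16; 32] = core::array::from_fn(|i| 100 + i as u16);
    let idx = [0x20u16; 32]; // every lane: source b, lane 0
    assert!(permutex2var(&a, &idx, &b).iter().all(|&x| x == 100));
}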
- - - Compute the approximate reciprocal square root of packed half-precision - (16-bit) floating-point elements in "a", and store the results in "dst". The maximum - relative error for this approximation is less than 1.5*2^-12. - - FOR i := 0 to 31 - dst.fp16[i] := (1.0 / SQRT(a.fp16[i])) - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Elementary Math Functions + + + Compute the approximate reciprocal square root of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 1.5*2^-12. + +FOR i := 0 to 31 + dst.fp16[i] := (1.0 / SQRT(a.fp16[i])) +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the approximate reciprocal square root of packed half-precision - (16-bit) floating-point elements in "a", and store the results in "dst" using writemask - "k" (elements are copied from "src" when the corresponding mask bit is not set). The - maximum relative error for this approximation is less than 1.5*2^-12. - - FOR i := 0 to 31 - IF k[i] - dst.fp16[i] := (1.0 / SQRT(a.fp16[i])) - ELSE - dst.fp16[i] := src.fp16[i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Elementary Math Functions + + + + + Compute the approximate reciprocal square root of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 1.5*2^-12. + +FOR i := 0 to 31 + IF k[i] + dst.fp16[i] := (1.0 / SQRT(a.fp16[i])) + ELSE + dst.fp16[i] := src.fp16[i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Elementary Math Functions
- - - - Compute the approximate reciprocal square root of packed half-precision - (16-bit) floating-point elements in "a", and store the results in "dst" using zeromask - "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum - relative error for this approximation is less than 1.5*2^-12. - - FOR i := 0 to 31 - IF k[i] - dst.fp16[i] := (1.0 / SQRT(a.fp16[i])) - ELSE - dst.fp16[i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Elementary Math Functions + + + + Compute the approximate reciprocal square root of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 1.5*2^-12. + +FOR i := 0 to 31 + IF k[i] + dst.fp16[i] := (1.0 / SQRT(a.fp16[i])) + ELSE + dst.fp16[i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Elementary Math Functions
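The writemask and zeromask variants above all share one pattern: lane i takes the computed value when bit i of "k" is set, and otherwise either the src lane (writemask) or zero (zeromask). A sketch of both, using the rsqrt kernel and f32 stand-ins:

fn mask_rsqrt(src: &[f32; 32], k: u32, a: &[f32; 32]) -> [f32; 32] {
    let mut dst = *src; // unselected lanes keep their src value
    for i in 0..32 {
        if k & (1 << i) != 0 {
            dst[i] = 1.0 / a[i].sqrt();
        }
    }
    dst
}

// The zeromask form is just the writemask form with an all-zero src.
fn maskz_rsqrt(k: u32, a: &[f32; 32]) -> [f32; 32] {
    mask_rsqrt(&[0.0; 32], k, a)
}

fn main() {
    let a = [4.0f32; 32];
    let r = maskz_rsqrt(0b1, &a); // only lane 0 selected
    assert_eq!(r[0], 0.5);
    assert_eq!(r[1], 0.0);
}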
- - - - Compute the approximate reciprocal square root of the lower half-precision - (16-bit) floating-point element in "b", store the result in the lower element of "dst", - and copy the upper 7 packed elements from "a" to the upper elements of "dst". The - maximum relative error for this approximation is less than 1.5*2^-12. - - dst.fp16[0] := (1.0 / SQRT(b.fp16[0])) - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Elementary Math Functions + + + + Compute the approximate reciprocal square root of the lower half-precision (16-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 1.5*2^-12. + +dst.fp16[0] := (1.0 / SQRT(b.fp16[0])) +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Elementary Math Functions
- - - - - - Compute the approximate reciprocal square root of the lower half-precision - (16-bit) floating-point element in "b", store the result in the lower element of "dst" - using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and - copy the upper 7 packed elements from "a" to the upper elements of "dst". The maximum - relative error for this approximation is less than 1.5*2^-12. - - IF k[0] - dst.fp16[0] := (1.0 / SQRT(b.fp16[0])) - ELSE - dst.fp16[0] := src.fp16[0] - FI - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Elementary Math Functions + + + + + + Compute the approximate reciprocal square root of the lower half-precision (16-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 1.5*2^-12. + +IF k[0] + dst.fp16[0] := (1.0 / SQRT(b.fp16[0])) +ELSE + dst.fp16[0] := src.fp16[0] +FI +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the approximate reciprocal square root of the lower half-precision - (16-bit) floating-point element in "b", store the result in the lower element of "dst" - using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the - upper 7 packed elements from "a" to the upper elements of "dst". The maximum relative - error for this approximation is less than 1.5*2^-12. - - IF k[0] - dst.fp16[0] := (1.0 / SQRT(b.fp16[0])) - ELSE - dst.fp16[0] := 0 - FI - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Elementary Math Functions -
- - - - Compute the square root of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst". - - FOR i := 0 to 31 - dst.fp16[i] := SQRT(a.fp16[i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Elementary Math Functions + + + + + Compute the approximate reciprocal square root of the lower half-precision (16-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 1.5*2^-12. + +IF k[0] + dst.fp16[0] := (1.0 / SQRT(b.fp16[0])) +ELSE + dst.fp16[0] := 0 +FI +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Elementary Math Functions +
+ + + + Compute the square root of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". + +FOR i := 0 to 31 + dst.fp16[i] := SQRT(a.fp16[i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Elementary Math Functions
- - - - Compute the square root of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst". - [round_note] - - FOR i := 0 to 31 - dst.fp16[i] := SQRT(a.fp16[i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Elementary Math Functions + + + + Compute the square root of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". + [round_note] + +FOR i := 0 to 31 + dst.fp16[i] := SQRT(a.fp16[i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the square root of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - - FOR i := 0 to 31 - IF k[i] - dst.fp16[i] := SQRT(a.fp16[i]) - ELSE - dst.fp16[i] := src.fp16[i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Elementary Math Functions + + + + + Compute the square root of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR i := 0 to 31 + IF k[i] + dst.fp16[i] := SQRT(a.fp16[i]) + ELSE + dst.fp16[i] := src.fp16[i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Elementary Math Functions
- - - - - - Compute the square root of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst" using writemask "k" (elements are copied - from "src" when the corresponding mask bit is not set). - [round_note] - - FOR i := 0 to 31 - IF k[i] - dst.fp16[i] := SQRT(a.fp16[i]) - ELSE - dst.fp16[i] := src.fp16[i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Elementary Math Functions + + + + + + Compute the square root of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + [round_note] + +FOR i := 0 to 31 + IF k[i] + dst.fp16[i] := SQRT(a.fp16[i]) + ELSE + dst.fp16[i] := src.fp16[i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Elementary Math Functions
- - - - Compute the square root of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - - FOR i := 0 to 31 - IF k[i] - dst.fp16[i] := SQRT(a.fp16[i]) - ELSE - dst.fp16[i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Elementary Math Functions + + + + Compute the square root of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR i := 0 to 31 + IF k[i] + dst.fp16[i] := SQRT(a.fp16[i]) + ELSE + dst.fp16[i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the square root of packed half-precision (16-bit) floating-point - elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - [round_note] - - FOR i := 0 to 31 - IF k[i] - dst.fp16[i] := SQRT(a.fp16[i]) - ELSE - dst.fp16[i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Elementary Math Functions + + + + + Compute the square root of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + [round_note] + +FOR i := 0 to 31 + IF k[i] + dst.fp16[i] := SQRT(a.fp16[i]) + ELSE + dst.fp16[i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Elementary Math Functions
- - - - Compute the square root of the lower half-precision (16-bit) floating-point - element in "b", store the result in the lower element of "dst", and copy the upper 7 - packed elements from "a" to the upper elements of "dst". - - dst.fp16[0] := SQRT(b.fp16[0]) - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Elementary Math Functions + + + + Compute the square root of the lower half-precision (16-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst". + +dst.fp16[0] := SQRT(b.fp16[0]) +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the square root of the lower half-precision (16-bit) floating-point - element in "b", store the result in the lower element of "dst", and copy the upper 7 - packed elements from "a" to the upper elements of "dst". - [round_note] - - dst.fp16[0] := SQRT(b.fp16[0]) - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Elementary Math Functions + + + + + Compute the square root of the lower half-precision (16-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst". + [round_note] + +dst.fp16[0] := SQRT(b.fp16[0]) +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Elementary Math Functions
- - - - - - Compute the square root of the lower half-precision (16-bit) floating-point - element in "b", store the result in the lower element of "dst" using writemask "k" (the - element is copied from "src" when mask bit 0 is not set), and copy the upper 7 packed - elements from "a" to the upper elements of "dst". - - IF k[0] - dst.fp16[0] := SQRT(b.fp16[0]) - ELSE - dst.fp16[0] := src.fp16[0] - FI - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Elementary Math Functions + + + + + + Compute the square root of the lower half-precision (16-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst.fp16[0] := SQRT(b.fp16[0]) +ELSE + dst.fp16[0] := src.fp16[0] +FI +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Elementary Math Functions
- - - - - - - Compute the square root of the lower half-precision (16-bit) floating-point - element in "b", store the result in the lower element of "dst" using writemask "k" (the - element is copied from "src" when mask bit 0 is not set), and copy the upper 7 packed - elements from "a" to the upper elements of "dst". - [round_note] - - IF k[0] - dst.fp16[0] := SQRT(b.fp16[0]) - ELSE - dst.fp16[0] := src.fp16[0] - FI - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Elementary Math Functions + + + + + + + Compute the square root of the lower half-precision (16-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". + [round_note] + +IF k[0] + dst.fp16[0] := SQRT(b.fp16[0]) +ELSE + dst.fp16[0] := src.fp16[0] +FI +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the square root of the lower half-precision (16-bit) floating-point - element in "b", store the result in the lower element of "dst" using zeromask "k" (the - element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements - from "a" to the upper elements of "dst". - - IF k[0] - dst.fp16[0] := SQRT(b.fp16[0]) - ELSE - dst.fp16[0] := 0 - FI - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Elementary Math Functions + + + + + Compute the square root of the lower half-precision (16-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". + +IF k[0] + dst.fp16[0] := SQRT(b.fp16[0]) +ELSE + dst.fp16[0] := 0 +FI +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Elementary Math Functions
- - - - - - Compute the square root of the lower half-precision (16-bit) floating-point - element in "b", store the result in the lower element of "dst" using zeromask "k" (the - element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements - from "a" to the upper elements of "dst". - [round_note] - - IF k[0] - dst.fp16[0] := SQRT(b.fp16[0]) - ELSE - dst.fp16[0] := 0 - FI - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Elementary Math Functions + + + + + + Compute the square root of the lower half-precision (16-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". + [round_note] + +IF k[0] + dst.fp16[0] := SQRT(b.fp16[0]) +ELSE + dst.fp16[0] := 0 +FI +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Elementary Math Functions
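The scalar forms above compute only lane 0 and pass the upper lanes of "a" straight through (dst[127:16] := a[127:16]). A sketch of the unmasked square-root form over an 8-lane f32 stand-in vector:

// Lane 0 is SQRT(b[0]); the other seven lanes are copied from `a`.
fn sqrt_sh(a: [f32; 8], b: [f32; 8]) -> [f32; 8] {
    let mut dst = a;
    dst[0] = b[0].sqrt();
    dst
}

fn main() {
    let r = sqrt_sh([1.0; 8], [9.0; 8]);
    assert_eq!(r[0], 3.0);
    assert_eq!(r[7], 1.0);
}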
- - - Compute the approximate reciprocal of packed half-precision (16-bit) - floating-point elements in "a", and store the results in "dst". The maximum relative - error for this approximation is less than 1.5*2^-12. - - FOR i := 0 to 31 - dst.fp16[i] := (1.0 / a.fp16[i]) - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Elementary Math Functions + + + Compute the approximate reciprocal of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 1.5*2^-12. + +FOR i := 0 to 31 + dst.fp16[i] := (1.0 / a.fp16[i]) +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the approximate reciprocal of packed half-precision (16-bit) - floating-point elements in "a", and store the results in "dst" using writemask "k" - (elements are copied from "src" when the corresponding mask bit is not set). The maximum - relative error for this approximation is less than 1.5*2^-12. - - FOR i := 0 to 31 - IF k[i] - dst.fp16[i] := (1.0 / a.fp16[i]) - ELSE - dst.fp16[i] := src.fp16[i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Elementary Math Functions + + + + + Compute the approximate reciprocal of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 1.5*2^-12. + +FOR i := 0 to 31 + IF k[i] + dst.fp16[i] := (1.0 / a.fp16[i]) + ELSE + dst.fp16[i] := src.fp16[i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Elementary Math Functions
- - - - Compute the approximate reciprocal of packed half-precision (16-bit) - floating-point elements in "a", and store the results in "dst" using zeromask "k" - (elements are zeroed out when the corresponding mask bit is not set). The maximum - relative error for this approximation is less than 1.5*2^-12. - - FOR i := 0 to 31 - IF k[i] - dst.fp16[i] := (1.0 / a.fp16[i]) - ELSE - dst.fp16[i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_FP16 -
immintrin.h
- Elementary Math Functions + + + + Compute the approximate reciprocal of packed half-precision (16-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 1.5*2^-12. + +FOR i := 0 to 31 + IF k[i] + dst.fp16[i] := (1.0 / a.fp16[i]) + ELSE + dst.fp16[i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_FP16 +
immintrin.h
+ Elementary Math Functions
- - - - Compute the approximate reciprocal of the lower half-precision (16-bit) - floating-point element in "b", store the result in the lower element of "dst", and copy - the upper 7 packed elements from "a" to the upper elements of "dst". The maximum - relative error for this approximation is less than 1.5*2^-12. - - dst.fp16[0] := (1.0 / b.fp16[0]) - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Elementary Math Functions + + + + Compute the approximate reciprocal of the lower half-precision (16-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper 7 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 1.5*2^-12. + +dst.fp16[0] := (1.0 / b.fp16[0]) +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Elementary Math Functions
- - - - - - Compute the approximate reciprocal of the lower half-precision (16-bit) - floating-point element in "b", store the result in the lower element of "dst" using - writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy - the upper 7 packed elements from "a" to the upper elements of "dst". The maximum - relative error for this approximation is less than 1.5*2^-12. - - IF k[0] - dst.fp16[0] := (1.0 / b.fp16[0]) - ELSE - dst.fp16[0] := src.fp16[0] - FI - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Elementary Math Functions + + + + + + Compute the approximate reciprocal of the lower half-precision (16-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 1.5*2^-12. + +IF k[0] + dst.fp16[0] := (1.0 / b.fp16[0]) +ELSE + dst.fp16[0] := src.fp16[0] +FI +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Elementary Math Functions
- - - - - Compute the approximate reciprocal of the lower half-precision (16-bit) - floating-point element in "b", store the result in the lower element of "dst" using - zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper - 7 packed elements from "a" to the upper elements of "dst". The maximum relative error - for this approximation is less than 1.5*2^-12. - - IF k[0] - dst.fp16[0] := (1.0 / b.fp16[0]) - ELSE - dst.fp16[0] := 0 - FI - dst[127:16] := a[127:16] - dst[MAX:128] := 0 - - - AVX512_FP16 -
immintrin.h
- Elementary Math Functions + + + + + Compute the approximate reciprocal of the lower half-precision (16-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 7 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 1.5*2^-12. + +IF k[0] + dst.fp16[0] := (1.0 / b.fp16[0]) +ELSE + dst.fp16[0] := 0 +FI +dst[127:16] := a[127:16] +dst[MAX:128] := 0 + + + AVX512_FP16 +
immintrin.h
+ Elementary Math Functions
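A plain division models the reciprocal; the exact quotient trivially satisfies the 1.5*2^-12 relative-error bound that the hardware guarantees for its faster table-based estimate:

fn rcp(x: f32) -> f32 {
    1.0 / x
}

fn main() {
    // |est - 1/x| <= 1.5 * 2^-12 * |1/x| is the documented bound; the exact
    // quotient has zero error, so this check is a tautology for this model.
    let x = 3.0f32;
    let rel_err = (rcp(x) - 1.0 / x).abs() * x;
    assert!(rel_err <= 1.5 * (2.0f32).powi(-12));
}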
- - - - - - - - - - Set packed half-precision (16-bit) floating-point elements in "dst" with the - supplied values. - - dst.fp16[0] := e0 - dst.fp16[1] := e1 - dst.fp16[2] := e2 - dst.fp16[3] := e3 - dst.fp16[4] := e4 - dst.fp16[5] := e5 - dst.fp16[6] := e6 - dst.fp16[7] := e7 - - AVX512_FP16 -
immintrin.h
- Set + + + + + + + + + + Set packed half-precision (16-bit) floating-point elements in "dst" with the supplied values. + +dst.fp16[0] := e0 +dst.fp16[1] := e1 +dst.fp16[2] := e2 +dst.fp16[3] := e3 +dst.fp16[4] := e4 +dst.fp16[5] := e5 +dst.fp16[6] := e6 +dst.fp16[7] := e7 + + AVX512_FP16 +
immintrin.h
+ Set
- - - - - - - - - - - - - - - - - - Set packed half-precision (16-bit) floating-point elements in "dst" with the - supplied values. - - dst.fp16[0] := e0 - dst.fp16[1] := e1 - dst.fp16[2] := e2 - dst.fp16[3] := e3 - dst.fp16[4] := e4 - dst.fp16[5] := e5 - dst.fp16[6] := e6 - dst.fp16[7] := e7 - dst.fp16[8] := e8 - dst.fp16[9] := e9 - dst.fp16[10] := e10 - dst.fp16[11] := e11 - dst.fp16[12] := e12 - dst.fp16[13] := e13 - dst.fp16[14] := e14 - dst.fp16[15] := e15 - - AVX512_FP16 -
immintrin.h
- Set + + + + + + + + + + + + + + + + + + Set packed half-precision (16-bit) floating-point elements in "dst" with the supplied values. + +dst.fp16[0] := e0 +dst.fp16[1] := e1 +dst.fp16[2] := e2 +dst.fp16[3] := e3 +dst.fp16[4] := e4 +dst.fp16[5] := e5 +dst.fp16[6] := e6 +dst.fp16[7] := e7 +dst.fp16[8] := e8 +dst.fp16[9] := e9 +dst.fp16[10] := e10 +dst.fp16[11] := e11 +dst.fp16[12] := e12 +dst.fp16[13] := e13 +dst.fp16[14] := e14 +dst.fp16[15] := e15 + + AVX512_FP16 +
immintrin.h
+ Set
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Set packed half-precision (16-bit) floating-point elements in "dst" with the - supplied values. - - dst.fp16[0] := e0 - dst.fp16[1] := e1 - dst.fp16[2] := e2 - dst.fp16[3] := e3 - dst.fp16[4] := e4 - dst.fp16[5] := e5 - dst.fp16[6] := e6 - dst.fp16[7] := e7 - dst.fp16[8] := e8 - dst.fp16[9] := e9 - dst.fp16[10] := e10 - dst.fp16[11] := e11 - dst.fp16[12] := e12 - dst.fp16[13] := e13 - dst.fp16[14] := e14 - dst.fp16[15] := e15 - dst.fp16[16] := e16 - dst.fp16[17] := e17 - dst.fp16[18] := e18 - dst.fp16[19] := e19 - dst.fp16[20] := e20 - dst.fp16[21] := e21 - dst.fp16[22] := e22 - dst.fp16[23] := e23 - dst.fp16[24] := e24 - dst.fp16[25] := e25 - dst.fp16[26] := e26 - dst.fp16[27] := e27 - dst.fp16[28] := e28 - dst.fp16[29] := e29 - dst.fp16[30] := e30 - dst.fp16[31] := e31 - - AVX512_FP16 -
immintrin.h
- Set + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Set packed half-precision (16-bit) floating-point elements in "dst" with the supplied values. + +dst.fp16[0] := e0 +dst.fp16[1] := e1 +dst.fp16[2] := e2 +dst.fp16[3] := e3 +dst.fp16[4] := e4 +dst.fp16[5] := e5 +dst.fp16[6] := e6 +dst.fp16[7] := e7 +dst.fp16[8] := e8 +dst.fp16[9] := e9 +dst.fp16[10] := e10 +dst.fp16[11] := e11 +dst.fp16[12] := e12 +dst.fp16[13] := e13 +dst.fp16[14] := e14 +dst.fp16[15] := e15 +dst.fp16[16] := e16 +dst.fp16[17] := e17 +dst.fp16[18] := e18 +dst.fp16[19] := e19 +dst.fp16[20] := e20 +dst.fp16[21] := e21 +dst.fp16[22] := e22 +dst.fp16[23] := e23 +dst.fp16[24] := e24 +dst.fp16[25] := e25 +dst.fp16[26] := e26 +dst.fp16[27] := e27 +dst.fp16[28] := e28 +dst.fp16[29] := e29 +dst.fp16[30] := e30 +dst.fp16[31] := e31 + + AVX512_FP16 +
immintrin.h
+ Set
- - - - - - - - - - Set packed half-precision (16-bit) floating-point elements in "dst" with the - supplied values in reverse order. - - dst.fp16[0] := e7 - dst.fp16[1] := e6 - dst.fp16[2] := e5 - dst.fp16[3] := e4 - dst.fp16[4] := e3 - dst.fp16[5] := e2 - dst.fp16[6] := e1 - dst.fp16[7] := e0 - - AVX512_FP16 -
immintrin.h
- Set + + + + + + + + + + Set packed half-precision (16-bit) floating-point elements in "dst" with the supplied values in reverse order. + +dst.fp16[0] := e7 +dst.fp16[1] := e6 +dst.fp16[2] := e5 +dst.fp16[3] := e4 +dst.fp16[4] := e3 +dst.fp16[5] := e2 +dst.fp16[6] := e1 +dst.fp16[7] := e0 + + AVX512_FP16 +
immintrin.h
+ Set
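The set/setr pairs above differ only in lane order: "set" places e0 in the lowest f16 lane, while "setr" places it in the highest. A small sketch of both orderings, assuming u16 bit patterns as stand-ins for f16 values and hypothetical helper names:

// dst.fp16[i] := e_i: element 0 of the result is parameter e0.
fn set_ph(e: [u16; 8]) -> [u16; 8] {
    e
}

// dst.fp16[i] := e_(7-i): same values, reversed lane order.
fn setr_ph(e: [u16; 8]) -> [u16; 8] {
    let mut dst = e;
    dst.reverse();
    dst
}

fn main() {
    let e = [0u16, 1, 2, 3, 4, 5, 6, 7];
    assert_eq!(set_ph(e)[0], 0);
    assert_eq!(setr_ph(e)[0], 7);
}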
- - - - - - - - - - - - - - - - - - Set packed half-precision (16-bit) floating-point elements in "dst" with the - supplied values in reverse order. - - dst.fp16[0] := e15 - dst.fp16[1] := e14 - dst.fp16[2] := e13 - dst.fp16[3] := e12 - dst.fp16[4] := e11 - dst.fp16[5] := e10 - dst.fp16[6] := e9 - dst.fp16[7] := e8 - dst.fp16[8] := e7 - dst.fp16[9] := e6 - dst.fp16[10] := e5 - dst.fp16[11] := e4 - dst.fp16[12] := e3 - dst.fp16[13] := e2 - dst.fp16[14] := e1 - dst.fp16[15] := e0 - - AVX512_FP16 -
immintrin.h
- Set + + + + + + + + + + + + + + + + + + Set packed half-precision (16-bit) floating-point elements in "dst" with the supplied values in reverse order. + +dst.fp16[0] := e15 +dst.fp16[1] := e14 +dst.fp16[2] := e13 +dst.fp16[3] := e12 +dst.fp16[4] := e11 +dst.fp16[5] := e10 +dst.fp16[6] := e9 +dst.fp16[7] := e8 +dst.fp16[8] := e7 +dst.fp16[9] := e6 +dst.fp16[10] := e5 +dst.fp16[11] := e4 +dst.fp16[12] := e3 +dst.fp16[13] := e2 +dst.fp16[14] := e1 +dst.fp16[15] := e0 + + AVX512_FP16 +
immintrin.h
+ Set
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Set packed half-precision (16-bit) floating-point elements in "dst" with the - supplied values in reverse order. - - dst.fp16[0] := e31 - dst.fp16[1] := e30 - dst.fp16[2] := e29 - dst.fp16[3] := e28 - dst.fp16[4] := e27 - dst.fp16[5] := e26 - dst.fp16[6] := e25 - dst.fp16[7] := e24 - dst.fp16[8] := e23 - dst.fp16[9] := e22 - dst.fp16[10] := e21 - dst.fp16[11] := e20 - dst.fp16[12] := e19 - dst.fp16[13] := e18 - dst.fp16[14] := e17 - dst.fp16[15] := e16 - dst.fp16[16] := e15 - dst.fp16[17] := e14 - dst.fp16[18] := e13 - dst.fp16[19] := e12 - dst.fp16[20] := e11 - dst.fp16[21] := e10 - dst.fp16[22] := e9 - dst.fp16[23] := e8 - dst.fp16[24] := e7 - dst.fp16[25] := e6 - dst.fp16[26] := e5 - dst.fp16[27] := e4 - dst.fp16[28] := e3 - dst.fp16[29] := e2 - dst.fp16[30] := e1 - dst.fp16[31] := e0 - - AVX512_FP16 -
immintrin.h
- Set + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Set packed half-precision (16-bit) floating-point elements in "dst" with the supplied values in reverse order. + +dst.fp16[0] := e31 +dst.fp16[1] := e30 +dst.fp16[2] := e29 +dst.fp16[3] := e28 +dst.fp16[4] := e27 +dst.fp16[5] := e26 +dst.fp16[6] := e25 +dst.fp16[7] := e24 +dst.fp16[8] := e23 +dst.fp16[9] := e22 +dst.fp16[10] := e21 +dst.fp16[11] := e20 +dst.fp16[12] := e19 +dst.fp16[13] := e18 +dst.fp16[14] := e17 +dst.fp16[15] := e16 +dst.fp16[16] := e15 +dst.fp16[17] := e14 +dst.fp16[18] := e13 +dst.fp16[19] := e12 +dst.fp16[20] := e11 +dst.fp16[21] := e10 +dst.fp16[22] := e9 +dst.fp16[23] := e8 +dst.fp16[24] := e7 +dst.fp16[25] := e6 +dst.fp16[26] := e5 +dst.fp16[27] := e4 +dst.fp16[28] := e3 +dst.fp16[29] := e2 +dst.fp16[30] := e1 +dst.fp16[31] := e0 + + AVX512_FP16 +
immintrin.h
+ Set
- - - Broadcast half-precision (16-bit) floating-point value "a" to all elements of - "dst". - - FOR i := 0 to 7 - dst.fp16[i] := a[15:0] - ENDFOR - dst[MAX:128] := 0 - - AVX512_FP16 -
immintrin.h
- Set + + + Broadcast half-precision (16-bit) floating-point value "a" to all elements of "dst". + +FOR i := 0 to 7 + dst.fp16[i] := a[15:0] +ENDFOR +dst[MAX:128] := 0 + + AVX512_FP16 +
immintrin.h
+ Set
- - - Broadcast half-precision (16-bit) floating-point value "a" to all elements of - "dst". - - FOR i := 0 to 15 - dst.fp16[i] := a[15:0] - ENDFOR - dst[MAX:256] := 0 - - AVX512_FP16 -
immintrin.h
- Set + + + Broadcast half-precision (16-bit) floating-point value "a" to all elements of "dst". + +FOR i := 0 to 15 + dst.fp16[i] := a[15:0] +ENDFOR +dst[MAX:256] := 0 + + AVX512_FP16 +
immintrin.h
+ Set
- - - Broadcast half-precision (16-bit) floating-point value "a" to all elements of - "dst". - - FOR i := 0 to 31 - dst.fp16[i] := a[15:0] - ENDFOR - dst[MAX:512] := 0 - - AVX512_FP16 -
immintrin.h
- Set + + + Broadcast half-precision (16-bit) floating-point value "a" to all elements of "dst". + +FOR i := 0 to 31 + dst.fp16[i] := a[15:0] +ENDFOR +dst[MAX:512] := 0 + + AVX512_FP16 +
immintrin.h
+ Set
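The three set1 entries above are plain broadcasts: one f16 value is copied to every lane of the 128-, 256- or 512-bit result. A sketch over an arbitrary lane count, again with u16 bit patterns as f16 stand-ins and a hypothetical helper name:

// dst.fp16[i] := a[15:0] for every lane i.
fn set1_ph<const N: usize>(a: u16) -> [u16; N] {
    [a; N]
}

fn main() {
    let v = set1_ph::<32>(0x3C00); // 0x3C00 is 1.0 in IEEE binary16
    assert!(v.iter().all(|&x| x == 0x3C00));
}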
- - - Broadcast half-precision (16-bit) complex floating-point value "a" to all - elements of "dst". - - FOR i := 0 to 3 - dst.fp16[2*i+0] := a[15:0] - dst.fp16[2*i+1] := a[31:16] - ENDFOR - dst[MAX:128] := 0 - - AVX512_FP16 -
immintrin.h
- Set + + + Broadcast half-precision (16-bit) complex floating-point value "a" to all elements of "dst". + +FOR i := 0 to 3 + dst.fp16[2*i+0] := a[15:0] + dst.fp16[2*i+1] := a[31:16] +ENDFOR +dst[MAX:128] := 0 + + AVX512_FP16 +
immintrin.h
+ Set
- - - Broadcast half-precision (16-bit) complex floating-point value "a" to all - elements of "dst". - - FOR i := 0 to 7 - dst.fp16[2*i+0] := a[15:0] - dst.fp16[2*i+1] := a[31:16] - ENDFOR - dst[MAX:256] := 0 - - AVX512_FP16 -
immintrin.h
- Set + + + Broadcast half-precision (16-bit) complex floating-point value "a" to all elements of "dst". + +FOR i := 0 to 7 + dst.fp16[2*i+0] := a[15:0] + dst.fp16[2*i+1] := a[31:16] +ENDFOR +dst[MAX:256] := 0 + + AVX512_FP16 +
immintrin.h
+ Set
- - - Broadcast half-precision (16-bit) complex floating-point value "a" to all - elements of "dst". - - FOR i := 0 to 15 - dst.fp16[2*i+0] := a[15:0] - dst.fp16[2*i+1] := a[31:16] - ENDFOR - dst[MAX:512] := 0 - - AVX512_FP16 -
immintrin.h
- Set + + + Broadcast half-precision (16-bit) complex floating-point value "a" to all elements of "dst". + +FOR i := 0 to 15 + dst.fp16[2*i+0] := a[15:0] + dst.fp16[2*i+1] := a[31:16] +ENDFOR +dst[MAX:512] := 0 + + AVX512_FP16 +
immintrin.h
+ Set
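The complex variants above broadcast a 32-bit value holding two f16 halves, so the real part lands in every even lane and the imaginary part in every odd lane. A sketch under those assumptions (hypothetical helper name, u16 bit patterns as f16 stand-ins):

// a[15:0] -> dst.fp16[2*i+0], a[31:16] -> dst.fp16[2*i+1].
fn set1_pch<const LANES: usize>(a: u32) -> Vec<u16> {
    let re = a as u16;
    let im = (a >> 16) as u16;
    (0..LANES).flat_map(|_| [re, im]).collect()
}

fn main() {
    // (re, im) = (1.0, -1.0) as f16 bit patterns.
    let v = set1_pch::<2>(0xBC00_3C00);
    assert_eq!(v, vec![0x3C00, 0xBC00, 0x3C00, 0xBC00]);
}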
- - - Copy half-precision (16-bit) floating-point element "a" to the lower element of - "dst", and zero the upper 7 elements. - - dst.fp16[0] := a[15:0] - dst[127:16] := 0 - - AVX512_FP16 -
immintrin.h
- Set + + + Copy half-precision (16-bit) floating-point element "a" to the lower element of "dst", and zero the upper 7 elements. + +dst.fp16[0] := a[15:0] +dst[127:16] := 0 + + AVX512_FP16 +
immintrin.h
+ Set
- - Return vector of type __m512h with all elements set to zero. - - dst[MAX:0] := 0 - - - AVX512_FP16 -
immintrin.h
- Set + + Return vector of type __m512h with all elements set to zero. + +dst[MAX:0] := 0 + + + AVX512_FP16 +
immintrin.h
+ Set
- - - Cast vector of type "__m128h" to type "__m128". This intrinsic is only used for - compilation and does not generate any instructions, thus it has zero latency. - AVX512_FP16 -
immintrin.h
- Cast + + + Cast vector of type "__m128h" to type "__m128". This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512_FP16 +
immintrin.h
+ Cast
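The cast entries that follow are pure reinterpretations: only the type changes, and no instruction is emitted. In a Rust port they would presumably be size-preserving transmutes; a free-standing sketch using arrays of equal byte width (hypothetical helper name, little-endian layout assumed):

// View the same 128 bits as 4 f32 lanes instead of 8 f16 lanes.
fn castph_ps(a: [u16; 8]) -> [f32; 4] {
    unsafe { core::mem::transmute(a) }
}

fn main() {
    let ones = [0x3C00u16; 8]; // eight f16 1.0 bit patterns
    let ps = castph_ps(ones);
    // Each f32 lane holds the bits 0x3C003C00, not the value 1.0.
    assert_eq!(ps[0].to_bits(), 0x3C00_3C00);
}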
- - - Cast vector of type "__m256h" to type "__m256". This intrinsic is only used for - compilation and does not generate any instructions, thus it has zero latency. - AVX512_FP16 -
immintrin.h
- Cast + + + Cast vector of type "__m256h" to type "__m256". This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512_FP16 +
immintrin.h
+ Cast
- - - Cast vector of type "__m512h" to type "__m512". This intrinsic is only used for - compilation and does not generate any instructions, thus it has zero latency. - AVX512_FP16 -
immintrin.h
- Cast + + + Cast vector of type "__m512h" to type "__m512". This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512_FP16 +
immintrin.h
+ Cast
- - - Cast vector of type "__m128h" to type "__m128d". This intrinsic is only used - for compilation and does not generate any instructions, thus it has zero latency. - AVX512_FP16 -
immintrin.h
- Cast + + + Cast vector of type "__m128h" to type "__m128d". This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512_FP16 +
immintrin.h
+ Cast
- - - Cast vector of type "__m256h" to type "__m256d". This intrinsic is only used - for compilation and does not generate any instructions, thus it has zero latency. - AVX512_FP16 -
immintrin.h
- Cast + + + Cast vector of type "__m256h" to type "__m256d". This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512_FP16 +
immintrin.h
+ Cast
- - - Cast vector of type "__m512h" to type "__m512d". This intrinsic is only used - for compilation and does not generate any instructions, thus it has zero latency. - AVX512_FP16 -
immintrin.h
- Cast + + + Cast vector of type "__m512h" to type "__m512d". This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512_FP16 +
immintrin.h
+ Cast
- - - Cast vector of type "__m128h" to type "__m128i". This intrinsic is only used - for compilation and does not generate any instructions, thus it has zero latency. - AVX512_FP16 -
immintrin.h
- Cast + + + Cast vector of type "__m128h" to type "__m128i". This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512_FP16 +
immintrin.h
+ Cast
- - - Cast vector of type "__m256h" to type "__m256i". This intrinsic is only used - for compilation and does not generate any instructions, thus it has zero latency. - AVX512_FP16 -
immintrin.h
- Cast + + + Cast vector of type "__m256h" to type "__m256i". This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512_FP16 +
immintrin.h
+ Cast
- - - Cast vector of type "__m512h" to type "__m512i". This intrinsic is only used - for compilation and does not generate any instructions, thus it has zero latency. - AVX512_FP16 -
immintrin.h
- Cast + + + Cast vector of type "__m512h" to type "__m512i". This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512_FP16 +
immintrin.h
+ Cast
- - - Cast vector of type "__m128" to type "__m128h". This intrinsic is only used for - compilation and does not generate any instructions, thus it has zero latency. - AVX512_FP16 -
immintrin.h
- Cast + + + Cast vector of type "__m128" to type "__m128h". This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512_FP16 +
immintrin.h
+ Cast
- - - Cast vector of type "__m256" to type "__m256h". This intrinsic is only used for - compilation and does not generate any instructions, thus it has zero latency. - AVX512_FP16 -
immintrin.h
- Cast + + + Cast vector of type "__m256" to type "__m256h". This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512_FP16 +
immintrin.h
+ Cast
- - - Cast vector of type "__m512" to type "__m512h". This intrinsic is only used for - compilation and does not generate any instructions, thus it has zero latency. - AVX512_FP16 -
immintrin.h
- Cast + + + Cast vector of type "__m512" to type "__m512h". This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512_FP16 +
immintrin.h
+ Cast
- - - Cast vector of type "__m128d" to type "__m128h". This intrinsic is only used - for compilation and does not generate any instructions, thus it has zero latency. - AVX512_FP16 -
immintrin.h
- Cast + + + Cast vector of type "__m128d" to type "__m128h". This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512_FP16 +
immintrin.h
+ Cast
- - - Cast vector of type "__m256d" to type "__m256h". This intrinsic is only used - for compilation and does not generate any instructions, thus it has zero latency. - AVX512_FP16 -
immintrin.h
- Cast + + + Cast vector of type "__m256d" to type "__m256h". This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512_FP16 +
immintrin.h
+ Cast
- - - Cast vector of type "__m512d" to type "__m512h". This intrinsic is only used - for compilation and does not generate any instructions, thus it has zero latency. - AVX512_FP16 -
immintrin.h
- Cast + + + Cast vector of type "__m512d" to type "__m512h". This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512_FP16 +
immintrin.h
+ Cast
- - - Cast vector of type "__m128i" to type "__m128h". This intrinsic is only used - for compilation and does not generate any instructions, thus it has zero latency. - AVX512_FP16 -
immintrin.h
- Cast + + + Cast vector of type "__m128i" to type "__m128h". This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512_FP16 +
immintrin.h
+ Cast
- - - Cast vector of type "__m256i" to type "__m256h". This intrinsic is only used - for compilation and does not generate any instructions, thus it has zero latency. - AVX512_FP16 -
immintrin.h
- Cast + + + Cast vector of type "__m256i" to type "__m256h". This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512_FP16 +
immintrin.h
+ Cast
- - - Cast vector of type "__m512i" to type "__m512h". This intrinsic is only used - for compilation and does not generate any instructions, thus it has zero latency. - AVX512_FP16 -
immintrin.h
- Cast + + + Cast vector of type "__m512i" to type "__m512h". This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512_FP16 +
immintrin.h
+ Cast
- - - Cast vector of type "__m256h" to type "__m128h". This intrinsic is only used - for compilation and does not generate any instructions, thus it has zero latency. - AVX512_FP16 -
immintrin.h
- Cast + + + Cast vector of type "__m256h" to type "__m128h". This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512_FP16 +
immintrin.h
+ Cast
- - - Cast vector of type "__m512h" to type "__m128h". This intrinsic is only used - for compilation and does not generate any instructions, thus it has zero latency. - AVX512_FP16 -
immintrin.h
- Cast + + + Cast vector of type "__m512h" to type "__m128h". This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512_FP16 +
immintrin.h
+ Cast
- - - Cast vector of type "__m512h" to type "__m256h". This intrinsic is only used - for compilation and does not generate any instructions, thus it has zero latency. - AVX512_FP16 -
immintrin.h
- Cast + + + Cast vector of type "__m512h" to type "__m256h". This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512_FP16 +
immintrin.h
+ Cast
- - - Cast vector of type "__m128h" to type "__m256h". This intrinsic is only used - for compilation and does not generate any instructions, thus it has zero latency. - AVX512_FP16 -
immintrin.h
- Cast + + + Cast vector of type "__m128h" to type "__m256h". This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512_FP16 +
immintrin.h
+ Cast
- - - Cast vector of type "__m128h" to type "__m512h". This intrinsic is only used - for compilation and does not generate any instructions, thus it has zero latency. - AVX512_FP16 -
immintrin.h
- Cast + + + Cast vector of type "__m128h" to type "__m512h". This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512_FP16 +
immintrin.h
+ Cast
- - - Cast vector of type "__m256h" to type "__m512h". This intrinsic is only used - for compilation and does not generate any instructions, thus it has zero latency. - AVX512_FP16 -
immintrin.h
- Cast + + + Cast vector of type "__m256h" to type "__m512h". This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512_FP16 +
immintrin.h
+ Cast
- - - Cast vector of type "__m128h" to type "__m256h"; the upper 128 bits of the - result are zeroed. This intrinsic is only used for compilation and does not generate any - instructions, thus it has zero latency. - AVX512_FP16 -
immintrin.h
- Cast + + + Cast vector of type "__m128h" to type "__m256h"; the upper 128 bits of the result are zeroed. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512_FP16 +
immintrin.h
+ Cast
- - - Cast vector of type "__m128h" to type "__m512h"; the upper 128 bits of the - result are zeroed. This intrinsic is only used for compilation and does not generate any - instructions, thus it has zero latency. - AVX512_FP16 -
immintrin.h
- Cast + + + Cast vector of type "__m128h" to type "__m512h"; the upper 384 bits of the result are zeroed. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512_FP16 +
immintrin.h
+ Cast
- - - Cast vector of type "__m256h" to type "__m512h"; the upper 128 bits of the - result are zeroed. This intrinsic is only used for compilation and does not generate any - instructions, thus it has zero latency. - AVX512_FP16 -
immintrin.h
- Cast + + + Cast vector of type "__m256h" to type "__m512h"; the upper 256 bits of the result are zeroed. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + AVX512_FP16 +
immintrin.h
+ Cast
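Unlike the plain 128-to-512 cast, whose upper lanes are left undefined, the zext casts above guarantee zeroed upper bits. A sketch of the 128-to-512 case (hypothetical helper name, u16 bit patterns as f16 stand-ins):

// Low 8 lanes come from `a`; the remaining 24 lanes (384 bits) are zeroed.
fn zextph128_ph512(a: [u16; 8]) -> [u16; 32] {
    let mut dst = [0u16; 32];
    dst[..8].copy_from_slice(&a);
    dst
}

fn main() {
    let v = zextph128_ph512([0x3C00; 8]);
    assert!(v[8..].iter().all(|&x| x == 0));
}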
- - Return vector of type __m512h with undefined elements. - AVX512_FP16 -
immintrin.h
- General Support -
- - - - - - - For each 64-bit element in "b", select 8 unaligned bytes using a byte-granular - shift control within the corresponding 64-bit element of "a", and store the 8 assembled - bytes to the corresponding 64-bit element of "dst". - - FOR i := 0 to 3 - q := i * 64 - FOR j := 0 to 7 - tmp8 := 0 - ctrl := a[q+j*8+7:q+j*8] & 63 - FOR l := 0 to 7 + + Return vector of type __m512h with undefined elements. + AVX512_FP16 +
immintrin.h
+ General Support +
+ + + + + + + For each 64-bit element in "b", select 8 unaligned bytes using a byte-granular shift control within the corresponding 64-bit element of "a", and store the 8 assembled bytes to the corresponding 64-bit element of "dst". + +FOR i := 0 to 3 + q := i * 64 + FOR j := 0 to 7 + tmp8 := 0 + ctrl := a[q+j*8+7:q+j*8] & 63 + FOR l := 0 to 7 tmp8[l] := b[q+((ctrl+l) & 63)] - ENDFOR - dst[q+j*8+7:q+j*8] := tmp8[7:0] - ENDFOR - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI - AVX512VL -
immintrin.h
- Bit Manipulation + ENDFOR + dst[q+j*8+7:q+j*8] := tmp8[7:0] + ENDFOR +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI + AVX512VL +
immintrin.h
+ Bit Manipulation
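In the multishift pseudocode above, each control byte selects 8 bits of the source qword starting at an arbitrary, wrapping bit offset; that is equivalent to rotating the qword right by the control value and taking the low byte. A scalar sketch of one 64-bit lane (hypothetical helper name):

// tmp8[l] := b[q+((ctrl+l) & 63)] == low byte of b rotated right by ctrl.
fn multishift_qword(a: u64, b: u64) -> u64 {
    let mut dst = 0u64;
    for j in 0..8 {
        let ctrl = (a >> (j * 8)) as u32 & 63; // per-byte shift control
        let byte = b.rotate_right(ctrl) as u8; // 8 bits from offset ctrl
        dst |= (byte as u64) << (j * 8);
    }
    dst
}

fn main() {
    // Every control byte is 8, so every result byte is bits 15:8 of b.
    assert_eq!(multishift_qword(0x0808_0808_0808_0808, 0xAABB), 0xAAAA_AAAA_AAAA_AAAA);
}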
- - - - - - For each 64-bit element in "b", select 8 unaligned bytes using a byte-granular - shift control within the corresponding 64-bit element of "a", and store the 8 assembled - bytes to the corresponding 64-bit element of "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). - - FOR i := 0 to 3 - q := i * 64 - FOR j := 0 to 7 - tmp8 := 0 - ctrl := a[q+j*8+7:q+j*8] & 63 - FOR l := 0 to 7 + + + + + + For each 64-bit element in "b", select 8 unaligned bytes using a byte-granular shift control within the corresponding 64-bit element of "a", and store the 8 assembled bytes to the corresponding 64-bit element of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR i := 0 to 3 + q := i * 64 + FOR j := 0 to 7 + tmp8 := 0 + ctrl := a[q+j*8+7:q+j*8] & 63 + FOR l := 0 to 7 tmp8[l] := b[q+((ctrl+l) & 63)] - ENDFOR - IF k[i*8+j] + ENDFOR + IF k[i*8+j] dst[q+j*8+7:q+j*8] := tmp8[7:0] - ELSE + ELSE dst[q+j*8+7:q+j*8] := src[q+j*8+7:q+j*8] - FI - ENDFOR - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI - AVX512VL -
immintrin.h
- Bit Manipulation + FI + ENDFOR +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512_VBMI + AVX512VL +
immintrin.h
+ Bit Manipulation
- - - - - For each 64-bit element in "b", select 8 unaligned bytes using a byte-granular - shift control within the corresponding 64-bit element of "a", and store the 8 assembled - bytes to the corresponding 64-bit element of "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR i := 0 to 3 - q := i * 64 - FOR j := 0 to 7 - tmp8 := 0 - ctrl := a[q+j*8+7:q+j*8] & 63 - FOR l := 0 to 7 + + + + + For each 64-bit element in "b", select 8 unaligned bytes using a byte-granular shift control within the corresponding 64-bit element of "a", and store the 8 assembled bytes to the corresponding 64-bit element of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR i := 0 to 3 + q := i * 64 + FOR j := 0 to 7 + tmp8 := 0 + ctrl := a[q+j*8+7:q+j*8] & 63 + FOR l := 0 to 7 tmp8[l] := b[q+((ctrl+l) & 63)] - ENDFOR - IF k[i*8+j] + ENDFOR + IF k[i*8+j] dst[q+j*8+7:q+j*8] := tmp8[7:0] - ELSE + ELSE dst[q+j*8+7:q+j*8] := 0 - FI - ENDFOR - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI - AVX512VL -
immintrin.h
- Bit Manipulation + FI + ENDFOR +ENDFOR +dst[MAX:256] := 0 +
+ + AVX512_VBMI + AVX512VL +
immintrin.h
+ Bit Manipulation
- - - - For each 64-bit element in "b", select 8 unaligned bytes using a byte-granular - shift control within the corresponding 64-bit element of "a", and store the 8 assembled - bytes to the corresponding 64-bit element of "dst". - - FOR i := 0 to 1 - q := i * 64 - FOR j := 0 to 7 - tmp8 := 0 - ctrl := a[q+j*8+7:q+j*8] & 63 - FOR l := 0 to 7 + + + + For each 64-bit element in "b", select 8 unaligned bytes using a byte-granular shift control within the corresponding 64-bit element of "a", and store the 8 assembled bytes to the corresponding 64-bit element of "dst". + +FOR i := 0 to 1 + q := i * 64 + FOR j := 0 to 7 + tmp8 := 0 + ctrl := a[q+j*8+7:q+j*8] & 63 + FOR l := 0 to 7 tmp8[l] := b[q+((ctrl+l) & 63)] - ENDFOR - dst[q+j*8+7:q+j*8] := tmp8[7:0] - ENDFOR - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI - AVX512VL -
immintrin.h
- Bit Manipulation + ENDFOR + dst[q+j*8+7:q+j*8] := tmp8[7:0] + ENDFOR +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512_VBMI + AVX512VL +
immintrin.h
+ Bit Manipulation
- - - - - - For each 64-bit element in "b", select 8 unaligned bytes using a byte-granular - shift control within the corresponding 64-bit element of "a", and store the 8 assembled - bytes to the corresponding 64-bit element of "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). - - FOR i := 0 to 1 - q := i * 64 - FOR j := 0 to 7 - tmp8 := 0 - ctrl := a[q+j*8+7:q+j*8] & 63 - FOR l := 0 to 7 + + + + + + For each 64-bit element in "b", select 8 unaligned bytes using a byte-granular shift control within the corresponding 64-bit element of "a", and store the 8 assembled bytes to the corresponding 64-bit element of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR i := 0 to 1 + q := i * 64 + FOR j := 0 to 7 + tmp8 := 0 + ctrl := a[q+j*8+7:q+j*8] & 63 + FOR l := 0 to 7 tmp8[l] := b[q+((ctrl+l) & 63)] - ENDFOR - IF k[i*8+j] + ENDFOR + IF k[i*8+j] dst[q+j*8+7:q+j*8] := tmp8[7:0] - ELSE + ELSE dst[q+j*8+7:q+j*8] := src[q+j*8+7:q+j*8] - FI - ENDFOR - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI - AVX512VL -
immintrin.h
- Bit Manipulation + FI + ENDFOR +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512_VBMI + AVX512VL +
immintrin.h
+ Bit Manipulation
- - - - - For each 64-bit element in "b", select 8 unaligned bytes using a byte-granular - shift control within the corresponding 64-bit element of "a", and store the 8 assembled - bytes to the corresponding 64-bit element of "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR i := 0 to 1 - q := i * 64 - FOR j := 0 to 7 - tmp8 := 0 - ctrl := a[q+j*8+7:q+j*8] & 63 - FOR l := 0 to 7 + + + + + For each 64-bit element in "b", select 8 unaligned bytes using a byte-granular shift control within the corresponding 64-bit element of "a", and store the 8 assembled bytes to the corresponding 64-bit element of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR i := 0 to 1 + q := i * 64 + FOR j := 0 to 7 + tmp8 := 0 + ctrl := a[q+j*8+7:q+j*8] & 63 + FOR l := 0 to 7 tmp8[l] := b[q+((ctrl+l) & 63)] - ENDFOR - IF k[i*8+j] + ENDFOR + IF k[i*8+j] dst[q+j*8+7:q+j*8] := tmp8[7:0] - ELSE + ELSE dst[q+j*8+7:q+j*8] := 0 - FI - ENDFOR - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI - AVX512VL -
immintrin.h
- Bit Manipulation + FI + ENDFOR +ENDFOR +dst[MAX:128] := 0 +
+ + AVX512_VBMI + AVX512VL +
immintrin.h
+ Bit Manipulation
- - - - Shuffle 8-bit integers in "a" across lanes using the corresponding index in - "idx", and store the results in "dst". - - FOR j := 0 to 31 - i := j*8 - id := idx[i+4:i]*8 - dst[i+7:i] := a[id+7:id] - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI - AVX512VL -
immintrin.h
- Swizzle + + + + Shuffle 8-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst". + +FOR j := 0 to 31 + i := j*8 + id := idx[i+4:i]*8 + dst[i+7:i] := a[id+7:id] +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI + AVX512VL +
immintrin.h
+ Swizzle
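The permute above picks each destination byte from "a" using the low 5 bits of the matching index byte, crossing 128-bit lane boundaries. A scalar sketch (hypothetical helper name):

// dst byte j := a[idx[j] & 31].
fn permutexvar_epi8(idx: [u8; 32], a: [u8; 32]) -> [u8; 32] {
    core::array::from_fn(|j| a[(idx[j] & 31) as usize])
}

fn main() {
    let a: [u8; 32] = core::array::from_fn(|i| i as u8);
    let rev: [u8; 32] = core::array::from_fn(|j| 31 - j as u8);
    assert_eq!(permutexvar_epi8(rev, a)[0], 31); // lane order reversed
}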
- - - - - - Shuffle 8-bit integers in "a" across lanes using the corresponding index in - "idx", and store the results in "dst" using writemask "k" (elements are copied from - "src" when the corresponding mask bit is not set). - - FOR j := 0 to 31 - i := j*8 - id := idx[i+4:i]*8 - IF k[j] - dst[i+7:i] := a[id+7:id] - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI - AVX512VL -
immintrin.h
- Swizzle + + + + + + Shuffle 8-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + id := idx[i+4:i]*8 + IF k[j] + dst[i+7:i] := a[id+7:id] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI + AVX512VL +
immintrin.h
+ Swizzle
- - - - - Shuffle 8-bit integers in "a" across lanes using the corresponding index in - "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when - the corresponding mask bit is not set). - - FOR j := 0 to 31 - i := j*8 - id := idx[i+4:i]*8 - IF k[j] - dst[i+7:i] := a[id+7:id] - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI - AVX512VL -
immintrin.h
- Swizzle + + + + + Shuffle 8-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + id := idx[i+4:i]*8 + IF k[j] + dst[i+7:i] := a[id+7:id] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI + AVX512VL +
immintrin.h
+ Swizzle
- - - - Shuffle 8-bit integers in "a" using the corresponding index in "idx", and store - the results in "dst". - - FOR j := 0 to 15 - i := j*8 - id := idx[i+3:i]*8 - dst[i+7:i] := a[id+7:id] - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI - AVX512VL -
immintrin.h
- Swizzle + + + + Shuffle 8-bit integers in "a" using the corresponding index in "idx", and store the results in "dst". + +FOR j := 0 to 15 + i := j*8 + id := idx[i+3:i]*8 + dst[i+7:i] := a[id+7:id] +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI + AVX512VL +
immintrin.h
+ Swizzle
- - - - - - Shuffle 8-bit integers in "a" using the corresponding index in "idx", and store - the results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*8 - id := idx[i+3:i]*8 - IF k[j] - dst[i+7:i] := a[id+7:id] - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI - AVX512VL -
immintrin.h
- Swizzle + + + + + + Shuffle 8-bit integers in "a" using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + id := idx[i+3:i]*8 + IF k[j] + dst[i+7:i] := a[id+7:id] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI + AVX512VL +
immintrin.h
+ Swizzle
- - - - - Shuffle 8-bit integers in "a" using the corresponding index in "idx", and store - the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding - mask bit is not set). - - FOR j := 0 to 15 - i := j*8 - id := idx[i+3:i]*8 - IF k[j] - dst[i+7:i] := a[id+7:id] - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI - AVX512VL -
immintrin.h
- Swizzle + + + + + Shuffle 8-bit integers in "a" using the corresponding index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + id := idx[i+3:i]*8 + IF k[j] + dst[i+7:i] := a[id+7:id] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI + AVX512VL +
immintrin.h
+ Swizzle
- - - - - Shuffle 8-bit integers in "a" and "b" across lanes using the corresponding - selector and index in "idx", and store the results in "dst". - - FOR j := 0 to 31 - i := j*8 - off := 8*idx[i+4:i] - dst[i+7:i] := idx[i+5] ? b[off+7:off] : a[off+7:off] - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI - AVX512VL -
immintrin.h
- Swizzle + + + + + Shuffle 8-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst". + +FOR j := 0 to 31 + i := j*8 + off := 8*idx[i+4:i] + dst[i+7:i] := idx[i+5] ? b[off+7:off] : a[off+7:off] +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI + AVX512VL +
immintrin.h
+ Swizzle
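In the two-source permute above, bit 5 of each index byte selects between "a" and "b" while the low 5 bits pick the lane, so "idx" effectively indexes a 64-entry table spanning both inputs. A scalar sketch (hypothetical helper name):

// off := idx[j] & 31; bit 5 of idx[j] selects b over a.
fn permutex2var_epi8(a: [u8; 32], idx: [u8; 32], b: [u8; 32]) -> [u8; 32] {
    core::array::from_fn(|j| {
        let off = (idx[j] & 31) as usize;
        if idx[j] & 32 != 0 { b[off] } else { a[off] }
    })
}

fn main() {
    let (a, b) = ([1u8; 32], [2u8; 32]);
    let mut idx = [0u8; 32];
    idx[0] = 32; // bit 5 set: take lane 0 from b
    let r = permutex2var_epi8(a, idx, b);
    assert_eq!((r[0], r[1]), (2, 1));
}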
- - - - - - Shuffle 8-bit integers in "a" and "b" across lanes using the corresponding - selector and index in "idx", and store the results in "dst" using writemask "k" - (elements are copied from "a" when the corresponding mask bit is not set). - - FOR j := 0 to 31 - i := j*8 - IF k[j] - off := 8*idx[i+4:i] - dst[i+7:i] := idx[i+5] ? b[off+7:off] : a[off+7:off] - ELSE - dst[i+7:i] := a[i+7:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI - AVX512VL -
immintrin.h
- Swizzle + + + + + + Shuffle 8-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + off := 8*idx[i+4:i] + dst[i+7:i] := idx[i+5] ? b[off+7:off] : a[off+7:off] + ELSE + dst[i+7:i] := a[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI + AVX512VL +
immintrin.h
+ Swizzle
- - - - - - Shuffle 8-bit integers in "a" and "b" across lanes using the corresponding - selector and index in "idx", and store the results in "dst" using writemask "k" - (elements are copied from "a" when the corresponding mask bit is not set). - - FOR j := 0 to 31 - i := j*8 - IF k[j] - off := 8*idx[i+4:i] - dst[i+7:i] := idx[i+5] ? b[off+7:off] : a[off+7:off] - ELSE - dst[i+7:i] := idx[i+7:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI - AVX512VL -
immintrin.h
- Swizzle + + + + + + Shuffle 8-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set). +
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+  off := 8*idx[i+4:i]
+  dst[i+7:i] := idx[i+5] ? b[off+7:off] : a[off+7:off]
+ ELSE
+  dst[i+7:i] := idx[i+7:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+
+
+ AVX512_VBMI
+ AVX512VL
+
immintrin.h
+ Swizzle
- - - - - - Shuffle 8-bit integers in "a" and "b" across lanes using the corresponding - selector and index in "idx", and store the results in "dst" using zeromask "k" (elements - are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 31 - i := j*8 - IF k[j] - off := 8*idx[i+4:i] - dst[i+7:i] := idx[i+5] ? b[off+7:off] : a[off+7:off] - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - - AVX512_VBMI - AVX512VL -
immintrin.h
- Swizzle + + + + + + Shuffle 8-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*8 + IF k[j] + off := 8*idx[i+4:i] + dst[i+7:i] := idx[i+5] ? b[off+7:off] : a[off+7:off] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + + AVX512_VBMI + AVX512VL +
immintrin.h
+ Swizzle
- - - - - Shuffle 8-bit integers in "a" and "b" using the corresponding selector and - index in "idx", and store the results in "dst". - - FOR j := 0 to 15 - i := j*8 - off := 8*idx[i+3:i] - dst[i+7:i] := idx[i+4] ? b[off+7:off] : a[off+7:off] - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI - AVX512VL -
immintrin.h
- Swizzle + + + + + Shuffle 8-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst". + +FOR j := 0 to 15 + i := j*8 + off := 8*idx[i+3:i] + dst[i+7:i] := idx[i+4] ? b[off+7:off] : a[off+7:off] +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI + AVX512VL +
immintrin.h
+ Swizzle
- - - - - - Shuffle 8-bit integers in "a" and "b" using the corresponding selector and - index in "idx", and store the results in "dst" using writemask "k" (elements are copied - from "a" when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*8 - IF k[j] - off := 8*idx[i+3:i] - dst[i+7:i] := idx[i+4] ? b[off+7:off] : a[off+7:off] - ELSE - dst[i+7:i] := a[i+7:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI - AVX512VL -
immintrin.h
- Swizzle -
- - - - - - - Shuffle 8-bit integers in "a" and "b" using the corresponding selector and - index in "idx", and store the results in "dst" using writemask "k" (elements are copied - from "a" when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*8 - IF k[j] - off := 8*idx[i+3:i] - dst[i+7:i] := idx[i+4] ? b[off+7:off] : a[off+7:off] - ELSE - dst[i+7:i] := idx[i+7:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI - AVX512VL -
immintrin.h
- Swizzle + + + + + + + Shuffle 8-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set). +
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+  off := 8*idx[i+3:i]
+  dst[i+7:i] := idx[i+4] ? b[off+7:off] : a[off+7:off]
+ ELSE
+  dst[i+7:i] := idx[i+7:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+
+
+ AVX512_VBMI
+ AVX512VL
+
immintrin.h
+ Swizzle
- - - - - - - Shuffle 8-bit integers in "a" and "b" using the corresponding selector and - index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*8 - IF k[j] - off := 8*idx[i+3:i] - dst[i+7:i] := idx[i+4] ? b[off+7:off] : a[off+7:off] - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - - AVX512_VBMI - AVX512VL -
immintrin.h
- Swizzle -
- - - - - - - For each 64-bit element in "b", select 8 unaligned bytes using a byte-granular - shift control within the corresponding 64-bit element of "a", and store the 8 assembled - bytes to the corresponding 64-bit element of "dst". - - FOR i := 0 to 7 - q := i * 64 - FOR j := 0 to 7 - tmp8 := 0 - ctrl := a[q+j*8+7:q+j*8] & 63 - FOR l := 0 to 7 + + + + + + + Shuffle 8-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + off := 8*idx[i+3:i] + dst[i+7:i] := idx[i+4] ? b[off+7:off] : a[off+7:off] + ELSE + dst[i+7:i] := idx[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI + AVX512VL +
immintrin.h
+ Swizzle +
+ + + + + + + Shuffle 8-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*8 + IF k[j] + off := 8*idx[i+3:i] + dst[i+7:i] := idx[i+4] ? b[off+7:off] : a[off+7:off] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + + AVX512_VBMI + AVX512VL +
immintrin.h
+ Swizzle +
+ + + + + + + For each 64-bit element in "b", select 8 unaligned bytes using a byte-granular shift control within the corresponding 64-bit element of "a", and store the 8 assembled bytes to the corresponding 64-bit element of "dst". + +FOR i := 0 to 7 + q := i * 64 + FOR j := 0 to 7 + tmp8 := 0 + ctrl := a[q+j*8+7:q+j*8] & 63 + FOR l := 0 to 7 tmp8[l] := b[q+((ctrl+l) & 63)] - ENDFOR - dst[q+j*8+7:q+j*8] := tmp8[7:0] - ENDFOR - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI -
immintrin.h
- Bit Manipulation + ENDFOR + dst[q+j*8+7:q+j*8] := tmp8[7:0] + ENDFOR +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512_VBMI +
immintrin.h
+ Bit Manipulation
- - - - - - For each 64-bit element in "b", select 8 unaligned bytes using a byte-granular - shift control within the corresponding 64-bit element of "a", and store the 8 assembled - bytes to the corresponding 64-bit element of "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). - - FOR i := 0 to 7 - q := i * 64 - FOR j := 0 to 7 - tmp8 := 0 - ctrl := a[q+j*8+7:q+j*8] & 63 - FOR l := 0 to 7 + + + + + + For each 64-bit element in "b", select 8 unaligned bytes using a byte-granular shift control within the corresponding 64-bit element of "a", and store the 8 assembled bytes to the corresponding 64-bit element of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR i := 0 to 7 + q := i * 64 + FOR j := 0 to 7 + tmp8 := 0 + ctrl := a[q+j*8+7:q+j*8] & 63 + FOR l := 0 to 7 tmp8[l] := b[q+((ctrl+l) & 63)] - ENDFOR - IF k[i*8+j] + ENDFOR + IF k[i*8+j] dst[q+j*8+7:q+j*8] := tmp8[7:0] - ELSE + ELSE dst[q+j*8+7:q+j*8] := src[q+j*8+7:q+j*8] - FI - ENDFOR - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI -
immintrin.h
- Bit Manipulation + FI + ENDFOR +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512_VBMI +
immintrin.h
+ Bit Manipulation
- - - - - For each 64-bit element in "b", select 8 unaligned bytes using a byte-granular - shift control within the corresponding 64-bit element of "a", and store the 8 assembled - bytes to the corresponding 64-bit element of "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR i := 0 to 7 - q := i * 64 - FOR j := 0 to 7 - tmp8 := 0 - ctrl := a[q+j*8+7:q+j*8] & 63 - FOR l := 0 to 7 + + + + + For each 64-bit element in "b", select 8 unaligned bytes using a byte-granular shift control within the corresponding 64-bit element of "a", and store the 8 assembled bytes to the corresponding 64-bit element of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR i := 0 to 7 + q := i * 64 + FOR j := 0 to 7 + tmp8 := 0 + ctrl := a[q+j*8+7:q+j*8] & 63 + FOR l := 0 to 7 tmp8[l] := b[q+((ctrl+l) & 63)] - ENDFOR - IF k[i*8+j] + ENDFOR + IF k[i*8+j] dst[q+j*8+7:q+j*8] := tmp8[7:0] - ELSE + ELSE dst[q+j*8+7:q+j*8] := 0 - FI - ENDFOR - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI -
immintrin.h
- Bit Manipulation + FI + ENDFOR +ENDFOR +dst[MAX:512] := 0 +
+ + AVX512_VBMI +
immintrin.h
+ Bit Manipulation
- - - - Shuffle 8-bit integers in "a" across lanes using the corresponding index in - "idx", and store the results in "dst". - - FOR j := 0 to 63 - i := j*8 - id := idx[i+5:i]*8 - dst[i+7:i] := a[id+7:id] - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI -
immintrin.h
- Swizzle + + + + Shuffle 8-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst". + +FOR j := 0 to 63 + i := j*8 + id := idx[i+5:i]*8 + dst[i+7:i] := a[id+7:id] +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI +
immintrin.h
+ Swizzle
- - - - - - Shuffle 8-bit integers in "a" across lanes using the corresponding index in - "idx", and store the results in "dst" using writemask "k" (elements are copied from - "src" when the corresponding mask bit is not set). - - FOR j := 0 to 63 - i := j*8 - id := idx[i+5:i]*8 - IF k[j] - dst[i+7:i] := a[id+7:id] - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI -
immintrin.h
- Swizzle + + + + + + Shuffle 8-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + id := idx[i+5:i]*8 + IF k[j] + dst[i+7:i] := a[id+7:id] + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI +
immintrin.h
+ Swizzle
- - - - - Shuffle 8-bit integers in "a" across lanes using the corresponding index in - "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when - the corresponding mask bit is not set). - - FOR j := 0 to 63 - i := j*8 - id := idx[i+5:i]*8 - IF k[j] - dst[i+7:i] := a[id+7:id] - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI -
immintrin.h
- Swizzle + + + + + Shuffle 8-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + id := idx[i+5:i]*8 + IF k[j] + dst[i+7:i] := a[id+7:id] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI +
immintrin.h
+ Swizzle
- - - - - Shuffle 8-bit integers in "a" and "b" across lanes using the corresponding - selector and index in "idx", and store the results in "dst". - - FOR j := 0 to 63 - i := j*8 - off := 8*idx[i+5:i] - dst[i+7:i] := idx[i+6] ? b[off+7:off] : a[off+7:off] - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI -
immintrin.h
- Swizzle + + + + + Shuffle 8-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst". + +FOR j := 0 to 63 + i := j*8 + off := 8*idx[i+5:i] + dst[i+7:i] := idx[i+6] ? b[off+7:off] : a[off+7:off] +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI +
immintrin.h
+ Swizzle
- - - - - - Shuffle 8-bit integers in "a" and "b" across lanes using the corresponding - selector and index in "idx", and store the results in "dst" using writemask "k" - (elements are copied from "a" when the corresponding mask bit is not set). - - FOR j := 0 to 63 - i := j*8 - IF k[j] - off := 8*idx[i+5:i] - dst[i+7:i] := idx[i+6] ? b[off+7:off] : a[off+7:off] - ELSE - dst[i+7:i] := a[i+7:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI -
immintrin.h
- Swizzle + + + + + + Shuffle 8-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k[j] + off := 8*idx[i+5:i] + dst[i+7:i] := idx[i+6] ? b[off+7:off] : a[off+7:off] + ELSE + dst[i+7:i] := a[i+7:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI +
immintrin.h
+ Swizzle
- - - - - - Shuffle 8-bit integers in "a" and "b" across lanes using the corresponding - selector and index in "idx", and store the results in "dst" using writemask "k" - (elements are copied from "a" when the corresponding mask bit is not set). - - FOR j := 0 to 63 - i := j*8 - IF k[j] - off := 8*idx[i+5:i] - dst[i+7:i] := idx[i+6] ? b[off+7:off] : a[off+7:off] - ELSE - dst[i+7:i] := idx[i+7:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI -
immintrin.h
- Swizzle + + + + + + Shuffle 8-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set). +
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+  off := 8*idx[i+5:i]
+  dst[i+7:i] := idx[i+6] ? b[off+7:off] : a[off+7:off]
+ ELSE
+  dst[i+7:i] := idx[i+7:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+
+
+ AVX512_VBMI
+
immintrin.h
+ Swizzle
- - - - - - Shuffle 8-bit integers in "a" and "b" across lanes using the corresponding - selector and index in "idx", and store the results in "dst" using zeromask "k" (elements - are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 63 - i := j*8 - IF k[j] - off := 8*idx[i+5:i] - dst[i+7:i] := idx[i+6] ? b[off+7:off] : a[off+7:off] - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - - AVX512_VBMI -
immintrin.h
- Swizzle -
- - - - - - - - - Concatenate packed 64-bit integers in "b" and "a" producing an intermediate - 128-bit result. Shift the result right by the amount specified in the corresponding - element of "c", and store the lower 64-bits in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := ((b[i+63:i] << 64)[127:0] | a[i+63:i]) >> (c[i+63:i] & - 63) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + Shuffle 8-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 63 + i := j*8 + IF k[j] + off := 8*idx[i+5:i] + dst[i+7:i] := idx[i+6] ? b[off+7:off] : a[off+7:off] + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + + AVX512_VBMI +
immintrin.h
+ Swizzle +
+ + + + + + + + + Concatenate packed 64-bit integers in "b" and "a" producing an intermediate 128-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 64-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := ((b[i+63:i] << 64)[127:0] | a[i+63:i]) >> (c[i+63:i] & 63) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
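The shift entries above all share one core operation: concatenate two lanes into a double-width value, shift right by a per-lane count, and keep the low half (the masked variants then apply "k"). A scalar sketch of one 64-bit lane, using u128 for the 128-bit intermediate (hypothetical helper name):

// dst := low 64 bits of ((b:a) >> (c & 63)).
fn shrdv_epi64_lane(a: u64, b: u64, c: u64) -> u64 {
    let concat = ((b as u128) << 64) | a as u128;
    (concat >> (c & 63)) as u64
}

fn main() {
    // Shifting right by 4 pulls b's low nibble into the result's top nibble.
    assert_eq!(shrdv_epi64_lane(0x00F0, 0xA, 4), 0xA000_0000_0000_000F);
}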
- - - - - - Concatenate packed 64-bit integers in "b" and "a" producing an intermediate - 128-bit result. Shift the result right by the amount specified in the corresponding - element of "c", and store the lower 64-bits in "dst" using writemask "k" (elements are - copied from "a" when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := ((b[i+63:i] << 64)[127:0] | a[i+63:i]) >> (c[i+63:i] & - 63) - ELSE - dst[i+63:i] := a[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + Concatenate packed 64-bit integers in "b" and "a" producing an intermediate 128-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 64-bits in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := ((b[i+63:i] << 64)[127:0] | a[i+63:i]) >> (c[i+63:i] & 63) + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - Concatenate packed 64-bit integers in "b" and "a" producing an intermediate - 128-bit result. Shift the result right by the amount specified in the corresponding - element of "c", and store the lower 64-bits in "dst". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := ((b[i+63:i] << 64)[127:0] | a[i+63:i]) >> (c[i+63:i] & - 63) - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + Concatenate packed 64-bit integers in "b" and "a" producing an intermediate 128-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 64-bits in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := ((b[i+63:i] << 64)[127:0] | a[i+63:i]) >> (c[i+63:i] & 63) +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - - Concatenate packed 64-bit integers in "b" and "a" producing an intermediate - 128-bit result. Shift the result right by the amount specified in the corresponding - element of "c", and store the lower 64-bits in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := ((b[i+63:i] << 64)[127:0] | a[i+63:i]) >> (c[i+63:i] & - 63) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + Concatenate packed 64-bit integers in "b" and "a" producing an intermediate 128-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 64-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := ((b[i+63:i] << 64)[127:0] | a[i+63:i]) >> (c[i+63:i] & 63) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - - Concatenate packed 64-bit integers in "b" and "a" producing an intermediate - 128-bit result. Shift the result right by the amount specified in the corresponding - element of "c", and store the lower 64-bits in "dst" using writemask "k" (elements are - copied from "a" when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := ((b[i+63:i] << 64)[127:0] | a[i+63:i]) >> (c[i+63:i] & - 63) - ELSE - dst[i+63:i] := a[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + Concatenate packed 64-bit integers in "b" and "a" producing an intermediate 128-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 64-bits in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := ((b[i+63:i] << 64)[127:0] | a[i+63:i]) >> (c[i+63:i] & 63) + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - Concatenate packed 64-bit integers in "b" and "a" producing an intermediate - 128-bit result. Shift the result right by the amount specified in the corresponding - element of "c", and store the lower 64-bits in "dst". - - FOR j := 0 to 1 - i := j*64 - dst[i+63:i] := ((b[i+63:i] << 64)[127:0] | a[i+63:i]) >> (c[i+63:i] & - 63) - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + Concatenate packed 64-bit integers in "b" and "a" producing an intermediate 128-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 64-bits in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := ((b[i+63:i] << 64)[127:0] | a[i+63:i]) >> (c[i+63:i] & 63) +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - - Concatenate packed 32-bit integers in "b" and "a" producing an intermediate - 64-bit result. Shift the result right by the amount specified in the corresponding - element of "c", and store the lower 32-bits in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := ((b[i+31:i] << 32)[63:0] | a[i+31:i]) >> (c[i+31:i] & 31) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + Concatenate packed 32-bit integers in "b" and "a" producing an intermediate 64-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 32-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := ((b[i+31:i] << 32)[63:0] | a[i+31:i]) >> (c[i+31:i] & 31) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - - Concatenate packed 32-bit integers in "b" and "a" producing an intermediate - 64-bit result. Shift the result right by the amount specified in the corresponding - element of "c", and store the lower 32-bits in "dst" using writemask "k" (elements are - copied from "a" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := ((b[i+31:i] << 32)[63:0] | a[i+31:i]) >> (c[i+31:i] & 31) - ELSE - dst[i+31:i] := a[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + Concatenate packed 32-bit integers in "b" and "a" producing an intermediate 64-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 32-bits in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := ((b[i+31:i] << 32)[63:0] | a[i+31:i]) >> (c[i+31:i] & 31) + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - Concatenate packed 32-bit integers in "b" and "a" producing an intermediate - 64-bit result. Shift the result right by the amount specified in the corresponding - element of "c", and store the lower 32-bits in "dst". - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := ((b[i+31:i] << 32)[63:0] | a[i+31:i]) >> (c[i+31:i] & 31) - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + Concatenate packed 32-bit integers in "b" and "a" producing an intermediate 64-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 32-bits in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := ((b[i+31:i] << 32)[63:0] | a[i+31:i]) >> (c[i+31:i] & 31) +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - - Concatenate packed 32-bit integers in "b" and "a" producing an intermediate - 64-bit result. Shift the result right by the amount specified in the corresponding - element of "c", and store the lower 32-bits in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := ((b[i+31:i] << 32)[63:0] | a[i+31:i]) >> (c[i+31:i] & 31) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + Concatenate packed 32-bit integers in "b" and "a" producing an intermediate 64-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 32-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := ((b[i+31:i] << 32)[63:0] | a[i+31:i]) >> (c[i+31:i] & 31) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - - Concatenate packed 32-bit integers in "b" and "a" producing an intermediate - 64-bit result. Shift the result right by the amount specified in the corresponding - element of "c", and store the lower 32-bits in "dst" using writemask "k" (elements are - copied from "a" when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := ((b[i+31:i] << 32)[63:0] | a[i+31:i]) >> (c[i+31:i] & 31) - ELSE - dst[i+31:i] := a[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + Concatenate packed 32-bit integers in "b" and "a" producing an intermediate 64-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 32-bits in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := ((b[i+31:i] << 32)[63:0] | a[i+31:i]) >> (c[i+31:i] & 31) + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - Concatenate packed 32-bit integers in "b" and "a" producing an intermediate - 64-bit result. Shift the result right by the amount specified in the corresponding - element of "c", and store the lower 32-bits in "dst". - - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := ((b[i+31:i] << 32)[63:0] | a[i+31:i]) >> (c[i+31:i] & 31) - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + Concatenate packed 32-bit integers in "b" and "a" producing an intermediate 64-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 32-bits in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ((b[i+31:i] << 32)[63:0] | a[i+31:i]) >> (c[i+31:i] & 31) +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
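For review convenience, a minimal scalar sketch of the per-lane step in the variable-count concat-and-shift-right entries above (the helper name and standalone-function framing are mine, not part of the XML or this patch):

    // One 32-bit lane of the shift-right-double operation: b:a forms a
    // 64-bit value that is shifted right by c modulo 32, keeping the low half.
    fn shrdv32_lane(a: u32, b: u32, c: u32) -> u32 {
        let concat = ((b as u64) << 32) | (a as u64);
        (concat >> (c & 31)) as u32
    }

    // E.g. a count of 4 pulls the low 4 bits of `b` in at the top of `a`:
    // shrdv32_lane(0x0000_00FF, 0x0000_000A, 4) == 0xA000_000F.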
+ Shift
- - - - - - Concatenate packed 16-bit integers in "b" and "a" producing an intermediate - 32-bit result. Shift the result right by the amount specified in the corresponding - element of "c", and store the lower 16-bits in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := ((b[i+15:i] << 16)[31:0] | a[i+15:i]) >> (c[i+15:i] & 15) - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + Concatenate packed 16-bit integers in "b" and "a" producing an intermediate 32-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 16-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := ((b[i+15:i] << 16)[31:0] | a[i+15:i]) >> (c[i+15:i] & 15) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - - Concatenate packed 16-bit integers in "b" and "a" producing an intermediate - 32-bit result. Shift the result right by the amount specified in the corresponding - element of "c", and store the lower 16-bits in "dst" using writemask "k" (elements are - copied from "a" when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := ((b[i+15:i] << 16)[31:0] | a[i+15:i]) >> (c[i+15:i] & 15) - ELSE - dst[i+15:i] := a[i+15:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + Concatenate packed 16-bit integers in "b" and "a" producing an intermediate 32-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 16-bits in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := ((b[i+15:i] << 16)[31:0] | a[i+15:i]) >> (c[i+15:i] & 15) + ELSE + dst[i+15:i] := a[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - Concatenate packed 16-bit integers in "b" and "a" producing an intermediate - 32-bit result. Shift the result right by the amount specified in the corresponding - element of "c", and store the lower 16-bits in "dst". - - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := ((b[i+15:i] << 16)[31:0] | a[i+15:i]) >> (c[i+15:i] & 15) - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + Concatenate packed 16-bit integers in "b" and "a" producing an intermediate 32-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 16-bits in "dst". + +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := ((b[i+15:i] << 16)[31:0] | a[i+15:i]) >> (c[i+15:i] & 15) +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - - Concatenate packed 16-bit integers in "b" and "a" producing an intermediate - 32-bit result. Shift the result right by the amount specified in the corresponding - element of "c", and store the lower 16-bits in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := ((b[i+15:i] << 16)[31:0] | a[i+15:i]) >> (c[i+15:i] & 15) - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + Concatenate packed 16-bit integers in "b" and "a" producing an intermediate 32-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 16-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := ((b[i+15:i] << 16)[31:0] | a[i+15:i]) >> (c[i+15:i] & 15) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - - Concatenate packed 16-bit integers in "b" and "a" producing an intermediate - 32-bit result. Shift the result right by the amount specified in the corresponding - element of "c", and store the lower 16-bits in "dst" using writemask "k" (elements are - copied from "a" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := ((b[i+15:i] << 16)[31:0] | a[i+15:i]) >> (c[i+15:i] & 15) - ELSE - dst[i+15:i] := a[i+15:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + Concatenate packed 16-bit integers in "b" and "a" producing an intermediate 32-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 16-bits in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := ((b[i+15:i] << 16)[31:0] | a[i+15:i]) >> (c[i+15:i] & 15) + ELSE + dst[i+15:i] := a[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - Concatenate packed 16-bit integers in "b" and "a" producing an intermediate - 32-bit result. Shift the result right by the amount specified in the corresponding - element of "c", and store the lower 16-bits in "dst". - - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := ((b[i+15:i] << 16)[31:0] | a[i+15:i]) >> (c[i+15:i] & 15) - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + Concatenate packed 16-bit integers in "b" and "a" producing an intermediate 32-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 16-bits in "dst". + +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := ((b[i+15:i] << 16)[31:0] | a[i+15:i]) >> (c[i+15:i] & 15) +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
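The maskz/mask pairs above differ only in the fallback lane, so the merging step they share can be sketched once, with illustrative names (not from the XML):

    // `computed` holds the per-lane shift results; `merge` is the fallback
    // vector, i.e. all zeros for the zeromask forms and the `src` (or `a`)
    // lanes for the writemask forms.
    fn apply_mask8(k: u8, computed: [u32; 8], merge: [u32; 8]) -> [u32; 8] {
        let mut dst = [0u32; 8];
        for j in 0..8 {
            dst[j] = if (k >> j) & 1 == 1 { computed[j] } else { merge[j] };
        }
        dst
    }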
+ Shift
- - - - - - Concatenate packed 64-bit integers in "b" and "a" producing an intermediate - 128-bit result. Shift the result right by "imm8" bits, and store the lower 64-bits in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := ((b[i+63:i] << 64)[127:0] | a[i+63:i]) >> imm8[5:0] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + Concatenate packed 64-bit integers in "b" and "a" producing an intermediate 128-bit result. Shift the result right by "imm8" bits, and store the lower 64-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + dst[i+63:i] := ((b[i+63:i] << 64)[127:0] | a[i+63:i]) >> imm8[5:0] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - - - Concatenate packed 64-bit integers in "b" and "a" producing an intermediate - 128-bit result. Shift the result right by "imm8" bits, and store the lower 64-bits in - "dst" using writemask "k" (elements are copied from "src"" when the corresponding mask - bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - dst[i+63:i] := ((b[i+63:i] << 64)[127:0] | a[i+63:i]) >> imm8[5:0] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + + Concatenate packed 64-bit integers in "b" and "a" producing an intermediate 128-bit result. Shift the result right by "imm8" bits, and store the lower 64-bits in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). +
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ((b[i+63:i] << 64)[127:0] | a[i+63:i]) >> imm8[5:0]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+
+
+ AVX512_VBMI2
+ AVX512VL
+
immintrin.h
+ Shift
- - - - - Concatenate packed 64-bit integers in "b" and "a" producing an intermediate - 128-bit result. Shift the result right by "imm8" bits, and store the lower 64-bits in - "dst". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := ((b[i+63:i] << 64)[127:0] | a[i+63:i]) >> imm8[5:0] - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + Concatenate packed 64-bit integers in "b" and "a" producing an intermediate 128-bit result. Shift the result right by "imm8" bits, and store the lower 64-bits in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := ((b[i+63:i] << 64)[127:0] | a[i+63:i]) >> imm8[5:0] +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - - Concatenate packed 64-bit integers in "b" and "a" producing an intermediate - 128-bit result. Shift the result right by "imm8" bits, and store the lower 64-bits in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := ((b[i+63:i] << 64)[127:0] | a[i+63:i]) >> imm8[5:0] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + Concatenate packed 64-bit integers in "b" and "a" producing an intermediate 128-bit result. Shift the result right by "imm8" bits, and store the lower 64-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + dst[i+63:i] := ((b[i+63:i] << 64)[127:0] | a[i+63:i]) >> imm8[5:0] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - - - Concatenate packed 64-bit integers in "b" and "a" producing an intermediate - 128-bit result. Shift the result right by "imm8" bits, and store the lower 64-bits in - "dst" using writemask "k" (elements are copied from "src"" when the corresponding mask - bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - dst[i+63:i] := ((b[i+63:i] << 64)[127:0] | a[i+63:i]) >> imm8[5:0] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + + Concatenate packed 64-bit integers in "b" and "a" producing an intermediate 128-bit result. Shift the result right by "imm8" bits, and store the lower 64-bits in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). +
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ((b[i+63:i] << 64)[127:0] | a[i+63:i]) >> imm8[5:0]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+
+
+ AVX512_VBMI2
+ AVX512VL
+
immintrin.h
+ Shift
- - - - - Concatenate packed 64-bit integers in "b" and "a" producing an intermediate - 128-bit result. Shift the result right by "imm8" bits, and store the lower 64-bits in - "dst". - - FOR j := 0 to 1 - i := j*64 - dst[i+63:i] := ((b[i+63:i] << 64)[127:0] | a[i+63:i]) >> imm8[5:0] - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + Concatenate packed 64-bit integers in "b" and "a" producing an intermediate 128-bit result. Shift the result right by "imm8" bits, and store the lower 64-bits in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := ((b[i+63:i] << 64)[127:0] | a[i+63:i]) >> imm8[5:0] +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
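The immediate-count entries take the shift amount from the low bits of "imm8" (imm8[5:0] for the 64-bit lanes above; the 32- and 16-bit entries below use imm8[4:0] and imm8[3:0]). A hypothetical scalar model of one 64-bit lane, mirroring the pseudocode:

    // The count is masked to imm8[5:0], i.e. taken modulo 64, before the
    // 128-bit intermediate b:a is shifted right and the low half is kept.
    fn shrdi64_lane(a: u64, b: u64, imm8: u8) -> u64 {
        let concat = ((b as u128) << 64) | (a as u128);
        (concat >> (imm8 & 0x3F)) as u64
    }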
+ Shift
- - - - - - Concatenate packed 32-bit integers in "b" and "a" producing an intermediate - 64-bit result. Shift the result right by "imm8" bits, and store the lower 32-bits in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := ((b[i+31:i] << 32)[63:0] | a[i+31:i]) >> imm8[4:0] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + Concatenate packed 32-bit integers in "b" and "a" producing an intermediate 64-bit result. Shift the result right by "imm8" bits, and store the lower 32-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := ((b[i+31:i] << 32)[63:0] | a[i+31:i]) >> imm8[4:0] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - - - Concatenate packed 32-bit integers in "b" and "a" producing an intermediate - 64-bit result. Shift the result right by "imm8" bits, and store the lower 32-bits in - "dst" using writemask "k" (elements are copied from "src" when the corresponding mask - bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - dst[i+31:i] := ((b[i+31:i] << 32)[63:0] | a[i+31:i]) >> imm8[4:0] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + + Concatenate packed 32-bit integers in "b" and "a" producing an intermediate 64-bit result. Shift the result right by "imm8" bits, and store the lower 32-bits in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + dst[i+31:i] := ((b[i+31:i] << 32)[63:0] | a[i+31:i]) >> imm8[4:0] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - Concatenate packed 32-bit integers in "b" and "a" producing an intermediate - 64-bit result. Shift the result right by "imm8" bits, and store the lower 32-bits in - "dst". - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := ((b[i+31:i] << 32)[63:0] | a[i+31:i]) >> imm8[4:0] - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + Concatenate packed 32-bit integers in "b" and "a" producing an intermediate 64-bit result. Shift the result right by "imm8" bits, and store the lower 32-bits in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := ((b[i+31:i] << 32)[63:0] | a[i+31:i]) >> imm8[4:0] +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - - Concatenate packed 32-bit integers in "b" and "a" producing an intermediate - 64-bit result. Shift the result right by "imm8" bits, and store the lower 32-bits in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := ((b[i+31:i] << 32)[63:0] | a[i+31:i]) >> imm8[4:0] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + Concatenate packed 32-bit integers in "b" and "a" producing an intermediate 64-bit result. Shift the result right by "imm8" bits, and store the lower 32-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := ((b[i+31:i] << 32)[63:0] | a[i+31:i]) >> imm8[4:0] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - - - Concatenate packed 32-bit integers in "b" and "a" producing an intermediate - 64-bit result. Shift the result right by "imm8" bits, and store the lower 32-bits in - "dst" using writemask "k" (elements are copied from "src" when the corresponding mask - bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - dst[i+31:i] := ((b[i+31:i] << 32)[63:0] | a[i+31:i]) >> imm8[4:0] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + + Concatenate packed 32-bit integers in "b" and "a" producing an intermediate 64-bit result. Shift the result right by "imm8" bits, and store the lower 32-bits in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + dst[i+31:i] := ((b[i+31:i] << 32)[63:0] | a[i+31:i]) >> imm8[4:0] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - Concatenate packed 32-bit integers in "b" and "a" producing an intermediate - 64-bit result. Shift the result right by "imm8" bits, and store the lower 32-bits in - "dst". - - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := ((b[i+31:i] << 32)[63:0] | a[i+31:i]) >> imm8[4:0] - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + Concatenate packed 32-bit integers in "b" and "a" producing an intermediate 64-bit result. Shift the result right by "imm8" bits, and store the lower 32-bits in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ((b[i+31:i] << 32)[63:0] | a[i+31:i]) >> imm8[4:0] +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - - Concatenate packed 16-bit integers in "b" and "a" producing an intermediate - 32-bit result. Shift the result right by "imm8" bits, and store the lower 16-bits in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := ((b[i+15:i] << 16)[31:0] | a[i+15:i]) >> imm8[3:0] - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + Concatenate packed 16-bit integers in "b" and "a" producing an intermediate 32-bit result. Shift the result right by "imm8" bits, and store the lower 16-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := ((b[i+15:i] << 16)[31:0] | a[i+15:i]) >> imm8[3:0] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - - - Concatenate packed 16-bit integers in "b" and "a" producing an intermediate - 32-bit result. Shift the result right by "imm8" bits, and store the lower 16-bits in - "dst" using writemask "k" (elements are copied from "src" when the corresponding mask - bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := ((b[i+15:i] << 16)[31:0] | a[i+15:i]) >> imm8[3:0] - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + + Concatenate packed 16-bit integers in "b" and "a" producing an intermediate 32-bit result. Shift the result right by "imm8" bits, and store the lower 16-bits in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := ((b[i+15:i] << 16)[31:0] | a[i+15:i]) >> imm8[3:0] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - Concatenate packed 16-bit integers in "b" and "a" producing an intermediate - 32-bit result. Shift the result right by "imm8" bits, and store the lower 16-bits in - "dst". - - FOR j := 0 to 15 - i := j*16 - dst[i+15:i] := ((b[i+15:i] << 16)[31:0] | a[i+15:i]) >> imm8[3:0] - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + Concatenate packed 16-bit integers in "b" and "a" producing an intermediate 32-bit result. Shift the result right by "imm8" bits, and store the lower 16-bits in "dst". + +FOR j := 0 to 15 + i := j*16 + dst[i+15:i] := ((b[i+15:i] << 16)[31:0] | a[i+15:i]) >> imm8[3:0] +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - - Concatenate packed 16-bit integers in "b" and "a" producing an intermediate - 32-bit result. Shift the result right by "imm8" bits, and store the lower 16-bits in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := ((b[i+15:i] << 16)[31:0] | a[i+15:i]) >> imm8[3:0] - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + Concatenate packed 16-bit integers in "b" and "a" producing an intermediate 32-bit result. Shift the result right by "imm8" bits, and store the lower 16-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := ((b[i+15:i] << 16)[31:0] | a[i+15:i]) >> imm8[3:0] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - - - Concatenate packed 16-bit integers in "b" and "a" producing an intermediate - 32-bit result. Shift the result right by "imm8" bits, and store the lower 16-bits in - "dst" using writemask "k" (elements are copied from "src" when the corresponding mask - bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := ((b[i+15:i] << 16)[31:0] | a[i+15:i]) >> imm8[3:0] - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + + Concatenate packed 16-bit integers in "b" and "a" producing an intermediate 32-bit result. Shift the result right by "imm8" bits, and store the lower 16-bits in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := ((b[i+15:i] << 16)[31:0] | a[i+15:i]) >> imm8[3:0] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - Concatenate packed 16-bit integers in "b" and "a" producing an intermediate - 32-bit result. Shift the result right by "imm8" bits, and store the lower 16-bits in - "dst". - - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := ((b[i+15:i] << 16)[31:0] | a[i+15:i]) >> imm8[3:0] - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + Concatenate packed 16-bit integers in "b" and "a" producing an intermediate 32-bit result. Shift the result right by "imm8" bits, and store the lower 16-bits in "dst". + +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := ((b[i+15:i] << 16)[31:0] | a[i+15:i]) >> imm8[3:0] +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - - Concatenate packed 64-bit integers in "a" and "b" producing an intermediate - 128-bit result. Shift the result left by the amount specified in the corresponding - element of "c", and store the upper 64-bits in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - tmp[127:0] := ((a[i+63:i] << 64)[127:0] | b[i+63:i]) << (c[i+63:i] & 63) - dst[i+63:i] := tmp[127:64] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + Concatenate packed 64-bit integers in "a" and "b" producing an intermediate 128-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 64-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + tmp[127:0] := ((a[i+63:i] << 64)[127:0] | b[i+63:i]) << (c[i+63:i] & 63) + dst[i+63:i] := tmp[127:64] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - - Concatenate packed 64-bit integers in "a" and "b" producing an intermediate - 128-bit result. Shift the result left by the amount specified in the corresponding - element of "c", and store the upper 64-bits in "dst" using writemask "k" (elements are - copied from "a" when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - tmp[127:0] := ((a[i+63:i] << 64)[127:0] | b[i+63:i]) << (c[i+63:i] & 63) - dst[i+63:i] := tmp[127:64] - ELSE - dst[i+63:i] := a[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + Concatenate packed 64-bit integers in "a" and "b" producing an intermediate 128-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 64-bits in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + tmp[127:0] := ((a[i+63:i] << 64)[127:0] | b[i+63:i]) << (c[i+63:i] & 63) + dst[i+63:i] := tmp[127:64] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - Concatenate packed 64-bit integers in "a" and "b" producing an intermediate - 128-bit result. Shift the result left by the amount specified in the corresponding - element of "c", and store the upper 64-bits in "dst". - - FOR j := 0 to 3 - i := j*64 - tmp[127:0] := ((a[i+63:i] << 64)[127:0] | b[i+63:i]) << (c[i+63:i] & 63) - dst[i+63:i] := tmp[127:64] - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + Concatenate packed 64-bit integers in "a" and "b" producing an intermediate 128-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 64-bits in "dst". + +FOR j := 0 to 3 + i := j*64 + tmp[127:0] := ((a[i+63:i] << 64)[127:0] | b[i+63:i]) << (c[i+63:i] & 63) + dst[i+63:i] := tmp[127:64] +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - - Concatenate packed 64-bit integers in "a" and "b" producing an intermediate - 128-bit result. Shift the result left by the amount specified in the corresponding - element of "c", and store the upper 64-bits in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - tmp[127:0] := ((a[i+63:i] << 64)[127:0] | b[i+63:i]) << (c[i+63:i] & 63) - dst[i+63:i] := tmp[127:64] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + Concatenate packed 64-bit integers in "a" and "b" producing an intermediate 128-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 64-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + tmp[127:0] := ((a[i+63:i] << 64)[127:0] | b[i+63:i]) << (c[i+63:i] & 63) + dst[i+63:i] := tmp[127:64] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - - Concatenate packed 64-bit integers in "a" and "b" producing an intermediate - 128-bit result. Shift the result left by the amount specified in the corresponding - element of "c", and store the upper 64-bits in "dst" using writemask "k" (elements are - copied from "a" when the corresponding mask bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - tmp[127:0] := ((a[i+63:i] << 64)[127:0] | b[i+63:i]) << (c[i+63:i] & 63) - dst[i+63:i] := tmp[127:64] - ELSE - dst[i+63:i] := a[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + Concatenate packed 64-bit integers in "a" and "b" producing an intermediate 128-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 64-bits in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + tmp[127:0] := ((a[i+63:i] << 64)[127:0] | b[i+63:i]) << (c[i+63:i] & 63) + dst[i+63:i] := tmp[127:64] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - Concatenate packed 64-bit integers in "a" and "b" producing an intermediate - 128-bit result. Shift the result left by the amount specified in the corresponding - element of "c", and store the upper 64-bits in "dst". - - FOR j := 0 to 1 - i := j*64 - tmp[127:0] := ((a[i+63:i] << 64)[127:0] | b[i+63:i]) << (c[i+63:i] & 63) - dst[i+63:i] := tmp[127:64] - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + Concatenate packed 64-bit integers in "a" and "b" producing an intermediate 128-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 64-bits in "dst". + +FOR j := 0 to 1 + i := j*64 + tmp[127:0] := ((a[i+63:i] << 64)[127:0] | b[i+63:i]) << (c[i+63:i] & 63) + dst[i+63:i] := tmp[127:64] +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
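The shift-left-double entries keep the *upper* half of the widened intermediate rather than the lower half; a scalar sketch of one 64-bit lane (helper name is mine, not from the XML):

    // a:b forms a 128-bit value shifted left by c modulo 64; bits shifted
    // past position 127 are discarded, and bits [127:64] become the result.
    fn shldv64_lane(a: u64, b: u64, c: u64) -> u64 {
        let concat = ((a as u128) << 64) | (b as u128);
        ((concat << (c & 63)) >> 64) as u64
    }

    // E.g. shldv64_lane(1, 0x8000_0000_0000_0000, 1) == 3: the top bit of
    // `b` is shifted in below the bits of `a`.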
+ Shift
- - - - - - Concatenate packed 32-bit integers in "a" and "b" producing an intermediate - 64-bit result. Shift the result left by the amount specified in the corresponding - element of "c", and store the upper 32-bits in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - tmp[63:0] := ((a[i+31:i] << 32)[63:0] | b[i+31:i]) << (c[i+31:i] & 31) - dst[i+31:i] := tmp[63:32] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + Concatenate packed 32-bit integers in "a" and "b" producing an intermediate 64-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 32-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + tmp[63:0] := ((a[i+31:i] << 32)[63:0] | b[i+31:i]) << (c[i+31:i] & 31) + dst[i+31:i] := tmp[63:32] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - - Concatenate packed 32-bit integers in "a" and "b" producing an intermediate - 64-bit result. Shift the result left by the amount specified in the corresponding - element of "c", and store the upper 32-bits in "dst" using writemask "k" (elements are - copied from "a" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - tmp[63:0] := ((a[i+31:i] << 32)[63:0] | b[i+31:i]) << (c[i+31:i] & 31) - dst[i+31:i] := tmp[63:32] - ELSE - dst[i+31:i] := a[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + Concatenate packed 32-bit integers in "a" and "b" producing an intermediate 64-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 32-bits in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + tmp[63:0] := ((a[i+31:i] << 32)[63:0] | b[i+31:i]) << (c[i+31:i] & 31) + dst[i+31:i] := tmp[63:32] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - Concatenate packed 32-bit integers in "a" and "b" producing an intermediate - 64-bit result. Shift the result left by the amount specified in the corresponding - element of "c", and store the upper 32-bits in "dst". - - FOR j := 0 to 7 - i := j*32 - tmp[63:0] := ((a[i+31:i] << 32)[63:0] | b[i+31:i]) << (c[i+31:i] & 31) - dst[i+31:i] := tmp[63:32] - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + Concatenate packed 32-bit integers in "a" and "b" producing an intermediate 64-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 32-bits in "dst". + +FOR j := 0 to 7 + i := j*32 + tmp[63:0] := ((a[i+31:i] << 32)[63:0] | b[i+31:i]) << (c[i+31:i] & 31) + dst[i+31:i] := tmp[63:32] +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - - Concatenate packed 32-bit integers in "a" and "b" producing an intermediate - 64-bit result. Shift the result left by the amount specified in the corresponding - element of "c", and store the upper 32-bits in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - tmp[63:0] := ((a[i+31:i] << 32)[63:0] | b[i+31:i]) << (c[i+31:i] & 31) - dst[i+31:i] := tmp[63:32] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + Concatenate packed 32-bit integers in "a" and "b" producing an intermediate 64-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 32-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + tmp[63:0] := ((a[i+31:i] << 32)[63:0] | b[i+31:i]) << (c[i+31:i] & 31) + dst[i+31:i] := tmp[63:32] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - - Concatenate packed 32-bit integers in "a" and "b" producing an intermediate - 64-bit result. Shift the result left by the amount specified in the corresponding - element of "c", and store the upper 32-bits in "dst" using writemask "k" (elements are - copied from "a" when the corresponding mask bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - tmp[63:0] := ((a[i+31:i] << 32)[63:0] | b[i+31:i]) << (c[i+31:i] & 31) - dst[i+31:i] := tmp[63:32] - ELSE - dst[i+31:i] := a[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + Concatenate packed 32-bit integers in "a" and "b" producing an intermediate 64-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 32-bits in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + tmp[63:0] := ((a[i+31:i] << 32)[63:0] | b[i+31:i]) << (c[i+31:i] & 31) + dst[i+31:i] := tmp[63:32] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - Concatenate packed 32-bit integers in "a" and "b" producing an intermediate - 64-bit result. Shift the result left by the amount specified in the corresponding - element of "c", and store the upper 32-bits in "dst". - - FOR j := 0 to 3 - i := j*32 - tmp[63:0] := ((a[i+31:i] << 32)[63:0] | b[i+31:i]) << (c[i+31:i] & 31) - dst[i+31:i] := tmp[63:32] - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + Concatenate packed 32-bit integers in "a" and "b" producing an intermediate 64-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 32-bits in "dst". + +FOR j := 0 to 3 + i := j*32 + tmp[63:0] := ((a[i+31:i] << 32)[63:0] | b[i+31:i]) << (c[i+31:i] & 31) + dst[i+31:i] := tmp[63:32] +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - - Concatenate packed 16-bit integers in "a" and "b" producing an intermediate - 32-bit result. Shift the result left by the amount specified in the corresponding - element of "c", and store the upper 16-bits in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - tmp[31:0] := ((a[i+15:i] << 16)[31:0] | b[i+15:i]) << (c[i+15:i] & 15) - dst[i+15:i] := tmp[31:16] - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + Concatenate packed 16-bit integers in "a" and "b" producing an intermediate 32-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 16-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + tmp[31:0] := ((a[i+15:i] << 16)[31:0] | b[i+15:i]) << (c[i+15:i] & 15) + dst[i+15:i] := tmp[31:16] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - - Concatenate packed 16-bit integers in "a" and "b" producing an intermediate - 32-bit result. Shift the result left by the amount specified in the corresponding - element of "c", and store the upper 16-bits in "dst" using writemask "k" (elements are - copied from "a" when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - tmp[31:0] := ((a[i+15:i] << 16)[31:0] | b[i+15:i]) << (c[i+15:i] & 15) - dst[i+15:i] := tmp[31:16] - ELSE - dst[i+15:i] := a[i+15:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + Concatenate packed 16-bit integers in "a" and "b" producing an intermediate 32-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 16-bits in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + tmp[31:0] := ((a[i+15:i] << 16)[31:0] | b[i+15:i]) << (c[i+15:i] & 15) + dst[i+15:i] := tmp[31:16] + ELSE + dst[i+15:i] := a[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - Concatenate packed 16-bit integers in "a" and "b" producing an intermediate - 32-bit result. Shift the result left by the amount specified in the corresponding - element of "c", and store the upper 16-bits in "dst". - - FOR j := 0 to 15 - i := j*16 - tmp[31:0] := ((a[i+15:i] << 16)[31:0] | b[i+15:i]) << (c[i+15:i] & 15) - dst[i+15:i] := tmp[31:16] - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + Concatenate packed 16-bit integers in "a" and "b" producing an intermediate 32-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 16-bits in "dst". + +FOR j := 0 to 15 + i := j*16 + tmp[31:0] := ((a[i+15:i] << 16)[31:0] | b[i+15:i]) << (c[i+15:i] & 15) + dst[i+15:i] := tmp[31:16] +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - - Concatenate packed 16-bit integers in "a" and "b" producing an intermediate - 32-bit result. Shift the result left by the amount specified in the corresponding - element of "c", and store the upper 16-bits in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - tmp[31:0] := ((a[i+15:i] << 16)[31:0] | b[i+15:i]) << (c[i+15:i] & 15) - dst[i+15:i] := tmp[31:16] - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + Concatenate packed 16-bit integers in "a" and "b" producing an intermediate 32-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 16-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + tmp[31:0] := ((a[i+15:i] << 16)[31:0] | b[i+15:i]) << (c[i+15:i] & 15) + dst[i+15:i] := tmp[31:16] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - - Concatenate packed 16-bit integers in "a" and "b" producing an intermediate - 32-bit result. Shift the result left by the amount specified in the corresponding - element of "c", and store the upper 16-bits in "dst" using writemask "k" (elements are - copied from "a" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - tmp[31:0] := ((a[i+15:i] << 16)[31:0] | b[i+15:i]) << (c[i+15:i] & 15) - dst[i+15:i] := tmp[31:16] - ELSE - dst[i+15:i] := a[i+15:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + Concatenate packed 16-bit integers in "a" and "b" producing an intermediate 32-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 16-bits in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + tmp[31:0] := ((a[i+15:i] << 16)[31:0] | b[i+15:i]) << (c[i+15:i] & 15) + dst[i+15:i] := tmp[31:16] + ELSE + dst[i+15:i] := a[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - Concatenate packed 16-bit integers in "a" and "b" producing an intermediate - 32-bit result. Shift the result left by the amount specified in the corresponding - element of "c", and store the upper 16-bits in "dst". - - FOR j := 0 to 7 - i := j*16 - tmp[31:0] := ((a[i+15:i] << 16)[31:0] | b[i+15:i]) << (c[i+15:i] & 15) - dst[i+15:i] := tmp[31:16] - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + Concatenate packed 16-bit integers in "a" and "b" producing an intermediate 32-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 16-bits in "dst". + +FOR j := 0 to 7 + i := j*16 + tmp[31:0] := ((a[i+15:i] << 16)[31:0] | b[i+15:i]) << (c[i+15:i] & 15) + dst[i+15:i] := tmp[31:16] +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - - Concatenate packed 64-bit integers in "a" and "b" producing an intermediate - 128-bit result. Shift the result left by "imm8" bits, and store the upper 64-bits in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - tmp[127:0] := ((a[i+63:i] << 64)[127:0] | b[i+63:i]) << imm8[5:0] - dst[i+63:i] := tmp[127:64] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + Concatenate packed 64-bit integers in "a" and "b" producing an intermediate 128-bit result. Shift the result left by "imm8" bits, and store the upper 64-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + tmp[127:0] := ((a[i+63:i] << 64)[127:0] | b[i+63:i]) << imm8[5:0] + dst[i+63:i] := tmp[127:64] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - - - Concatenate packed 64-bit integers in "a" and "b" producing an intermediate - 128-bit result. Shift the result left by "imm8" bits, and store the upper 64-bits in - "dst" using writemask "k" (elements are copied from "src" when the corresponding mask - bit is not set). - - FOR j := 0 to 3 - i := j*64 - IF k[j] - tmp[127:0] := ((a[i+63:i] << 64)[127:0] | b[i+63:i]) << imm8[5:0] - dst[i+63:i] := tmp[127:64] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + + Concatenate packed 64-bit integers in "a" and "b" producing an intermediate 128-bit result. Shift the result left by "imm8" bits, and store the upper 64-bits in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*64 + IF k[j] + tmp[127:0] := ((a[i+63:i] << 64)[127:0] | b[i+63:i]) << imm8[5:0] + dst[i+63:i] := tmp[127:64] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - Concatenate packed 64-bit integers in "a" and "b" producing an intermediate - 128-bit result. Shift the result left by "imm8" bits, and store the upper 64-bits in - "dst"). - - FOR j := 0 to 3 - i := j*64 - tmp[127:0] := ((a[i+63:i] << 64)[127:0] | b[i+63:i]) << imm8[5:0] - dst[i+63:i] := tmp[127:64] - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + Concatenate packed 64-bit integers in "a" and "b" producing an intermediate 128-bit result. Shift the result left by "imm8" bits, and store the upper 64-bits in "dst". +
+FOR j := 0 to 3
+ i := j*64
+ tmp[127:0] := ((a[i+63:i] << 64)[127:0] | b[i+63:i]) << imm8[5:0]
+ dst[i+63:i] := tmp[127:64]
+ENDFOR
+dst[MAX:256] := 0
+
+
+ AVX512_VBMI2
+ AVX512VL
+
immintrin.h
+ Shift
- - - - - - Concatenate packed 64-bit integers in "a" and "b" producing an intermediate - 128-bit result. Shift the result left by "imm8" bits, and store the upper 64-bits in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - tmp[127:0] := ((a[i+63:i] << 64)[127:0] | b[i+63:i]) << imm8[5:0] - dst[i+63:i] := tmp[127:64] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + Concatenate packed 64-bit integers in "a" and "b" producing an intermediate 128-bit result. Shift the result left by "imm8" bits, and store the upper 64-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + tmp[127:0] := ((a[i+63:i] << 64)[127:0] | b[i+63:i]) << imm8[5:0] + dst[i+63:i] := tmp[127:64] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - - - Concatenate packed 64-bit integers in "a" and "b" producing an intermediate - 128-bit result. Shift the result left by "imm8" bits, and store the upper 64-bits in - "dst" using writemask "k" (elements are copied from "src" when the corresponding mask - bit is not set). - - FOR j := 0 to 1 - i := j*64 - IF k[j] - tmp[127:0] := ((a[i+63:i] << 64)[127:0] | b[i+63:i]) << imm8[5:0] - dst[i+63:i] := tmp[127:64] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + + Concatenate packed 64-bit integers in "a" and "b" producing an intermediate 128-bit result. Shift the result left by "imm8" bits, and store the upper 64-bits in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 1 + i := j*64 + IF k[j] + tmp[127:0] := ((a[i+63:i] << 64)[127:0] | b[i+63:i]) << imm8[5:0] + dst[i+63:i] := tmp[127:64] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - Concatenate packed 64-bit integers in "a" and "b" producing an intermediate - 128-bit result. Shift the result left by "imm8" bits, and store the upper 64-bits in - "dst"). - - FOR j := 0 to 1 - i := j*64 - tmp[127:0] := ((a[i+63:i] << 64)[127:0] | b[i+63:i]) << imm8[5:0] - dst[i+63:i] := tmp[127:64] - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + Concatenate packed 64-bit integers in "a" and "b" producing an intermediate 128-bit result. Shift the result left by "imm8" bits, and store the upper 64-bits in "dst". +
+FOR j := 0 to 1
+ i := j*64
+ tmp[127:0] := ((a[i+63:i] << 64)[127:0] | b[i+63:i]) << imm8[5:0]
+ dst[i+63:i] := tmp[127:64]
+ENDFOR
+dst[MAX:128] := 0
+
+
+ AVX512_VBMI2
+ AVX512VL
+
immintrin.h
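For nonzero counts, the left-by-immediate entries above reduce to the familiar funnel-shift identity; a hedged scalar sketch (my formulation, not from the XML; for imm8[5:0] == 0 the result is simply "a"):

    // Upper word of (a:b) << n equals a << n with the top n bits of b
    // shifted in from the right; valid for counts 1..=63 only, since
    // b >> 64 would be an overflowing shift.
    fn shldi64_lane_alt(a: u64, b: u64, n: u32) -> u64 {
        debug_assert!((1..64).contains(&n));
        (a << n) | (b >> (64 - n))
    }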
+ Shift
- - - - - - Concatenate packed 32-bit integers in "a" and "b" producing an intermediate - 64-bit result. Shift the result left by "imm8" bits, and store the upper 32-bits in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - tmp[63:0] := ((a[i+31:i] << 32)[63:0] | b[i+31:i]) << imm8[4:0] - dst[i+31:i] := tmp[63:32] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + Concatenate packed 32-bit integers in "a" and "b" producing an intermediate 64-bit result. Shift the result left by "imm8" bits, and store the upper 32-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + tmp[63:0] := ((a[i+31:i] << 32)[63:0] | b[i+31:i]) << imm8[4:0] + dst[i+31:i] := tmp[63:32] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - - - Concatenate packed 32-bit integers in "a" and "b" producing an intermediate - 64-bit result. Shift the result left by "imm8" bits, and store the upper 32-bits in - "dst" using writemask "k" (elements are copied from "src" when the corresponding mask - bit is not set). - - FOR j := 0 to 7 - i := j*32 - IF k[j] - tmp[63:0] := ((a[i+31:i] << 32)[63:0] | b[i+31:i]) << imm8[4:0] - dst[i+31:i] := tmp[63:32] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + + Concatenate packed 32-bit integers in "a" and "b" producing an intermediate 64-bit result. Shift the result left by "imm8" bits, and store the upper 32-bits in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*32 + IF k[j] + tmp[63:0] := ((a[i+31:i] << 32)[63:0] | b[i+31:i]) << imm8[4:0] + dst[i+31:i] := tmp[63:32] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - Concatenate packed 32-bit integers in "a" and "b" producing an intermediate - 64-bit result. Shift the result left by "imm8" bits, and store the upper 32-bits in - "dst". - - FOR j := 0 to 7 - i := j*32 - tmp[63:0] := ((a[i+31:i] << 32)[63:0] | b[i+31:i]) << imm8[4:0] - dst[i+31:i] := tmp[63:32] - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + Concatenate packed 32-bit integers in "a" and "b" producing an intermediate 64-bit result. Shift the result left by "imm8" bits, and store the upper 32-bits in "dst". + +FOR j := 0 to 7 + i := j*32 + tmp[63:0] := ((a[i+31:i] << 32)[63:0] | b[i+31:i]) << imm8[4:0] + dst[i+31:i] := tmp[63:32] +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - - Concatenate packed 32-bit integers in "a" and "b" producing an intermediate - 64-bit result. Shift the result left by "imm8" bits, and store the upper 32-bits in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - tmp[63:0] := ((a[i+31:i] << 32)[63:0] | b[i+31:i]) << imm8[4:0] - dst[i+31:i] := tmp[63:32] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + Concatenate packed 32-bit integers in "a" and "b" producing an intermediate 64-bit result. Shift the result left by "imm8" bits, and store the upper 32-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + tmp[63:0] := ((a[i+31:i] << 32)[63:0] | b[i+31:i]) << imm8[4:0] + dst[i+31:i] := tmp[63:32] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - - - Concatenate packed 32-bit integers in "a" and "b" producing an intermediate - 64-bit result. Shift the result left by "imm8" bits, and store the upper 32-bits in - "dst" using writemask "k" (elements are copied from "src" when the corresponding mask - bit is not set). - - FOR j := 0 to 3 - i := j*32 - IF k[j] - tmp[63:0] := ((a[i+31:i] << 32)[63:0] | b[i+31:i]) << imm8[4:0] - dst[i+31:i] := tmp[63:32] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + + Concatenate packed 32-bit integers in "a" and "b" producing an intermediate 64-bit result. Shift the result left by "imm8" bits, and store the upper 32-bits in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + i := j*32 + IF k[j] + tmp[63:0] := ((a[i+31:i] << 32)[63:0] | b[i+31:i]) << imm8[4:0] + dst[i+31:i] := tmp[63:32] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - Concatenate packed 32-bit integers in "a" and "b" producing an intermediate - 64-bit result. Shift the result left by "imm8" bits, and store the upper 32-bits in - "dst". - - FOR j := 0 to 3 - i := j*32 - tmp[63:0] := ((a[i+31:i] << 32)[63:0] | b[i+31:i]) << imm8[4:0] - dst[i+31:i] := tmp[63:32] - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + Concatenate packed 32-bit integers in "a" and "b" producing an intermediate 64-bit result. Shift the result left by "imm8" bits, and store the upper 32-bits in "dst". + +FOR j := 0 to 3 + i := j*32 + tmp[63:0] := ((a[i+31:i] << 32)[63:0] | b[i+31:i]) << imm8[4:0] + dst[i+31:i] := tmp[63:32] +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - - Concatenate packed 16-bit integers in "a" and "b" producing an intermediate - 32-bit result. Shift the result left by "imm8" bits, and store the upper 16-bits in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - tmp[31:0] := ((a[i+15:i] << 16)[31:0] | b[i+15:i]) << imm8[3:0] - dst[i+15:i] := tmp[31:16] - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + Concatenate packed 16-bit integers in "a" and "b" producing an intermediate 32-bit result. Shift the result left by "imm8" bits, and store the upper 16-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + tmp[31:0] := ((a[i+15:i] << 16)[31:0] | b[i+15:i]) << imm8[3:0] + dst[i+15:i] := tmp[31:16] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - - - Concatenate packed 16-bit integers in "a" and "b" producing an intermediate - 32-bit result. Shift the result left by "imm8" bits, and store the upper 16-bits in - "dst" using writemask "k" (elements are copied from "src" when the corresponding mask - bit is not set). - - FOR j := 0 to 15 - i := j*16 - IF k[j] - tmp[31:0] := ((a[i+15:i] << 16)[31:0] | b[i+15:i]) << imm8[3:0] - dst[i+15:i] := tmp[31:16] - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + + Concatenate packed 16-bit integers in "a" and "b" producing an intermediate 32-bit result. Shift the result left by "imm8" bits, and store the upper 16-bits in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*16 + IF k[j] + tmp[31:0] := ((a[i+15:i] << 16)[31:0] | b[i+15:i]) << imm8[3:0] + dst[i+15:i] := tmp[31:16] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - Concatenate packed 16-bit integers in "a" and "b" producing an intermediate - 32-bit result. Shift the result left by "imm8" bits, and store the upper 16-bits in - "dst"). - - FOR j := 0 to 15 - i := j*16 - tmp[31:0] := ((a[i+15:i] << 16)[31:0] | b[i+15:i]) << imm8[3:0] - dst[i+15:i] := tmp[31:16] - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + Concatenate packed 16-bit integers in "a" and "b" producing an intermediate 32-bit result. Shift the result left by "imm8" bits, and store the upper 16-bits in "dst". +
+FOR j := 0 to 15
+ i := j*16
+ tmp[31:0] := ((a[i+15:i] << 16)[31:0] | b[i+15:i]) << imm8[3:0]
+ dst[i+15:i] := tmp[31:16]
+ENDFOR
+dst[MAX:256] := 0
+
+
+ AVX512_VBMI2
+ AVX512VL
+
immintrin.h
+ Shift
- - - - - - Concatenate packed 16-bit integers in "a" and "b" producing an intermediate - 32-bit result. Shift the result left by "imm8" bits, and store the upper 16-bits in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - tmp[31:0] := ((a[i+15:i] << 16)[31:0] | b[i+15:i]) << imm8[3:0] - dst[i+15:i] := tmp[31:16] - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + Concatenate packed 16-bit integers in "a" and "b" producing an intermediate 32-bit result. Shift the result left by "imm8" bits, and store the upper 16-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + tmp[31:0] := ((a[i+15:i] << 16)[31:0] | b[i+15:i]) << imm8[3:0] + dst[i+15:i] := tmp[31:16] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - - - Concatenate packed 16-bit integers in "a" and "b" producing an intermediate - 32-bit result. Shift the result left by "imm8" bits, and store the upper 16-bits in - "dst" using writemask "k" (elements are copied from "src" when the corresponding mask - bit is not set). - - FOR j := 0 to 7 - i := j*16 - IF k[j] - tmp[31:0] := ((a[i+15:i] << 16)[31:0] | b[i+15:i]) << imm8[3:0] - dst[i+15:i] := tmp[31:16] - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + + + Concatenate packed 16-bit integers in "a" and "b" producing an intermediate 32-bit result. Shift the result left by "imm8" bits, and store the upper 16-bits in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*16 + IF k[j] + tmp[31:0] := ((a[i+15:i] << 16)[31:0] | b[i+15:i]) << imm8[3:0] + dst[i+15:i] := tmp[31:16] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Shift
- - - - - Concatenate packed 16-bit integers in "a" and "b" producing an intermediate - 32-bit result. Shift the result left by "imm8" bits, and store the upper 16-bits in - "dst"). - - FOR j := 0 to 7 - i := j*16 - tmp[31:0] := ((a[i+15:i] << 16)[31:0] | b[i+15:i]) << imm8[3:0] - dst[i+15:i] := tmp[31:16] - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Shift + + + + + Concatenate packed 16-bit integers in "a" and "b" producing an intermediate 32-bit result. Shift the result left by "imm8" bits, and store the upper 16-bits in "dst". +
+FOR j := 0 to 7
+ i := j*16
+ tmp[31:0] := ((a[i+15:i] << 16)[31:0] | b[i+15:i]) << imm8[3:0]
+ dst[i+15:i] := tmp[31:16]
+ENDFOR
+dst[MAX:128] := 0
+
+
+ AVX512_VBMI2
+ AVX512VL
+
immintrin.h
+ Shift
- Swizzle - - - - Load contiguous active 16-bit integers from unaligned memory at "mem_addr" - (those with their respective bit set in mask "k"), and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - m := 0 - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := MEM[mem_addr+m+15:mem_addr+m] - m := m + 16 - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Load + Swizzle + + + + Load contiguous active 16-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := MEM[mem_addr+m+15:mem_addr+m] + m := m + 16 + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
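A scalar model of the expand-load above may help: for each set bit of "k" the next contiguous element is consumed from memory, and inactive lanes are zeroed (the writemask variant keeps the lane of "src" instead). A sketch in plain Rust with a slice standing in for mem_addr; `expandload_epi16` is a hypothetical helper, not the stdarch API:

    // Scalar model of the masked expand-load pseudocode: consume the next
    // contiguous element of `mem` for each set mask bit, zero otherwise.
    fn expandload_epi16(mem: &[u16], k: u16) -> [u16; 16] {
        let mut dst = [0u16; 16];
        let mut m = 0;
        for j in 0..16 {
            if (k >> j) & 1 == 1 {
                dst[j] = mem[m]; // assumes mem holds at least k.count_ones() elements
                m += 1;
            }
        }
        dst
    }

    fn main() {
        let mem = [11u16, 22, 33];
        // Mask 0b101 places mem[0] in lane 0 and mem[1] in lane 2.
        let dst = expandload_epi16(&mem, 0b101);
        assert_eq!(dst[0], 11);
        assert_eq!(dst[1], 0);
        assert_eq!(dst[2], 22);
    }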
+ Load
- - Swizzle - - - - - Load contiguous active 16-bit integers from unaligned memory at "mem_addr" - (those with their respective bit set in mask "k"), and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - m := 0 - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := MEM[mem_addr+m+15:mem_addr+m] - m := m + 16 - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Load + + Swizzle + + + + + Load contiguous active 16-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := MEM[mem_addr+m+15:mem_addr+m] + m := m + 16 + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Load
- Swizzle - - - - Load contiguous active 16-bit integers from unaligned memory at "mem_addr" - (those with their respective bit set in mask "k"), and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - m := 0 - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := MEM[mem_addr+m+15:mem_addr+m] - m := m + 16 - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Load + Swizzle + + + + Load contiguous active 16-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := MEM[mem_addr+m+15:mem_addr+m] + m := m + 16 + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Load
- Swizzle - - - - - Load contiguous active 16-bit integers from unaligned memory at "mem_addr" - (those with their respective bit set in mask "k"), and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - m := 0 - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := MEM[mem_addr+m+15:mem_addr+m] - m := m + 16 - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Load + Swizzle + + + + + Load contiguous active 16-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := MEM[mem_addr+m+15:mem_addr+m] + m := m + 16 + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Load
- Swizzle - - - - Load contiguous active 8-bit integers from unaligned memory at "mem_addr" - (those with their respective bit set in mask "k"), and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - m := 0 - FOR j := 0 to 31 - i := j*8 - IF k[j] - dst[i+7:i] := MEM[mem_addr+m+7:mem_addr+m] - m := m + 8 - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Load + Swizzle + + + + Load contiguous active 8-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := MEM[mem_addr+m+7:mem_addr+m] + m := m + 8 + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Load
- Swizzle - - - - - Load contiguous active 8-bit integers from unaligned memory at "mem_addr" - (those with their respective bit set in mask "k"), and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - m := 0 - FOR j := 0 to 31 - i := j*8 - IF k[j] - dst[i+7:i] := MEM[mem_addr+m+7:mem_addr+m] - m := m + 8 - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Load + Swizzle + + + + + Load contiguous active 8-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := MEM[mem_addr+m+7:mem_addr+m] + m := m + 8 + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Load
- Swizzle - - - - Load contiguous active 8-bit integers from unaligned memory at "mem_addr" - (those with their respective bit set in mask "k"), and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - m := 0 - FOR j := 0 to 15 - i := j*8 - IF k[j] - dst[i+7:i] := MEM[mem_addr+m+7:mem_addr+m] - m := m + 8 - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Load + Swizzle + + + + Load contiguous active 8-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := MEM[mem_addr+m+7:mem_addr+m] + m := m + 8 + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Load
- Swizzle - - - - - Load contiguous active 8-bit integers from unaligned memory at "mem_addr" - (those with their respective bit set in mask "k"), and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - m := 0 - FOR j := 0 to 15 - i := j*8 - IF k[j] - dst[i+7:i] := MEM[mem_addr+m+7:mem_addr+m] - m := m + 8 - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Load + Swizzle + + + + + Load contiguous active 8-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := MEM[mem_addr+m+7:mem_addr+m] + m := m + 8 + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Load
- - - - Load contiguous active 16-bit integers from "a" (those with their respective - bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - m := 0 - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := a[m+15:m] - m := m + 16 - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Swizzle + + + + Load contiguous active 16-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := a[m+15:m] + m := m + 16 + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Swizzle
- - - - - Load contiguous active 16-bit integers from "a" (those with their respective - bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). - - m := 0 - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[i+15:i] := a[m+15:m] - m := m + 16 - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Swizzle + + + + + Load contiguous active 16-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[i+15:i] := a[m+15:m] + m := m + 16 + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Swizzle
- - - - Load contiguous active 16-bit integers from "a" (those with their respective - bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - m := 0 - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := a[m+15:m] - m := m + 16 - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Swizzle + + + + Load contiguous active 16-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := a[m+15:m] + m := m + 16 + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Swizzle
- - - - - Load contiguous active 16-bit integers from "a" (those with their respective - bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). - - m := 0 - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[i+15:i] := a[m+15:m] - m := m + 16 - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Swizzle + + + + + Load contiguous active 16-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[i+15:i] := a[m+15:m] + m := m + 16 + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Swizzle
- - - - Load contiguous active 8-bit integers from "a" (those with their respective bit - set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - - m := 0 - FOR j := 0 to 31 - i := j*8 - IF k[j] - dst[i+7:i] := a[m+7:m] - m := m + 8 - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Swizzle + + + + Load contiguous active 8-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := a[m+7:m] + m := m + 8 + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Swizzle
- - - - - Load contiguous active 8-bit integers from "a" (those with their respective bit - set in mask "k"), and store the results in "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). - - m := 0 - FOR j := 0 to 31 - i := j*8 - IF k[j] - dst[i+7:i] := a[m+7:m] - m := m + 8 - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Swizzle + + + + + Load contiguous active 8-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[i+7:i] := a[m+7:m] + m := m + 8 + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Swizzle
- - - - Load contiguous active 8-bit integers from "a" (those with their respective bit - set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - - m := 0 - FOR j := 0 to 15 - i := j*8 - IF k[j] - dst[i+7:i] := a[m+7:m] - m := m + 8 - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Swizzle + + + + Load contiguous active 8-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := a[m+7:m] + m := m + 8 + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Swizzle
- - - - - Load contiguous active 8-bit integers from "a" (those with their respective bit - set in mask "k"), and store the results in "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). - - m := 0 - FOR j := 0 to 15 - i := j*8 - IF k[j] - dst[i+7:i] := a[m+7:m] - m := m + 8 - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Swizzle + + + + + Load contiguous active 8-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[i+7:i] := a[m+7:m] + m := m + 8 + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Swizzle
- - - - Contiguously store the active 16-bit integers in "a" (those with their - respective bit set in zeromask "k") to "dst", and set the remaining elements to zero. - - size := 16 - m := 0 - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[m+size-1:m] := a[i+15:i] - m := m + size - FI - ENDFOR - dst[255:m] := 0 - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Swizzle + + + + Contiguously store the active 16-bit integers in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero. + +size := 16 +m := 0 +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[m+size-1:m] := a[i+15:i] + m := m + size + FI +ENDFOR +dst[255:m] := 0 +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
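Compress is the inverse permutation of expand: the active lanes of "a" are packed into the low positions of "dst" in order, and (in the zeromask form above) the tail is zeroed. A plain-Rust sketch under those assumptions; `compress_epi16` is a hypothetical helper:

    // Scalar model of the compress pseudocode: pack the active lanes of
    // `a` into the low positions of dst; the remainder stays zero.
    fn compress_epi16(a: &[u16; 16], k: u16) -> [u16; 16] {
        let mut dst = [0u16; 16];
        let mut m = 0;
        for j in 0..16 {
            if (k >> j) & 1 == 1 {
                dst[m] = a[j];
                m += 1;
            }
        }
        dst
    }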
+ Swizzle
- - - - - Contiguously store the active 16-bit integers in "a" (those with their - respective bit set in writemask "k") to "dst", and pass through the remaining elements - from "src". - - size := 16 - m := 0 - FOR j := 0 to 15 - i := j*16 - IF k[j] - dst[m+size-1:m] := a[i+15:i] - m := m + size - FI - ENDFOR - dst[255:m] := src[255:m] - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Swizzle + + + + + Contiguously store the active 16-bit integers in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src". + +size := 16 +m := 0 +FOR j := 0 to 15 + i := j*16 + IF k[j] + dst[m+size-1:m] := a[i+15:i] + m := m + size + FI +ENDFOR +dst[255:m] := src[255:m] +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Swizzle
- - - - Contiguously store the active 16-bit integers in "a" (those with their - respective bit set in zeromask "k") to "dst", and set the remaining elements to zero. - - size := 16 - m := 0 - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[m+size-1:m] := a[i+15:i] - m := m + size - FI - ENDFOR - dst[127:m] := 0 - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Swizzle + + + + Contiguously store the active 16-bit integers in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero. + +size := 16 +m := 0 +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[m+size-1:m] := a[i+15:i] + m := m + size + FI +ENDFOR +dst[127:m] := 0 +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Swizzle
- - - - - Contiguously store the active 16-bit integers in "a" (those with their - respective bit set in writemask "k") to "dst", and pass through the remaining elements - from "src". - - size := 16 - m := 0 - FOR j := 0 to 7 - i := j*16 - IF k[j] - dst[m+size-1:m] := a[i+15:i] - m := m + size - FI - ENDFOR - dst[127:m] := src[127:m] - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Swizzle + + + + + Contiguously store the active 16-bit integers in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src". + +size := 16 +m := 0 +FOR j := 0 to 7 + i := j*16 + IF k[j] + dst[m+size-1:m] := a[i+15:i] + m := m + size + FI +ENDFOR +dst[127:m] := src[127:m] +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Swizzle
- - - - Contiguously store the active 8-bit integers in "a" (those with their - respective bit set in zeromask "k") to "dst", and set the remaining elements to zero. - - size := 8 - m := 0 - FOR j := 0 to 31 - i := j*8 - IF k[j] - dst[m+size-1:m] := a[i+7:i] - m := m + size - FI - ENDFOR - dst[255:m] := 0 - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Swizzle + + + + Contiguously store the active 8-bit integers in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero. + +size := 8 +m := 0 +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[m+size-1:m] := a[i+7:i] + m := m + size + FI +ENDFOR +dst[255:m] := 0 +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Swizzle
- - - - - Contiguously store the active 8-bit integers in "a" (those with their - respective bit set in writemask "k") to "dst", and pass through the remaining elements - from "src". - - size := 8 - m := 0 - FOR j := 0 to 31 - i := j*8 - IF k[j] - dst[m+size-1:m] := a[i+7:i] - m := m + size - FI - ENDFOR - dst[255:m] := src[255:m] - dst[MAX:256] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Swizzle + + + + + Contiguously store the active 8-bit integers in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src". + +size := 8 +m := 0 +FOR j := 0 to 31 + i := j*8 + IF k[j] + dst[m+size-1:m] := a[i+7:i] + m := m + size + FI +ENDFOR +dst[255:m] := src[255:m] +dst[MAX:256] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Swizzle
- - - - Contiguously store the active 8-bit integers in "a" (those with their - respective bit set in zeromask "k") to "dst", and set the remaining elements to zero. - - size := 8 - m := 0 - FOR j := 0 to 15 - i := j*8 - IF k[j] - dst[m+size-1:m] := a[i+7:i] - m := m + size - FI - ENDFOR - dst[127:m] := 0 - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Swizzle + + + + Contiguously store the active 8-bit integers in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero. + +size := 8 +m := 0 +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[m+size-1:m] := a[i+7:i] + m := m + size + FI +ENDFOR +dst[127:m] := 0 +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Swizzle
- - - - - Contiguously store the active 8-bit integers in "a" (those with their - respective bit set in writemask "k") to "dst", and pass through the remaining elements - from "src". - - size := 8 - m := 0 - FOR j := 0 to 15 - i := j*8 - IF k[j] - dst[m+size-1:m] := a[i+7:i] - m := m + size - FI - ENDFOR - dst[127:m] := src[127:m] - dst[MAX:128] := 0 - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Swizzle + + + + + Contiguously store the active 8-bit integers in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src". + +size := 8 +m := 0 +FOR j := 0 to 15 + i := j*8 + IF k[j] + dst[m+size-1:m] := a[i+7:i] + m := m + size + FI +ENDFOR +dst[127:m] := src[127:m] +dst[MAX:128] := 0 + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Swizzle
- Swizzle - - - - - Contiguously store the active 16-bit integers in "a" (those with their - respective bit set in writemask "k") to unaligned memory at "base_addr". - - size := 16 - m := base_addr - FOR j := 0 to 15 - i := j*16 - IF k[j] - MEM[m+size-1:m] := a[i+15:i] - m := m + size - FI - ENDFOR - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Store + Swizzle + + + + + Contiguously store the active 16-bit integers in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +size := 16 +m := base_addr +FOR j := 0 to 15 + i := j*16 + IF k[j] + MEM[m+size-1:m] := a[i+15:i] + m := m + size + FI +ENDFOR + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Store
- Swizzle - - - - - Contiguously store the active 16-bit integers in "a" (those with their - respective bit set in writemask "k") to unaligned memory at "base_addr". - - size := 16 - m := base_addr - FOR j := 0 to 7 - i := j*16 - IF k[j] - MEM[m+size-1:m] := a[i+15:i] - m := m + size - FI - ENDFOR - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Store + Swizzle + + + + + Contiguously store the active 16-bit integers in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +size := 16 +m := base_addr +FOR j := 0 to 7 + i := j*16 + IF k[j] + MEM[m+size-1:m] := a[i+15:i] + m := m + size + FI +ENDFOR + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Store
- Swizzle - - - - - Contiguously store the active 8-bit integers in "a" (those with their - respective bit set in writemask "k") to unaligned memory at "base_addr". - - size := 8 - m := base_addr - FOR j := 0 to 31 - i := j*8 - IF k[j] - MEM[m+size-1:m] := a[i+7:i] - m := m + size - FI - ENDFOR - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Store + Swizzle + + + + + Contiguously store the active 8-bit integers in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +size := 8 +m := base_addr +FOR j := 0 to 31 + i := j*8 + IF k[j] + MEM[m+size-1:m] := a[i+7:i] + m := m + size + FI +ENDFOR + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
+ Store
- Swizzle - - - - - Contiguously store the active 8-bit integers in "a" (those with their - respective bit set in writemask "k") to unaligned memory at "base_addr". - - size := 8 - m := base_addr - FOR j := 0 to 15 - i := j*8 - IF k[j] - MEM[m+size-1:m] := a[i+7:i] - m := m + size - FI - ENDFOR - - - AVX512_VBMI2 - AVX512VL -
immintrin.h
- Store -
- - - - - - - - - Concatenate packed 64-bit integers in "b" and "a" producing an intermediate - 128-bit result. Shift the result right by the amount specified in the corresponding - element of "c", and store the lower 64-bits in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := ((b[i+63:i] << 64)[127:0] | a[i+63:i]) >> (c[i+63:i] & - 63) - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Shift + Swizzle + + + + + Contiguously store the active 8-bit integers in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +size := 8 +m := base_addr +FOR j := 0 to 15 + i := j*8 + IF k[j] + MEM[m+size-1:m] := a[i+7:i] + m := m + size + FI +ENDFOR + + + AVX512_VBMI2 + AVX512VL +
immintrin.h
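The store form differs from the register form in one way visible in the pseudocode: there is no zeroed tail, so only k.count_ones() elements are written at base_addr and the rest of memory is untouched. A plain-Rust sketch with a Vec standing in for the destination buffer; `compressstore_epi8` is a hypothetical helper:

    // Scalar model of the compress-store pseudocode: write only the
    // active elements, contiguously; no other memory is touched.
    fn compressstore_epi8(a: &[u8; 16], k: u16, mem: &mut Vec<u8>) {
        for j in 0..16 {
            if (k >> j) & 1 == 1 {
                mem.push(a[j]);
            }
        }
    }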
+ Store +
+ + + + + + + + + Concatenate packed 64-bit integers in "b" and "a" producing an intermediate 128-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 64-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ((b[i+63:i] << 64)[127:0] | a[i+63:i]) >> (c[i+63:i] & 63) + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Shift
- - - - - - Concatenate packed 64-bit integers in "b" and "a" producing an intermediate - 128-bit result. Shift the result right by the amount specified in the corresponding - element of "c", and store the lower 64-bits in "dst" using writemask "k" (elements are - copied from "a" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := ((b[i+63:i] << 64)[127:0] | a[i+63:i]) >> (c[i+63:i] & - 63) - ELSE - dst[i+63:i] := a[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Shift + + + + + + Concatenate packed 64-bit integers in "b" and "a" producing an intermediate 128-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 64-bits in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ((b[i+63:i] << 64)[127:0] | a[i+63:i]) >> (c[i+63:i] & 63) + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Shift
- - - - - Concatenate packed 64-bit integers in "b" and "a" producing an intermediate - 128-bit result. Shift the result right by the amount specified in the corresponding - element of "c", and store the lower 64-bits in "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := ((b[i+63:i] << 64)[127:0] | a[i+63:i]) >> (c[i+63:i] & - 63) - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Shift + + + + + Concatenate packed 64-bit integers in "b" and "a" producing an intermediate 128-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 64-bits in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := ((b[i+63:i] << 64)[127:0] | a[i+63:i]) >> (c[i+63:i] & 63) +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
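Because each 64-bit lane pairs with a per-lane count, the variable right concat-shift is easiest to model through a 128-bit intermediate, exactly as the pseudocode writes it. One lane in plain Rust; `shrdvq_lane` is a hypothetical helper:

    // One lane of the variable-count VPSHRDVQ pseudocode: shift the
    // 128-bit concatenation (b:a) right by c mod 64, keep the low half.
    fn shrdvq_lane(a: u64, b: u64, c: u64) -> u64 {
        let tmp = ((b as u128) << 64) | a as u128;
        (tmp >> (c & 63)) as u64
    }

    fn main() {
        // A count of 0 returns "a"; a count of 64 reduces to 0 mod 64 as well.
        assert_eq!(shrdvq_lane(7, 9, 0), 7);
        assert_eq!(shrdvq_lane(7, 9, 64), 7);
    }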
+ Shift
- - - - - - Concatenate packed 32-bit integers in "b" and "a" producing an intermediate - 64-bit result. Shift the result right by the amount specified in the corresponding - element of "c", and store the lower 32-bits in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := ((b[i+31:i] << 32)[63:0] | a[i+31:i]) >> (c[i+31:i] & 31) - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Shift + + + + + + Concatenate packed 32-bit integers in "b" and "a" producing an intermediate 64-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 32-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ((b[i+31:i] << 32)[63:0] | a[i+31:i]) >> (c[i+31:i] & 31) + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Shift
- - - - - - Concatenate packed 32-bit integers in "b" and "a" producing an intermediate - 64-bit result. Shift the result right by the amount specified in the corresponding - element of "c", and store the lower 32-bits in "dst" using writemask "k" (elements are - copied from "a" when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := ((b[i+31:i] << 32)[63:0] | a[i+31:i]) >> (c[i+31:i] & 31) - ELSE - dst[i+31:i] := a[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Shift + + + + + + Concatenate packed 32-bit integers in "b" and "a" producing an intermediate 64-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 32-bits in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ((b[i+31:i] << 32)[63:0] | a[i+31:i]) >> (c[i+31:i] & 31) + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Shift
- - - - - Concatenate packed 32-bit integers in "b" and "a" producing an intermediate - 64-bit result. Shift the result right by the amount specified in the corresponding - element of "c", and store the lower 32-bits in "dst". - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := ((b[i+31:i] << 32)[63:0] | a[i+31:i]) >> (c[i+31:i] & 31) - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Shift + + + + + Concatenate packed 32-bit integers in "b" and "a" producing an intermediate 64-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 32-bits in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := ((b[i+31:i] << 32)[63:0] | a[i+31:i]) >> (c[i+31:i] & 31) +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Shift
- - - - - - Concatenate packed 16-bit integers in "b" and "a" producing an intermediate - 32-bit result. Shift the result right by the amount specified in the corresponding - element of "c", and store the lower 16-bits in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := ((b[i+15:i] << 16)[31:0] | a[i+15:i]) >> (c[i+15:i] & 15) - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Shift + + + + + + Concatenate packed 16-bit integers in "b" and "a" producing an intermediate 32-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 16-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := ((b[i+15:i] << 16)[31:0] | a[i+15:i]) >> (c[i+15:i] & 15) + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Shift
- - - - - - Concatenate packed 16-bit integers in "b" and "a" producing an intermediate - 32-bit result. Shift the result right by the amount specified in the corresponding - element of "c", and store the lower 16-bits in "dst" using writemask "k" (elements are - copied from "a" when the corresponding mask bit is not set). - - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := ((b[i+15:i] << 16)[31:0] | a[i+15:i]) >> (c[i+15:i] & 15) - ELSE - dst[i+15:i] := a[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Shift + + + + + + Concatenate packed 16-bit integers in "b" and "a" producing an intermediate 32-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 16-bits in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := ((b[i+15:i] << 16)[31:0] | a[i+15:i]) >> (c[i+15:i] & 15) + ELSE + dst[i+15:i] := a[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Shift
- - - - - Concatenate packed 16-bit integers in "b" and "a" producing an intermediate - 32-bit result. Shift the result right by the amount specified in the corresponding - element of "c", and store the lower 16-bits in "dst". - - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := ((b[i+15:i] << 16)[31:0] | a[i+15:i]) >> (c[i+15:i] & 15) - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Shift + + + + + Concatenate packed 16-bit integers in "b" and "a" producing an intermediate 32-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 16-bits in "dst". + +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := ((b[i+15:i] << 16)[31:0] | a[i+15:i]) >> (c[i+15:i] & 15) +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Shift
- - - - - - Concatenate packed 64-bit integers in "b" and "a" producing an intermediate - 128-bit result. Shift the result right by "imm8" bits, and store the lower 64-bits in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := ((b[i+63:i] << 64)[127:0] | a[i+63:i]) >> imm8[5:0] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Shift + + + + + + Concatenate packed 64-bit integers in "b" and "a" producing an intermediate 128-bit result. Shift the result right by "imm8" bits, and store the lower 64-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + dst[i+63:i] := ((b[i+63:i] << 64)[127:0] | a[i+63:i]) >> imm8[5:0] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Shift
- - - - - - - Concatenate packed 64-bit integers in "b" and "a" producing an intermediate - 128-bit result. Shift the result right by "imm8" bits, and store the lower 64-bits in - "dst" using writemask "k" (elements are copied from "src"" when the corresponding mask - bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - dst[i+63:i] := ((b[i+63:i] << 64)[127:0] | a[i+63:i]) >> imm8[5:0] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Shift + + + + + + + Concatenate packed 64-bit integers in "b" and "a" producing an intermediate 128-bit result. Shift the result right by "imm8" bits, and store the lower 64-bits in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). +
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ((b[i+63:i] << 64)[127:0] | a[i+63:i]) >> imm8[5:0]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+
+
+ AVX512_VBMI2
+
immintrin.h
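The zeromask and writemask forms above differ only in what an inactive lane receives (0 versus the lane of "src"); the shift itself is identical. A whole-vector sketch of the writemask form in plain Rust; `mask_shrdi_epi64` is a hypothetical helper:

    // Scalar model of the writemask VPSHRDQ pseudocode: active lanes get
    // the concat-shift result, inactive lanes keep the lane of `src`.
    fn mask_shrdi_epi64(src: &[u64; 8], k: u8, a: &[u64; 8], b: &[u64; 8], imm8: u8) -> [u64; 8] {
        let mut dst = *src;
        for j in 0..8 {
            if (k >> j) & 1 == 1 {
                let tmp = ((b[j] as u128) << 64) | a[j] as u128;
                dst[j] = (tmp >> (imm8 & 63)) as u64;
            }
        }
        dst
    }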
+ Shift
- - - - - Concatenate packed 64-bit integers in "b" and "a" producing an intermediate - 128-bit result. Shift the result right by "imm8" bits, and store the lower 64-bits in - "dst". - - FOR j := 0 to 7 - i := j*64 - dst[i+63:i] := ((b[i+63:i] << 64)[127:0] | a[i+63:i]) >> imm8[5:0] - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Shift + + + + + Concatenate packed 64-bit integers in "b" and "a" producing an intermediate 128-bit result. Shift the result right by "imm8" bits, and store the lower 64-bits in "dst". + +FOR j := 0 to 7 + i := j*64 + dst[i+63:i] := ((b[i+63:i] << 64)[127:0] | a[i+63:i]) >> imm8[5:0] +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Shift
- - - - - - Concatenate packed 32-bit integers in "b" and "a" producing an intermediate - 64-bit result. Shift the result right by "imm8" bits, and store the lower 32-bits in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := ((b[i+31:i] << 32)[63:0] | a[i+31:i]) >> imm8[4:0] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Shift + + + + + + Concatenate packed 32-bit integers in "b" and "a" producing an intermediate 64-bit result. Shift the result right by "imm8" bits, and store the lower 32-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ((b[i+31:i] << 32)[63:0] | a[i+31:i]) >> imm8[4:0] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Shift
- - - - - - - Concatenate packed 32-bit integers in "b" and "a" producing an intermediate - 64-bit result. Shift the result right by "imm8" bits, and store the lower 32-bits in - "dst" using writemask "k" (elements are copied from "src" when the corresponding mask - bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - dst[i+31:i] := ((b[i+31:i] << 32)[63:0] | a[i+31:i]) >> imm8[4:0] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Shift + + + + + + + Concatenate packed 32-bit integers in "b" and "a" producing an intermediate 64-bit result. Shift the result right by "imm8" bits, and store the lower 32-bits in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + dst[i+31:i] := ((b[i+31:i] << 32)[63:0] | a[i+31:i]) >> imm8[4:0] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Shift
- - - - - Concatenate packed 32-bit integers in "b" and "a" producing an intermediate - 64-bit result. Shift the result right by "imm8" bits, and store the lower 32-bits in - "dst". - - FOR j := 0 to 15 - i := j*32 - dst[i+31:i] := ((b[i+31:i] << 32)[63:0] | a[i+31:i]) >> imm8[4:0] - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Shift + + + + + Concatenate packed 32-bit integers in "b" and "a" producing an intermediate 64-bit result. Shift the result right by "imm8" bits, and store the lower 32-bits in "dst". + +FOR j := 0 to 15 + i := j*32 + dst[i+31:i] := ((b[i+31:i] << 32)[63:0] | a[i+31:i]) >> imm8[4:0] +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Shift
- - - - - - Concatenate packed 16-bit integers in "b" and "a" producing an intermediate - 32-bit result. Shift the result right by "imm8" bits, and store the lower 16-bits in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := ((b[i+15:i] << 16)[31:0] | a[i+15:i]) >> imm8[3:0] - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Shift + + + + + + Concatenate packed 16-bit integers in "b" and "a" producing an intermediate 32-bit result. Shift the result right by "imm8" bits, and store the lower 16-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := ((b[i+15:i] << 16)[31:0] | a[i+15:i]) >> imm8[3:0] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Shift
- - - - - - - Concatenate packed 16-bit integers in "b" and "a" producing an intermediate - 32-bit result. Shift the result right by "imm8" bits, and store the lower 16-bits in - "dst" using writemask "k" (elements are copied from "src" when the corresponding mask - bit is not set). - - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := ((b[i+15:i] << 16)[31:0] | a[i+15:i]) >> imm8[3:0] - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Shift + + + + + + + Concatenate packed 16-bit integers in "b" and "a" producing an intermediate 32-bit result. Shift the result right by "imm8" bits, and store the lower 16-bits in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := ((b[i+15:i] << 16)[31:0] | a[i+15:i]) >> imm8[3:0] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Shift
- - - - - Concatenate packed 16-bit integers in "b" and "a" producing an intermediate - 32-bit result. Shift the result right by "imm8" bits, and store the lower 16-bits in - "dst". - - FOR j := 0 to 31 - i := j*16 - dst[i+15:i] := ((b[i+15:i] << 16)[31:0] | a[i+15:i]) >> imm8[3:0] - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Shift + + + + + Concatenate packed 16-bit integers in "b" and "a" producing an intermediate 32-bit result. Shift the result right by "imm8" bits, and store the lower 16-bits in "dst". + +FOR j := 0 to 31 + i := j*16 + dst[i+15:i] := ((b[i+15:i] << 16)[31:0] | a[i+15:i]) >> imm8[3:0] +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Shift
- - - - - - Concatenate packed 64-bit integers in "a" and "b" producing an intermediate - 128-bit result. Shift the result left by the amount specified in the corresponding - element of "c", and store the upper 64-bits in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - tmp[127:0] := ((a[i+63:i] << 64)[127:0] | b[i+63:i]) << (c[i+63:i] & 63) - dst[i+63:i] := tmp[127:64] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Shift + + + + + + Concatenate packed 64-bit integers in "a" and "b" producing an intermediate 128-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 64-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + tmp[127:0] := ((a[i+63:i] << 64)[127:0] | b[i+63:i]) << (c[i+63:i] & 63) + dst[i+63:i] := tmp[127:64] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Shift
- - - - - - Concatenate packed 64-bit integers in "a" and "b" producing an intermediate - 128-bit result. Shift the result left by the amount specified in the corresponding - element of "c", and store the upper 64-bits in "dst" using writemask "k" (elements are - copied from "a" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - tmp[127:0] := ((a[i+63:i] << 64)[127:0] | b[i+63:i]) << (c[i+63:i] & 63) - dst[i+63:i] := tmp[127:64] - ELSE - dst[i+63:i] := a[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Shift + + + + + + Concatenate packed 64-bit integers in "a" and "b" producing an intermediate 128-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 64-bits in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + tmp[127:0] := ((a[i+63:i] << 64)[127:0] | b[i+63:i]) << (c[i+63:i] & 63) + dst[i+63:i] := tmp[127:64] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Shift
- - - - - Concatenate packed 64-bit integers in "a" and "b" producing an intermediate - 128-bit result. Shift the result left by the amount specified in the corresponding - element of "c", and store the upper 64-bits in "dst". - - FOR j := 0 to 7 - i := j*64 - tmp[127:0] := ((a[i+63:i] << 64)[127:0] | b[i+63:i]) << (c[i+63:i] & 63) - dst[i+63:i] := tmp[127:64] - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Shift + + + + + Concatenate packed 64-bit integers in "a" and "b" producing an intermediate 128-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 64-bits in "dst". + +FOR j := 0 to 7 + i := j*64 + tmp[127:0] := ((a[i+63:i] << 64)[127:0] | b[i+63:i]) << (c[i+63:i] & 63) + dst[i+63:i] := tmp[127:64] +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Shift
- - - - - - Concatenate packed 32-bit integers in "a" and "b" producing an intermediate - 64-bit result. Shift the result left by the amount specified in the corresponding - element of "c", and store the upper 32-bits in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - tmp[63:0] := ((a[i+31:i] << 32)[63:0] | b[i+31:i]) << (c[i+31:i] & 31) - dst[i+31:i] := tmp[63:32] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Shift + + + + + + Concatenate packed 32-bit integers in "a" and "b" producing an intermediate 64-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 32-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + tmp[63:0] := ((a[i+31:i] << 32)[63:0] | b[i+31:i]) << (c[i+31:i] & 31) + dst[i+31:i] := tmp[63:32] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Shift
- - - - - - Concatenate packed 32-bit integers in "a" and "b" producing an intermediate - 64-bit result. Shift the result left by the amount specified in the corresponding - element of "c", and store the upper 32-bits in "dst" using writemask "k" (elements are - copied from "a" when the corresponding mask bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - tmp[63:0] := ((a[i+31:i] << 32)[63:0] | b[i+31:i]) << (c[i+31:i] & 31) - dst[i+31:i] := tmp[63:32] - ELSE - dst[i+31:i] := a[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Shift + + + + + + Concatenate packed 32-bit integers in "a" and "b" producing an intermediate 64-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 32-bits in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + tmp[63:0] := ((a[i+31:i] << 32)[63:0] | b[i+31:i]) << (c[i+31:i] & 31) + dst[i+31:i] := tmp[63:32] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Shift
- - - - - Concatenate packed 32-bit integers in "a" and "b" producing an intermediate - 64-bit result. Shift the result left by the amount specified in the corresponding - element of "c", and store the upper 32-bits in "dst". - - FOR j := 0 to 15 - i := j*32 - tmp[63:0] := ((a[i+31:i] << 32)[63:0] | b[i+31:i]) << (c[i+31:i] & 31) - dst[i+31:i] := tmp[63:32] - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Shift + + + + + Concatenate packed 32-bit integers in "a" and "b" producing an intermediate 64-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 32-bits in "dst". + +FOR j := 0 to 15 + i := j*32 + tmp[63:0] := ((a[i+31:i] << 32)[63:0] | b[i+31:i]) << (c[i+31:i] & 31) + dst[i+31:i] := tmp[63:32] +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Shift
- - - - - - Concatenate packed 16-bit integers in "a" and "b" producing an intermediate - 32-bit result. Shift the result left by the amount specified in the corresponding - element of "c", and store the upper 16-bits in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 31 - i := j*16 - IF k[j] - tmp[31:0] := ((a[i+15:i] << 16)[31:0] | b[i+15:i]) << (c[i+15:i] & 15) - dst[i+15:i] := tmp[31:16] - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Shift + + + + + + Concatenate packed 16-bit integers in "a" and "b" producing an intermediate 32-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 16-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + tmp[31:0] := ((a[i+15:i] << 16)[31:0] | b[i+15:i]) << (c[i+15:i] & 15) + dst[i+15:i] := tmp[31:16] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Shift
- - - - - - Concatenate packed 16-bit integers in "a" and "b" producing an intermediate - 32-bit result. Shift the result left by the amount specified in the corresponding - element of "c", and store the upper 16-bits in "dst" using writemask "k" (elements are - copied from "a" when the corresponding mask bit is not set). - - FOR j := 0 to 31 - i := j*16 - IF k[j] - tmp[31:0] := ((a[i+15:i] << 16)[31:0] | b[i+15:i]) << (c[i+15:i] & 15) - dst[i+15:i] := tmp[31:16] - ELSE - dst[i+15:i] := a[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Shift + + + + + + Concatenate packed 16-bit integers in "a" and "b" producing an intermediate 32-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 16-bits in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + tmp[31:0] := ((a[i+15:i] << 16)[31:0] | b[i+15:i]) << (c[i+15:i] & 15) + dst[i+15:i] := tmp[31:16] + ELSE + dst[i+15:i] := a[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Shift
- - - - - Concatenate packed 16-bit integers in "a" and "b" producing an intermediate - 32-bit result. Shift the result left by the amount specified in the corresponding - element of "c", and store the upper 16-bits in "dst". - - FOR j := 0 to 31 - i := j*16 - tmp[31:0] := ((a[i+15:i] << 16)[31:0] | b[i+15:i]) << (c[i+15:i] & 15) - dst[i+15:i] := tmp[31:16] - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Shift + + + + + Concatenate packed 16-bit integers in "a" and "b" producing an intermediate 32-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 16-bits in "dst". + +FOR j := 0 to 31 + i := j*16 + tmp[31:0] := ((a[i+15:i] << 16)[31:0] | b[i+15:i]) << (c[i+15:i] & 15) + dst[i+15:i] := tmp[31:16] +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Shift
- - - - - - Concatenate packed 64-bit integers in "a" and "b" producing an intermediate - 128-bit result. Shift the result left by "imm8" bits, and store the upper 64-bits in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - tmp[127:0] := ((a[i+63:i] << 64)[127:0] | b[i+63:i]) << imm8[5:0] - dst[i+63:i] := tmp[127:64] - ELSE - dst[i+63:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Shift + + + + + + Concatenate packed 64-bit integers in "a" and "b" producing an intermediate 128-bit result. Shift the result left by "imm8" bits, and store the upper 64-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + tmp[127:0] := ((a[i+63:i] << 64)[127:0] | b[i+63:i]) << imm8[5:0] + dst[i+63:i] := tmp[127:64] + ELSE + dst[i+63:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Shift
- - - - - - - Concatenate packed 64-bit integers in "a" and "b" producing an intermediate - 128-bit result. Shift the result left by "imm8" bits, and store the upper 64-bits in - "dst" using writemask "k" (elements are copied from "src" when the corresponding mask - bit is not set). - - FOR j := 0 to 7 - i := j*64 - IF k[j] - tmp[127:0] := ((a[i+63:i] << 64)[127:0] | b[i+63:i]) << imm8[5:0] - dst[i+63:i] := tmp[127:64] - ELSE - dst[i+63:i] := src[i+63:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Shift + + + + + + + Concatenate packed 64-bit integers in "a" and "b" producing an intermediate 128-bit result. Shift the result left by "imm8" bits, and store the upper 64-bits in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + i := j*64 + IF k[j] + tmp[127:0] := ((a[i+63:i] << 64)[127:0] | b[i+63:i]) << imm8[5:0] + dst[i+63:i] := tmp[127:64] + ELSE + dst[i+63:i] := src[i+63:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Shift
- - - - - Concatenate packed 64-bit integers in "a" and "b" producing an intermediate - 128-bit result. Shift the result left by "imm8" bits, and store the upper 64-bits in - "dst"). - - FOR j := 0 to 7 - i := j*64 - tmp[127:0] := ((a[i+63:i] << 64)[127:0] | b[i+63:i]) << imm8[5:0] - dst[i+63:i] := tmp[127:64] - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Shift + + + + + Concatenate packed 64-bit integers in "a" and "b" producing an intermediate 128-bit result. Shift the result left by "imm8" bits, and store the upper 64-bits in "dst". +
+FOR j := 0 to 7
+ i := j*64
+ tmp[127:0] := ((a[i+63:i] << 64)[127:0] | b[i+63:i]) << imm8[5:0]
+ dst[i+63:i] := tmp[127:64]
+ENDFOR
+dst[MAX:512] := 0
+
+
+ AVX512_VBMI2
+
immintrin.h
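One property that follows directly from the pseudocode above: when both inputs are the same register, the concatenated left shift reduces to a rotate, so shldi(a, a, n) rotates each lane left by n. A plain-Rust check of one 64-bit lane; `shldq_lane` is a hypothetical helper:

    // With b == a, the upper 64 bits of (a:a) << n equal a.rotate_left(n).
    fn shldq_lane(a: u64, b: u64, imm8: u8) -> u64 {
        let tmp = (((a as u128) << 64) | b as u128) << (imm8 & 63);
        (tmp >> 64) as u64
    }

    fn main() {
        let a = 0x0123_4567_89AB_CDEFu64;
        for n in 0u8..64 {
            assert_eq!(shldq_lane(a, a, n), a.rotate_left(n as u32));
        }
    }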
+ Shift
- - - - - - Concatenate packed 32-bit integers in "a" and "b" producing an intermediate - 64-bit result. Shift the result left by "imm8" bits, and store the upper 32-bits in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - tmp[63:0] := ((a[i+31:i] << 32)[63:0] | b[i+31:i]) << imm8[4:0] - dst[i+31:i] := tmp[63:32] - ELSE - dst[i+31:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Shift + + + + + + Concatenate packed 32-bit integers in "a" and "b" producing an intermediate 64-bit result. Shift the result left by "imm8" bits, and store the upper 32-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + tmp[63:0] := ((a[i+31:i] << 32)[63:0] | b[i+31:i]) << imm8[4:0] + dst[i+31:i] := tmp[63:32] + ELSE + dst[i+31:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Shift
- - - - - - - Concatenate packed 32-bit integers in "a" and "b" producing an intermediate - 64-bit result. Shift the result left by "imm8" bits, and store the upper 32-bits in - "dst" using writemask "k" (elements are copied from "src" when the corresponding mask - bit is not set). - - FOR j := 0 to 15 - i := j*32 - IF k[j] - tmp[63:0] := ((a[i+31:i] << 32)[63:0] | b[i+31:i]) << imm8[4:0] - dst[i+31:i] := tmp[63:32] - ELSE - dst[i+31:i] := src[i+31:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Shift + + + + + + + Concatenate packed 32-bit integers in "a" and "b" producing an intermediate 64-bit result. Shift the result left by "imm8" bits, and store the upper 32-bits in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + i := j*32 + IF k[j] + tmp[63:0] := ((a[i+31:i] << 32)[63:0] | b[i+31:i]) << imm8[4:0] + dst[i+31:i] := tmp[63:32] + ELSE + dst[i+31:i] := src[i+31:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Shift
- - - - - Concatenate packed 32-bit integers in "a" and "b" producing an intermediate - 64-bit result. Shift the result left by "imm8" bits, and store the upper 32-bits in - "dst". - - FOR j := 0 to 15 - i := j*32 - tmp[63:0] := ((a[i+31:i] << 32)[63:0] | b[i+31:i]) << imm8[4:0] - dst[i+31:i] := tmp[63:32] - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Shift + + + + + Concatenate packed 32-bit integers in "a" and "b" producing an intermediate 64-bit result. Shift the result left by "imm8" bits, and store the upper 32-bits in "dst". + +FOR j := 0 to 15 + i := j*32 + tmp[63:0] := ((a[i+31:i] << 32)[63:0] | b[i+31:i]) << imm8[4:0] + dst[i+31:i] := tmp[63:32] +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Shift
- - - - - - Concatenate packed 16-bit integers in "a" and "b" producing an intermediate - 32-bit result. Shift the result left by "imm8" bits, and store the upper 16-bits in - "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not - set). - - FOR j := 0 to 31 - i := j*16 - IF k[j] - tmp[31:0] := ((a[i+15:i] << 16)[31:0] | b[i+15:i]) << imm8[3:0] - dst[i+15:i] := tmp[31:16] - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Shift + + + + + + Concatenate packed 16-bit integers in "a" and "b" producing an intermediate 32-bit result. Shift the result left by "imm8" bits, and store the upper 16-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + tmp[31:0] := ((a[i+15:i] << 16)[31:0] | b[i+15:i]) << imm8[3:0] + dst[i+15:i] := tmp[31:16] + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Shift
- - - - - - - Concatenate packed 16-bit integers in "a" and "b" producing an intermediate - 32-bit result. Shift the result left by "imm8" bits, and store the upper 16-bits in - "dst" using writemask "k" (elements are copied from "src" when the corresponding mask - bit is not set). - - FOR j := 0 to 31 - i := j*16 - IF k[j] - tmp[31:0] := ((a[i+15:i] << 16)[31:0] | b[i+15:i]) << imm8[3:0] - dst[i+15:i] := tmp[31:16] - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Shift + + + + + + + Concatenate packed 16-bit integers in "a" and "b" producing an intermediate 32-bit result. Shift the result left by "imm8" bits, and store the upper 16-bits in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 31 + i := j*16 + IF k[j] + tmp[31:0] := ((a[i+15:i] << 16)[31:0] | b[i+15:i]) << imm8[3:0] + dst[i+15:i] := tmp[31:16] + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Shift
- - - - - Concatenate packed 16-bit integers in "a" and "b" producing an intermediate - 32-bit result. Shift the result left by "imm8" bits, and store the upper 16-bits in - "dst"). - - FOR j := 0 to 31 - i := j*16 - tmp[31:0] := ((a[i+15:i] << 16)[31:0] | b[i+15:i]) << imm8[3:0] - dst[i+15:i] := tmp[31:16] - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Shift + + + + + Concatenate packed 16-bit integers in "a" and "b" producing an intermediate 32-bit result. Shift the result left by "imm8" bits, and store the upper 16-bits in "dst". +
+FOR j := 0 to 31
+ i := j*16
+ tmp[31:0] := ((a[i+15:i] << 16)[31:0] | b[i+15:i]) << imm8[3:0]
+ dst[i+15:i] := tmp[31:16]
+ENDFOR
+dst[MAX:512] := 0
+
+
+ AVX512_VBMI2
+
immintrin.h
+ Shift
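The 32-bit and 16-bit families above repeat the same operation at narrower lane widths; note that the pseudocode consumes imm8[4:0] and imm8[3:0] respectively, so the shift count is effectively reduced modulo the lane width. A width-generic sketch of the lane model (illustrative only):

// Same model as shld_lane64 but generic over the lane width in bits,
// mirroring the imm8[5:0]/imm8[4:0]/imm8[3:0] masking in the pseudocode.
fn shld_lane(a: u64, b: u64, imm8: u8, width: u32) -> u64 {
    assert!(matches!(width, 16 | 32 | 64));
    let shift = u32::from(imm8) % width;
    let mask = if width == 64 { u64::MAX } else { (1u64 << width) - 1 };
    let tmp = (u128::from(a & mask) << width) | u128::from(b & mask);
    (((tmp << shift) >> width) as u64) & mask
}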
- Swizzle - - - - Load contiguous active 16-bit integers from unaligned memory at "mem_addr" - (those with their respective bit set in mask "k"), and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - m := 0 - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := MEM[mem_addr+m+15:mem_addr+m] - m := m + 16 - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Load + Swizzle + + + + Load contiguous active 16-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := MEM[mem_addr+m+15:mem_addr+m] + m := m + 16 + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Load
- Swizzle - - - - - Load contiguous active 16-bit integers from unaligned memory at "mem_addr" - (those with their respective bit set in mask "k"), and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - m := 0 - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := MEM[mem_addr+m+15:mem_addr+m] - m := m + 16 - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Load + Swizzle + + + + + Load contiguous active 16-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := MEM[mem_addr+m+15:mem_addr+m] + m := m + 16 + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Load
- Swizzle - - - - Load contiguous active 8-bit integers from unaligned memory at "mem_addr" - (those with their respective bit set in mask "k"), and store the results in "dst" using - zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). - - m := 0 - FOR j := 0 to 63 - i := j*8 - IF k[j] - dst[i+7:i] := MEM[mem_addr+m+7:mem_addr+m] - m := m + 8 - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Load + Swizzle + + + + Load contiguous active 8-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := MEM[mem_addr+m+7:mem_addr+m] + m := m + 8 + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Load
- Swizzle - - - - - Load contiguous active 8-bit integers from unaligned memory at "mem_addr" - (those with their respective bit set in mask "k"), and store the results in "dst" using - writemask "k" (elements are copied from "src" when the corresponding mask bit is not - set). - - m := 0 - FOR j := 0 to 63 - i := j*8 - IF k[j] - dst[i+7:i] := MEM[mem_addr+m+7:mem_addr+m] - m := m + 8 - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Load + Swizzle + + + + + Load contiguous active 8-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := MEM[mem_addr+m+7:mem_addr+m] + m := m + 8 + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Load
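The four entries above are the expand-load forms: elements are read contiguously from "mem_addr", but placed only into lanes whose bit in "k" is set; the other lanes are zeroed (zeromask) or copied from "src" (writemask). A scalar sketch of the 16-bit behaviour (names illustrative):

// Expand-load model. `mem` must hold at least k.count_ones() elements;
// `src` is Some(..) for the writemask form, None for the zeromask form.
fn expandload_u16(mem: &[u16], k: u32, src: Option<&[u16; 32]>) -> [u16; 32] {
    let mut dst = [0u16; 32];
    let mut m = 0; // next contiguous element of `mem`
    for j in 0..32 {
        if (k >> j) & 1 == 1 {
            dst[j] = mem[m];
            m += 1;
        } else if let Some(s) = src {
            dst[j] = s[j];
        }
    }
    dst
}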
- - - - Load contiguous active 16-bit integers from "a" (those with their respective - bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are - zeroed out when the corresponding mask bit is not set). - - m := 0 - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := a[m+15:m] - m := m + 16 - ELSE - dst[i+15:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Swizzle + + + + Load contiguous active 16-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := a[m+15:m] + m := m + 16 + ELSE + dst[i+15:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Swizzle
- - - - - Load contiguous active 16-bit integers from "a" (those with their respective - bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). - - m := 0 - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[i+15:i] := a[m+15:m] - m := m + 16 - ELSE - dst[i+15:i] := src[i+15:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Swizzle + + + + + Load contiguous active 16-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[i+15:i] := a[m+15:m] + m := m + 16 + ELSE + dst[i+15:i] := src[i+15:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Swizzle
- - - - Load contiguous active 8-bit integers from "a" (those with their respective bit - set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - - m := 0 - FOR j := 0 to 63 - i := j*8 - IF k[j] - dst[i+7:i] := a[m+7:m] - m := m + 8 - ELSE - dst[i+7:i] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Swizzle + + + + Load contiguous active 8-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := a[m+7:m] + m := m + 8 + ELSE + dst[i+7:i] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Swizzle
- - - - - Load contiguous active 8-bit integers from "a" (those with their respective bit - set in mask "k"), and store the results in "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). - - m := 0 - FOR j := 0 to 63 - i := j*8 - IF k[j] - dst[i+7:i] := a[m+7:m] - m := m + 8 - ELSE - dst[i+7:i] := src[i+7:i] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Swizzle + + + + + Load contiguous active 8-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +m := 0 +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[i+7:i] := a[m+7:m] + m := m + 8 + ELSE + dst[i+7:i] := src[i+7:i] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Swizzle
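The register variants above apply the same expansion, except that the contiguous source is the low end of "a" instead of memory; only the first popcount(k) elements of "a" are consumed. Sketch (illustrative):

// Expand-from-register model: low elements of `a` are scattered to the
// mask-selected lanes; unselected lanes get 0 or the matching `src` lane.
fn expand_u16(a: &[u16; 32], k: u32, src: Option<&[u16; 32]>) -> [u16; 32] {
    let mut dst = [0u16; 32];
    let mut m = 0;
    for j in 0..32 {
        if (k >> j) & 1 == 1 {
            dst[j] = a[m];
            m += 1;
        } else if let Some(s) = src {
            dst[j] = s[j];
        }
    }
    dst
}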
- - - - Contiguously store the active 16-bit integers in "a" (those with their - respective bit set in zeromask "k") to "dst", and set the remaining elements to zero. - - size := 16 - m := 0 - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[m+size-1:m] := a[i+15:i] - m := m + size - FI - ENDFOR - dst[511:m] := 0 - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Swizzle + + + + Contiguously store the active 16-bit integers in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero. + +size := 16 +m := 0 +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[m+size-1:m] := a[i+15:i] + m := m + size + FI +ENDFOR +dst[511:m] := 0 +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Swizzle
- - - - - Contiguously store the active 16-bit integers in "a" (those with their - respective bit set in writemask "k") to "dst", and pass through the remaining elements - from "src". - - size := 16 - m := 0 - FOR j := 0 to 31 - i := j*16 - IF k[j] - dst[m+size-1:m] := a[i+15:i] - m := m + size - FI - ENDFOR - dst[511:m] := src[511:m] - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Swizzle + + + + + Contiguously store the active 16-bit integers in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src". + +size := 16 +m := 0 +FOR j := 0 to 31 + i := j*16 + IF k[j] + dst[m+size-1:m] := a[i+15:i] + m := m + size + FI +ENDFOR +dst[511:m] := src[511:m] +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Swizzle
- - - - Contiguously store the active 8-bit integers in "a" (those with their - respective bit set in zeromask "k") to "dst", and set the remaining elements to zero. - - size := 8 - m := 0 - FOR j := 0 to 63 - i := j*8 - IF k[j] - dst[m+size-1:m] := a[i+7:i] - m := m + size - FI - ENDFOR - dst[511:m] := 0 - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Swizzle + + + + Contiguously store the active 8-bit integers in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero. + +size := 8 +m := 0 +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[m+size-1:m] := a[i+7:i] + m := m + size + FI +ENDFOR +dst[511:m] := 0 +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Swizzle
- - - - - Contiguously store the active 8-bit integers in "a" (those with their - respective bit set in writemask "k") to "dst", and pass through the remaining elements - from "src". - - size := 8 - m := 0 - FOR j := 0 to 63 - i := j*8 - IF k[j] - dst[m+size-1:m] := a[i+7:i] - m := m + size - FI - ENDFOR - dst[511:m] := src[511:m] - dst[MAX:512] := 0 - - - AVX512_VBMI2 -
immintrin.h
- Swizzle + + + + + Contiguously store the active 8-bit integers in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src". + +size := 8 +m := 0 +FOR j := 0 to 63 + i := j*8 + IF k[j] + dst[m+size-1:m] := a[i+7:i] + m := m + size + FI +ENDFOR +dst[511:m] := src[511:m] +dst[MAX:512] := 0 + + + AVX512_VBMI2 +
immintrin.h
+ Swizzle
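Compression is the inverse of expansion: the mask-selected lanes of "a" are packed densely from element 0 upward, and the tail is zeroed or passed through from "src". Scalar sketch (illustrative):

// Compress model: selected lanes of `a` are packed from index 0; the
// remaining tail is zeroed (zeromask form) or left as `src` (writemask form).
fn compress_u16(a: &[u16; 32], k: u32, src: &[u16; 32], zero_tail: bool) -> [u16; 32] {
    let mut dst = *src;
    let mut m = 0;
    for j in 0..32 {
        if (k >> j) & 1 == 1 {
            dst[m] = a[j];
            m += 1;
        }
    }
    if zero_tail {
        dst[m..].fill(0);
    }
    dst
}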
- Swizzle - - - - - Contiguously store the active 16-bit integers in "a" (those with their - respective bit set in writemask "k") to unaligned memory at "base_addr". - - size := 16 - m := base_addr - FOR j := 0 to 31 - i := j*16 - IF k[j] - MEM[m+size-1:m] := a[i+15:i] - m := m + size - FI - ENDFOR - - - AVX512_VBMI2 -
immintrin.h
- Store + Swizzle + + + + + Contiguously store the active 16-bit integers in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +size := 16 +m := base_addr +FOR j := 0 to 31 + i := j*16 + IF k[j] + MEM[m+size-1:m] := a[i+15:i] + m := m + size + FI +ENDFOR + + + AVX512_VBMI2 +
immintrin.h
+ Store
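The store form above (and its 8-bit counterpart below) compresses straight to memory: exactly popcount(k) elements are written contiguously starting at "base_addr", and memory past the last written element is not touched. Sketch (illustrative; the caller must provide enough room):

// Compress-store model: returns the number of elements actually written.
fn compressstore_u16(mem: &mut [u16], a: &[u16; 32], k: u32) -> usize {
    let mut m = 0;
    for j in 0..32 {
        if (k >> j) & 1 == 1 {
            mem[m] = a[j];
            m += 1;
        }
    }
    m
}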
- Swizzle - - - - - Contiguously store the active 8-bit integers in "a" (those with their - respective bit set in writemask "k") to unaligned memory at "base_addr". - - size := 8 - m := base_addr - FOR j := 0 to 63 - i := j*8 - IF k[j] - MEM[m+size-1:m] := a[i+7:i] - m := m + size - FI - ENDFOR - - - AVX512_VBMI2 -
immintrin.h
- Store -
- - - - - - - - - Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with - corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. - Sum these 2 results with the corresponding 32-bit integer in "src" using signed - saturation, and store the packed 32-bit results in "dst" using zeromask "k" (elements - are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - IF k[j] - tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) - tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) - dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2) - ELSE - dst.dword[j] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VNNI - AVX512VL -
immintrin.h
- Arithmetic + Swizzle + + + + + Contiguously store the active 8-bit integers in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr". + +size := 8 +m := base_addr +FOR j := 0 to 63 + i := j*8 + IF k[j] + MEM[m+size-1:m] := a[i+7:i] + m := m + size + FI +ENDFOR + + + AVX512_VBMI2 +
immintrin.h
+ Store +
+ + + + + + + + + Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + IF k[j] + tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) + tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) + dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2) + ELSE + dst.dword[j] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VNNI + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with - corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. - Sum these 2 results with the corresponding 32-bit integer in "src" using signed - saturation, and store the packed 32-bit results in "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - IF k[j] - tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) - tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) - dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2) - ELSE - dst.dword[j] := src.dword[j] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VNNI - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + IF k[j] + tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) + tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) + dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2) + ELSE + dst.dword[j] := src.dword[j] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VNNI + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with - corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. - Sum these 2 results with the corresponding 32-bit integer in "src" using signed - saturation, and store the packed 32-bit results in "dst". - - FOR j := 0 to 7 - tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) - tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) - dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2) - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VNNI - AVX512VL -
immintrin.h
- Arithmetic + + + + + Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst". + +FOR j := 0 to 7 + tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) + tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) + dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2) +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VNNI + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with - corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. - Sum these 2 results with the corresponding 32-bit integer in "src" using signed - saturation, and store the packed 32-bit results in "dst" using zeromask "k" (elements - are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - IF k[j] - tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) - tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) - dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2) - ELSE - dst.dword[j] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VNNI - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + IF k[j] + tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) + tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) + dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2) + ELSE + dst.dword[j] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VNNI + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with - corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. - Sum these 2 results with the corresponding 32-bit integer in "src" using signed - saturation, and store the packed 32-bit results in "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 3 - IF k[j] - tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) - tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) - dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2) - ELSE - dst.dword[j] := src.dword[j] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VNNI - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + IF k[j] + tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) + tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) + dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2) + ELSE + dst.dword[j] := src.dword[j] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VNNI + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with - corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. - Sum these 2 results with the corresponding 32-bit integer in "src" using signed - saturation, and store the packed 32-bit results in "dst". - - FOR j := 0 to 3 - tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) - tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) - dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2) - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VNNI - AVX512VL -
immintrin.h
- Arithmetic + + + + + Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst". + +FOR j := 0 to 3 + tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) + tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) + dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2) +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VNNI + AVX512VL +
immintrin.h
+ Arithmetic
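The six entries above describe the saturating 16-bit dot product of AVX512_VNNI at 256- and 128-bit widths: per 32-bit lane, two adjacent signed 16-bit products are folded into "src" with signed saturation. One lane in plain Rust (illustrative, not the intrinsic itself):

// Saturate32(src + a0*b0 + a1*b1), computed in i64 so the intermediate
// sum cannot overflow before the clamp.
fn dpwssds_lane(src: i32, a: [i16; 2], b: [i16; 2]) -> i32 {
    let sum = i64::from(src)
        + i64::from(a[0]) * i64::from(b[0])
        + i64::from(a[1]) * i64::from(b[1]);
    sum.clamp(i64::from(i32::MIN), i64::from(i32::MAX)) as i32
}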
- - - - - - Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with - corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. - Sum these 2 results with the corresponding 32-bit integer in "src", and store the packed - 32-bit results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - IF k[j] - tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) - tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) - dst.dword[j] := src.dword[j] + tmp1 + tmp2 - ELSE - dst.dword[j] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VNNI - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + IF k[j] + tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) + tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) + dst.dword[j] := src.dword[j] + tmp1 + tmp2 + ELSE + dst.dword[j] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VNNI + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with - corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. - Sum these 2 results with the corresponding 32-bit integer in "src", and store the packed - 32-bit results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - IF k[j] - tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) - tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) - dst.dword[j] := src.dword[j] + tmp1 + tmp2 - ELSE - dst.dword[j] := src.dword[j] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VNNI - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + IF k[j] + tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) + tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) + dst.dword[j] := src.dword[j] + tmp1 + tmp2 + ELSE + dst.dword[j] := src.dword[j] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VNNI + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with - corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. - Sum these 2 results with the corresponding 32-bit integer in "src", and store the packed - 32-bit results in "dst". - - FOR j := 0 to 7 - tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) - tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) - dst.dword[j] := src.dword[j] + tmp1 + tmp2 - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VNNI - AVX512VL -
immintrin.h
- Arithmetic + + + + + Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst". + +FOR j := 0 to 7 + tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) + tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) + dst.dword[j] := src.dword[j] + tmp1 + tmp2 +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VNNI + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with - corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. - Sum these 2 results with the corresponding 32-bit integer in "src", and store the packed - 32-bit results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 3 - IF k[j] - tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) - tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) - dst.dword[j] := src.dword[j] + tmp1 + tmp2 - ELSE - dst.dword[j] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VNNI - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + IF k[j] + tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) + tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) + dst.dword[j] := src.dword[j] + tmp1 + tmp2 + ELSE + dst.dword[j] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VNNI + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with - corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. - Sum these 2 results with the corresponding 32-bit integer in "src", and store the packed - 32-bit results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 3 - IF k[j] - tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) - tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) - dst.dword[j] := src.dword[j] + tmp1 + tmp2 - ELSE - dst.dword[j] := src.dword[j] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VNNI - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + IF k[j] + tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) + tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) + dst.dword[j] := src.dword[j] + tmp1 + tmp2 + ELSE + dst.dword[j] := src.dword[j] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VNNI + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with - corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. - Sum these 2 results with the corresponding 32-bit integer in "src", and store the packed - 32-bit results in "dst". - - FOR j := 0 to 3 - tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) - tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) - dst.dword[j] := src.dword[j] + tmp1 + tmp2 - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VNNI - AVX512VL -
immintrin.h
- Arithmetic + + + + + Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst". + +FOR j := 0 to 3 + tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) + tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) + dst.dword[j] := src.dword[j] + tmp1 + tmp2 +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VNNI + AVX512VL +
immintrin.h
+ Arithmetic
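The variant above is identical except that the final accumulation is not saturating; it simply wraps modulo 2^32:

// src + a0*b0 + a1*b1 with ordinary two's-complement wraparound.
fn dpwssd_lane(src: i32, a: [i16; 2], b: [i16; 2]) -> i32 {
    src.wrapping_add(i32::from(a[0]) * i32::from(b[0]))
        .wrapping_add(i32::from(a[1]) * i32::from(b[1]))
}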
- - - - - - Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with - corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit - results. Sum these 4 results with the corresponding 32-bit integer in "src" using signed - saturation, and store the packed 32-bit results in "dst" using zeromask "k" (elements - are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 7 - IF k[j] - tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) - tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) - tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) - tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) - dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) - ELSE - dst.dword[j] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VNNI - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + IF k[j] + tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) + tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) + tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) + tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) + dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) + ELSE + dst.dword[j] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VNNI + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with - corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit - results. Sum these 4 results with the corresponding 32-bit integer in "src" using signed - saturation, and store the packed 32-bit results in "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 7 - IF k[j] - tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) - tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) - tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) - tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) - dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) - ELSE - dst.dword[j] := src.dword[j] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VNNI - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + IF k[j] + tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) + tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) + tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) + tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) + dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) + ELSE + dst.dword[j] := src.dword[j] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VNNI + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with - corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit - results. Sum these 4 results with the corresponding 32-bit integer in "src" using signed - saturation, and store the packed 32-bit results in "dst". - - FOR j := 0 to 7 - tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) - tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) - tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) - tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) - dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VNNI - AVX512VL -
immintrin.h
- Arithmetic + + + + + Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst". + +FOR j := 0 to 7 + tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) + tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) + tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) + tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) + dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VNNI + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with - corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit - results. Sum these 4 results with the corresponding 32-bit integer in "src" using signed - saturation, and store the packed 32-bit results in "dst" using zeromask "k" (elements - are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 3 - IF k[j] - tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) - tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) - tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) - tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) - dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) - ELSE - dst.dword[j] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VNNI - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + IF k[j] + tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) + tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) + tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) + tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) + dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) + ELSE + dst.dword[j] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VNNI + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with - corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit - results. Sum these 4 results with the corresponding 32-bit integer in "src" using signed - saturation, and store the packed 32-bit results in "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 3 - IF k[j] - tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) - tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) - tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) - tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) - dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) - ELSE - dst.dword[j] := src.dword[j] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VNNI - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + IF k[j] + tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) + tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) + tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) + tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) + dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) + ELSE + dst.dword[j] := src.dword[j] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VNNI + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with - corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit - results. Sum these 4 results with the corresponding 32-bit integer in "src" using signed - saturation, and store the packed 32-bit results in "dst". - - FOR j := 0 to 3 - tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) - tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) - tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) - tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) - dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VNNI - AVX512VL -
immintrin.h
- Arithmetic + + + + + Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst". + +FOR j := 0 to 3 + tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) + tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) + tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) + tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) + dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VNNI + AVX512VL +
immintrin.h
+ Arithmetic
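For the byte form above, "a" is zero-extended and "b" sign-extended, so each of the four products fits in a signed 16-bit intermediate (255*127 = 32385 and 255*(-128) = -32640 are both in range); the four products are then accumulated into "src" with signed saturation:

// Saturating unsigned-by-signed byte dot product, one 32-bit lane.
fn dpbusds_lane(src: i32, a: [u8; 4], b: [i8; 4]) -> i32 {
    let mut sum = i64::from(src);
    for i in 0..4 {
        sum += i64::from(i32::from(a[i]) * i32::from(b[i]));
    }
    sum.clamp(i64::from(i32::MIN), i64::from(i32::MAX)) as i32
}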
- - - - - - Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with - corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit - results. Sum these 4 results with the corresponding 32-bit integer in "src", and store - the packed 32-bit results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 7 - IF k[j] - tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) - tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) - tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) - tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) - dst.dword[j] := src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 - ELSE - dst.dword[j] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VNNI - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 7 + IF k[j] + tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) + tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) + tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) + tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) + dst.dword[j] := src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 + ELSE + dst.dword[j] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VNNI + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with - corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit - results. Sum these 4 results with the corresponding 32-bit integer in "src", and store - the packed 32-bit results in "dst" using writemask "k" (elements are copied from "src" - when the corresponding mask bit is not set). - - FOR j := 0 to 7 - IF k[j] - tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) - tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) - tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) - tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) - dst.dword[j] := src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 - ELSE - dst.dword[j] := src.dword[j] - FI - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VNNI - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 7 + IF k[j] + tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) + tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) + tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) + tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) + dst.dword[j] := src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 + ELSE + dst.dword[j] := src.dword[j] + FI +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VNNI + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with - corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit - results. Sum these 4 results with the corresponding 32-bit integer in "src", and store - the packed 32-bit results in "dst". - - FOR j := 0 to 7 - tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) - tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) - tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) - tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) - dst.dword[j] := src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 - ENDFOR - dst[MAX:256] := 0 - - - AVX512_VNNI - AVX512VL -
immintrin.h
- Arithmetic + + + + + Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst". + +FOR j := 0 to 7 + tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) + tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) + tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) + tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) + dst.dword[j] := src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 +ENDFOR +dst[MAX:256] := 0 + + + AVX512_VNNI + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with - corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit - results. Sum these 4 results with the corresponding 32-bit integer in "src", and store - the packed 32-bit results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 3 - IF k[j] - tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) - tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) - tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) - tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) - dst.dword[j] := src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 - ELSE - dst.dword[j] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VNNI - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 3 + IF k[j] + tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) + tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) + tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) + tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) + dst.dword[j] := src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 + ELSE + dst.dword[j] := 0 + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VNNI + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with - corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit - results. Sum these 4 results with the corresponding 32-bit integer in "src", and store - the packed 32-bit results in "dst" using writemask "k" (elements are copied from "src" - when the corresponding mask bit is not set). - - FOR j := 0 to 3 - IF k[j] - tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) - tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) - tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) - tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) - dst.dword[j] := src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 - ELSE - dst.dword[j] := src.dword[j] - FI - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VNNI - AVX512VL -
immintrin.h
- Arithmetic + + + + + + Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 3 + IF k[j] + tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) + tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) + tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) + tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) + dst.dword[j] := src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 + ELSE + dst.dword[j] := src.dword[j] + FI +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VNNI + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with - corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit - results. Sum these 4 results with the corresponding 32-bit integer in "src", and store - the packed 32-bit results in "dst". - - FOR j := 0 to 3 - tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) - tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) - tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) - tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) - dst.dword[j] := src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 - ENDFOR - dst[MAX:128] := 0 - - - AVX512_VNNI - AVX512VL -
immintrin.h
- Arithmetic -
- - - - - - - - - Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with - corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. - Sum these 2 results with the corresponding 32-bit integer in "src" using signed - saturation, and store the packed 32-bit results in "dst" using zeromask "k" (elements - are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 15 - IF k[j] - tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) - tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) - dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2) - ELSE - dst.dword[j] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VNNI -
immintrin.h
- Arithmetic + + + + + Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst". + +FOR j := 0 to 3 + tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) + tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) + tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) + tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) + dst.dword[j] := src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 +ENDFOR +dst[MAX:128] := 0 + + + AVX512_VNNI + AVX512VL +
immintrin.h
+ Arithmetic +
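As with the 16-bit pair, the non-saturating byte variant differs only in the final accumulation, which wraps:

// Wrapping unsigned-by-signed byte dot product, one 32-bit lane.
fn dpbusd_lane(src: i32, a: [u8; 4], b: [i8; 4]) -> i32 {
    let mut dst = src;
    for i in 0..4 {
        dst = dst.wrapping_add(i32::from(a[i]) * i32::from(b[i]));
    }
    dst
}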
+ + + + + + + + + Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + IF k[j] + tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) + tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) + dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2) + ELSE + dst.dword[j] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VNNI +
immintrin.h
+ Arithmetic
- - - - - - Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with - corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. - Sum these 2 results with the corresponding 32-bit integer in "src" using signed - saturation, and store the packed 32-bit results in "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 15 - IF k[j] - tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) - tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) - dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2) - ELSE - dst.dword[j] := src.dword[j] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VNNI -
immintrin.h
- Arithmetic + + + + + + Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + IF k[j] + tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) + tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) + dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2) + ELSE + dst.dword[j] := src.dword[j] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VNNI +
immintrin.h
+ Arithmetic
- - - - - Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with - corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. - Sum these 2 results with the corresponding 32-bit integer in "src" using signed - saturation, and store the packed 32-bit results in "dst". - - FOR j := 0 to 15 - tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) - tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) - dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2) - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VNNI -
immintrin.h
- Arithmetic + + + + + Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst". + +FOR j := 0 to 15 + tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) + tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) + dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2) +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VNNI +
immintrin.h
+ Arithmetic
- - - - - - Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with - corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. - Sum these 2 results with the corresponding 32-bit integer in "src", and store the packed - 32-bit results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - IF k[j] - tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) - tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) - dst.dword[j] := src.dword[j] + tmp1 + tmp2 - ELSE - dst.dword[j] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VNNI -
immintrin.h
- Arithmetic + + + + + + Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + IF k[j] + tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) + tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) + dst.dword[j] := src.dword[j] + tmp1 + tmp2 + ELSE + dst.dword[j] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VNNI +
immintrin.h
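The maskz (zeromask) and mask (writemask) variants above differ only in what a lane receives when its mask bit is clear: zero in the first case, the original "src" element in the second. A sketch of that per-lane selection (the names are illustrative, not an stdarch API):

    /// Resolve one lane of a masked AVX-512 operation.
    fn masked_lane(mask_bit: bool, computed: i32, src: i32, zeroing: bool) -> i32 {
        match (mask_bit, zeroing) {
            (true, _) => computed,  // mask bit set: take the computed result
            (false, true) => 0,     // zeromask: the lane is zeroed out
            (false, false) => src,  // writemask: the lane is copied from src
        }
    }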
+ Arithmetic
- - - - - - Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with - corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. - Sum these 2 results with the corresponding 32-bit integer in "src", and store the packed - 32-bit results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - IF k[j] - tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) - tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) - dst.dword[j] := src.dword[j] + tmp1 + tmp2 - ELSE - dst.dword[j] := src.dword[j] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VNNI -
immintrin.h
- Arithmetic + + + + + + Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + IF k[j] + tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) + tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) + dst.dword[j] := src.dword[j] + tmp1 + tmp2 + ELSE + dst.dword[j] := src.dword[j] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VNNI +
immintrin.h
+ Arithmetic
- - - - - Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with - corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. - Sum these 2 results with the corresponding 32-bit integer in "src", and store the packed - 32-bit results in "dst". - - FOR j := 0 to 15 - tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) - tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) - dst.dword[j] := src.dword[j] + tmp1 + tmp2 - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VNNI -
immintrin.h
- Arithmetic + + + + + Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst". + +FOR j := 0 to 15 + tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) + tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) + dst.dword[j] := src.dword[j] + tmp1 + tmp2 +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VNNI +
immintrin.h
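The plain dpwssd form drops the saturation: the two i16*i16 products (each at most 2^30 in magnitude, so they always fit in i32) are added to "src" with ordinary wrapping arithmetic. A one-lane sketch (hypothetical helper):

    /// One 32-bit lane of dpwssd: src plus two i16*i16 products, wrapping.
    fn dpwssd_lane(src: i32, a: [i16; 2], b: [i16; 2]) -> i32 {
        src.wrapping_add((a[0] as i32) * (b[0] as i32))
            .wrapping_add((a[1] as i32) * (b[1] as i32))
    }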
+ Arithmetic
- - - - - - Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with - corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit - results. Sum these 4 results with the corresponding 32-bit integer in "src" using signed - saturation, and store the packed 32-bit results in "dst" using zeromask "k" (elements - are zeroed out when the corresponding mask bit is not set). - - FOR j := 0 to 15 - IF k[j] - tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) - tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) - tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) - tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) - dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) - ELSE - dst.dword[j] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VNNI -
immintrin.h
- Arithmetic + + + + + + Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + IF k[j] + tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) + tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) + tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) + tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) + dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) + ELSE + dst.dword[j] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VNNI +
immintrin.h
+ Arithmetic
- - - - - - Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with - corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit - results. Sum these 4 results with the corresponding 32-bit integer in "src" using signed - saturation, and store the packed 32-bit results in "dst" using writemask "k" (elements - are copied from "src" when the corresponding mask bit is not set). - - FOR j := 0 to 15 - IF k[j] - tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) - tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) - tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) - tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) - dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) - ELSE - dst.dword[j] := src.dword[j] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VNNI -
immintrin.h
- Arithmetic + + + + + + Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + IF k[j] + tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) + tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) + tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) + tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) + dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) + ELSE + dst.dword[j] := src.dword[j] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VNNI +
immintrin.h
+ Arithmetic
- - - - - Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with - corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit - results. Sum these 4 results with the corresponding 32-bit integer in "src" using signed - saturation, and store the packed 32-bit results in "dst". - - FOR j := 0 to 15 - tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) - tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) - tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) - tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) - dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VNNI -
immintrin.h
- Arithmetic + + + + + Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst". + +FOR j := 0 to 15 + tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) + tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) + tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) + tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) + dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VNNI +
immintrin.h
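The dpbusds entries combine the byte dot product with the same Saturate32 step: four u8*i8 products are summed into a widened accumulator, which is then clamped to the i32 range. A scalar sketch of one lane (hypothetical helper):

    /// One 32-bit lane of dpbusds: the saturating variant of dpbusd.
    fn dpbusds_lane(src: i32, a: [u8; 4], b: [i8; 4]) -> i32 {
        let mut sum = src as i64;
        for k in 0..4 {
            sum += (a[k] as i64) * (b[k] as i64);
        }
        // Saturate32: clamp the widened sum back to the i32 range.
        sum.clamp(i64::from(i32::MIN), i64::from(i32::MAX)) as i32
    }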
+ Arithmetic
- - - - - - Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with - corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit - results. Sum these 4 results with the corresponding 32-bit integer in "src", and store - the packed 32-bit results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - FOR j := 0 to 15 - IF k[j] - tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) - tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) - tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) - tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) - dst.dword[j] := src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 - ELSE - dst.dword[j] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VNNI -
immintrin.h
- Arithmetic + + + + + + Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +FOR j := 0 to 15 + IF k[j] + tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) + tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) + tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) + tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) + dst.dword[j] := src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 + ELSE + dst.dword[j] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VNNI +
immintrin.h
+ Arithmetic
- - - - - - Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with - corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit - results. Sum these 4 results with the corresponding 32-bit integer in "src", and store - the packed 32-bit results in "dst" using writemask "k" (elements are copied from "src" - when the corresponding mask bit is not set). - - FOR j := 0 to 15 - IF k[j] - tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) - tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) - tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) - tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) - dst.dword[j] := src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 - ELSE - dst.dword[j] := src.dword[j] - FI - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VNNI -
immintrin.h
- Arithmetic + + + + + + Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +FOR j := 0 to 15 + IF k[j] + tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) + tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) + tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) + tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) + dst.dword[j] := src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 + ELSE + dst.dword[j] := src.dword[j] + FI +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VNNI +
immintrin.h
+ Arithmetic
- - - - - Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with - corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit - results. Sum these 4 results with the corresponding 32-bit integer in "src", and store - the packed 32-bit results in "dst". - - FOR j := 0 to 15 - tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) - tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) - tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) - tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) - dst.dword[j] := src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 - ENDFOR - dst[MAX:512] := 0 - - - AVX512_VNNI -
immintrin.h
- Arithmetic -
- - - - - - - - - Compute intersection of packed 32-bit integer vectors "a" and "b", and store - indication of match in the corresponding bit of two mask registers specified by "k1" and - "k2". A match in corresponding elements of "a" and "b" is indicated by a set bit in the - corresponding bit of the mask registers. - - MEM[k1+15:k1] := 0 - MEM[k2+15:k2] := 0 - FOR i := 0 TO 15 - FOR j := 0 TO 15 - match := (a.dword[i] == b.dword[j] ? 1 : 0) - MEM[k1+15:k1].bit[i] |= match - MEM[k2+15:k2].bit[j] |= match - ENDFOR - ENDFOR - - - AVX512_VP2INTERSECT - AVX512F -
immintrin.h
- Mask + + + + + Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst". + +FOR j := 0 to 15 + tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) + tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) + tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) + tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) + dst.dword[j] := src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 +ENDFOR +dst[MAX:512] := 0 + + + AVX512_VNNI +
immintrin.h
+ Arithmetic +
+ + + + + + + + + Compute intersection of packed 32-bit integer vectors "a" and "b", and store indication of match in the corresponding bit of two mask registers specified by "k1" and "k2". A match in corresponding elements of "a" and "b" is indicated by a set bit in the corresponding bit of the mask registers. + +MEM[k1+15:k1] := 0 +MEM[k2+15:k2] := 0 +FOR i := 0 TO 15 + FOR j := 0 TO 15 + match := (a.dword[i] == b.dword[j] ? 1 : 0) + MEM[k1+15:k1].bit[i] |= match + MEM[k2+15:k2].bit[j] |= match + ENDFOR +ENDFOR + + + AVX512_VP2INTERSECT + AVX512F +
immintrin.h
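VP2INTERSECT is an all-pairs comparison: bit i of "k1" records whether a[i] matches any element of "b", and bit j of "k2" records whether b[j] matches any element of "a". A scalar model of the 512-bit dword form, returning the two 16-bit masks by value instead of through memory (hypothetical function, not an stdarch API):

    /// All-pairs equality of two 16-element dword vectors -> two 16-bit masks.
    fn vp2intersect_epi32(a: [i32; 16], b: [i32; 16]) -> (u16, u16) {
        let (mut k1, mut k2) = (0u16, 0u16);
        for i in 0..16 {
            for j in 0..16 {
                if a[i] == b[j] {
                    k1 |= 1u16 << i; // a[i] matched some element of b
                    k2 |= 1u16 << j; // b[j] matched some element of a
                }
            }
        }
        (k1, k2)
    }

The narrower forms that follow are the same double loop with 8, 4, or 2 iterations per side.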
+ Mask
- - - - - - Compute intersection of packed 64-bit integer vectors "a" and "b", and store - indication of match in the corresponding bit of two mask registers specified by "k1" and - "k2". A match in corresponding elements of "a" and "b" is indicated by a set bit in the - corresponding bit of the mask registers. - - MEM[k1+7:k1] := 0 - MEM[k2+7:k2] := 0 - FOR i := 0 TO 7 - FOR j := 0 TO 7 - match := (a.qword[i] == b.qword[j] ? 1 : 0) - MEM[k1+7:k1].bit[i] |= match - MEM[k2+7:k2].bit[j] |= match - ENDFOR - ENDFOR - - - AVX512_VP2INTERSECT - AVX512F -
immintrin.h
- Mask -
- - - - - - - - - Compute intersection of packed 32-bit integer vectors "a" and "b", and store - indication of match in the corresponding bit of two mask registers specified by "k1" and - "k2". A match in corresponding elements of "a" and "b" is indicated by a set bit in the - corresponding bit of the mask registers. - - MEM[k1+7:k1] := 0 - MEM[k2+7:k2] := 0 - FOR i := 0 TO 3 - FOR j := 0 TO 3 - match := (a.dword[i] == b.dword[j] ? 1 : 0) - MEM[k1+7:k1].bit[i] |= match - MEM[k2+7:k2].bit[j] |= match - ENDFOR - ENDFOR - - - AVX512_VP2INTERSECT - AVX512VL -
immintrin.h
- Mask + + + + + + Compute intersection of packed 64-bit integer vectors "a" and "b", and store indication of match in the corresponding bit of two mask registers specified by "k1" and "k2". A match in corresponding elements of "a" and "b" is indicated by a set bit in the corresponding bit of the mask registers. + +MEM[k1+7:k1] := 0 +MEM[k2+7:k2] := 0 +FOR i := 0 TO 7 + FOR j := 0 TO 7 + match := (a.qword[i] == b.qword[j] ? 1 : 0) + MEM[k1+7:k1].bit[i] |= match + MEM[k2+7:k2].bit[j] |= match + ENDFOR +ENDFOR + + + AVX512_VP2INTERSECT + AVX512F +
immintrin.h
+ Mask +
+ + + + + + + + + Compute intersection of packed 32-bit integer vectors "a" and "b", and store indication of match in the corresponding bit of two mask registers specified by "k1" and "k2". A match in corresponding elements of "a" and "b" is indicated by a set bit in the corresponding bit of the mask registers. + +MEM[k1+7:k1] := 0 +MEM[k2+7:k2] := 0 +FOR i := 0 TO 3 + FOR j := 0 TO 3 + match := (a.dword[i] == b.dword[j] ? 1 : 0) + MEM[k1+7:k1].bit[i] |= match + MEM[k2+7:k2].bit[j] |= match + ENDFOR +ENDFOR + + + AVX512_VP2INTERSECT + AVX512VL +
immintrin.h
+ Mask
- - - - - - Compute intersection of packed 32-bit integer vectors "a" and "b", and store - indication of match in the corresponding bit of two mask registers specified by "k1" and - "k2". A match in corresponding elements of "a" and "b" is indicated by a set bit in the - corresponding bit of the mask registers. - - MEM[k1+7:k1] := 0 - MEM[k2+7:k2] := 0 - FOR i := 0 TO 7 - FOR j := 0 TO 7 - match := (a.dword[i] == b.dword[j] ? 1 : 0) - MEM[k1+7:k1].bit[i] |= match - MEM[k2+7:k2].bit[j] |= match - ENDFOR - ENDFOR - - - AVX512_VP2INTERSECT - AVX512VL -
immintrin.h
- Mask + + + + + + Compute intersection of packed 32-bit integer vectors "a" and "b", and store indication of match in the corresponding bit of two mask registers specified by "k1" and "k2". A match in corresponding elements of "a" and "b" is indicated by a set bit in the corresponding bit of the mask registers. + +MEM[k1+7:k1] := 0 +MEM[k2+7:k2] := 0 +FOR i := 0 TO 7 + FOR j := 0 TO 7 + match := (a.dword[i] == b.dword[j] ? 1 : 0) + MEM[k1+7:k1].bit[i] |= match + MEM[k2+7:k2].bit[j] |= match + ENDFOR +ENDFOR + + + AVX512_VP2INTERSECT + AVX512VL +
immintrin.h
+ Mask
- - - - - - Compute intersection of packed 64-bit integer vectors "a" and "b", and store - indication of match in the corresponding bit of two mask registers specified by "k1" and - "k2". A match in corresponding elements of "a" and "b" is indicated by a set bit in the - corresponding bit of the mask registers. - - MEM[k1+7:k1] := 0 - MEM[k2+7:k2] := 0 - FOR i := 0 TO 1 - FOR j := 0 TO 1 - match := (a.qword[i] == b.qword[j] ? 1 : 0) - MEM[k1+7:k1].bit[i] |= match - MEM[k2+7:k2].bit[j] |= match - ENDFOR - ENDFOR - - - AVX512_VP2INTERSECT - AVX512VL -
immintrin.h
- Mask + + + + + + Compute intersection of packed 64-bit integer vectors "a" and "b", and store indication of match in the corresponding bit of two mask registers specified by "k1" and "k2". A match in corresponding elements of "a" and "b" is indicated by a set bit in the corresponding bit of the mask registers. + +MEM[k1+7:k1] := 0 +MEM[k2+7:k2] := 0 +FOR i := 0 TO 1 + FOR j := 0 TO 1 + match := (a.qword[i] == b.qword[j] ? 1 : 0) + MEM[k1+7:k1].bit[i] |= match + MEM[k2+7:k2].bit[j] |= match + ENDFOR +ENDFOR + + + AVX512_VP2INTERSECT + AVX512VL +
immintrin.h
+ Mask
- - - - - - Compute intersection of packed 64-bit integer vectors "a" and "b", and store - indication of match in the corresponding bit of two mask registers specified by "k1" and - "k2". A match in corresponding elements of "a" and "b" is indicated by a set bit in the - corresponding bit of the mask registers. - - MEM[k1+7:k1] := 0 - MEM[k2+7:k2] := 0 - FOR i := 0 TO 3 - FOR j := 0 TO 3 - match := (a.qword[i] == b.qword[j] ? 1 : 0) - MEM[k1+7:k1].bit[i] |= match - MEM[k2+7:k2].bit[j] |= match - ENDFOR - ENDFOR - - - AVX512_VP2INTERSECT - AVX512VL -
immintrin.h
- Mask -
- - - - - Multiply packed unsigned 52-bit integers in each 64-bit element of "__Y" and - "__Z" to form a 104-bit intermediate result. Add the high 52-bit unsigned integer from - the intermediate result with the corresponding unsigned 64-bit integer in "__X", and - store the results in "dst". - - - FOR j := 0 to 3 - i := j*64 - tmp[127:0] := ZeroExtend64(__Y[i+51:i]) * ZeroExtend64(__Z[i+51:i]) - dst[i+63:i] := __X[i+63:i] + ZeroExtend64(tmp[103:52]) - ENDFOR - dst[MAX:256] := 0 + + + + + + Compute intersection of packed 64-bit integer vectors "a" and "b", and store indication of match in the corresponding bit of two mask registers specified by "k1" and "k2". A match in corresponding elements of "a" and "b" is indicated by a set bit in the corresponding bit of the mask registers. + +MEM[k1+7:k1] := 0 +MEM[k2+7:k2] := 0 +FOR i := 0 TO 3 + FOR j := 0 TO 3 + match := (a.qword[i] == b.qword[j] ? 1 : 0) + MEM[k1+7:k1].bit[i] |= match + MEM[k2+7:k2].bit[j] |= match + ENDFOR +ENDFOR + + + AVX512_VP2INTERSECT + AVX512VL +
immintrin.h
+ Mask +
+ + + + + Multiply packed unsigned 52-bit integers in each 64-bit element of "__Y" and "__Z" to form a 104-bit intermediate result. Add the high 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "__X", and store the results in "dst". + + +FOR j := 0 to 3 + i := j*64 + tmp[127:0] := ZeroExtend64(__Y[i+51:i]) * ZeroExtend64(__Z[i+51:i]) + dst[i+63:i] := __X[i+63:i] + ZeroExtend64(tmp[103:52]) +ENDFOR +dst[MAX:256] := 0 - - - - AVX_IFMA -
immintrin.h
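The IFMA entries treat only the low 52 bits of each 64-bit lane of "__Y" and "__Z" as operands, form the full 104-bit product, and add either its high or low 52-bit half to "__X". A scalar sketch of one lane, using u128 for the full product (hypothetical helper):

    const MASK52: u64 = (1u64 << 52) - 1;

    /// One 64-bit lane of madd52hi (high = true) or madd52lo (high = false).
    fn madd52_lane(x: u64, y: u64, z: u64, high: bool) -> u64 {
        // 52-bit * 52-bit needs up to 104 bits, so widen through u128.
        let prod = u128::from(y & MASK52) * u128::from(z & MASK52);
        let half = if high {
            ((prod >> 52) as u64) & MASK52 // tmp[103:52]
        } else {
            (prod as u64) & MASK52 // tmp[51:0]
        };
        x.wrapping_add(half)
    }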
- Arithmetic + + + + AVX_IFMA +
immintrin.h
+ Arithmetic
- - Multiply packed unsigned 52-bit integers in each 64-bit element of "__Y" and - "__Z" to form a 104-bit intermediate result. Add the low 52-bit unsigned integer from - the intermediate result with the corresponding unsigned 64-bit integer in "__X", and - store the results in "dst". - - - FOR j := 0 to 3 - i := j*64 - tmp[127:0] := ZeroExtend64(__Y[i+51:i]) * ZeroExtend64(__Z[i+51:i]) - dst[i+63:i] := __X[i+63:i] + ZeroExtend64(tmp[51:0]) - ENDFOR - dst[MAX:256] := 0 + + Multiply packed unsigned 52-bit integers in each 64-bit element of "__Y" and "__Z" to form a 104-bit intermediate result. Add the low 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "__X", and store the results in "dst". + + +FOR j := 0 to 3 + i := j*64 + tmp[127:0] := ZeroExtend64(__Y[i+51:i]) * ZeroExtend64(__Z[i+51:i]) + dst[i+63:i] := __X[i+63:i] + ZeroExtend64(tmp[51:0]) +ENDFOR +dst[MAX:256] := 0 - - - - AVX_IFMA -
immintrin.h
- Arithmetic + + + + AVX_IFMA +
immintrin.h
+ Arithmetic
- - Multiply packed unsigned 52-bit integers in each 64-bit element of "__Y" and - "__Z" to form a 104-bit intermediate result. Add the high 52-bit unsigned integer from - the intermediate result with the corresponding unsigned 64-bit integer in "__X", and - store the results in "dst". - - - FOR j := 0 to 1 - i := j*64 - tmp[127:0] := ZeroExtend64(__Y[i+51:i]) * ZeroExtend64(__Z[i+51:i]) - dst[i+63:i] := __X[i+63:i] + ZeroExtend64(tmp[103:52]) - ENDFOR - dst[MAX:128] := 0 + + Multiply packed unsigned 52-bit integers in each 64-bit element of "__Y" and "__Z" to form a 104-bit intermediate result. Add the high 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "__X", and store the results in "dst". + + +FOR j := 0 to 1 + i := j*64 + tmp[127:0] := ZeroExtend64(__Y[i+51:i]) * ZeroExtend64(__Z[i+51:i]) + dst[i+63:i] := __X[i+63:i] + ZeroExtend64(tmp[103:52]) +ENDFOR +dst[MAX:128] := 0 - - - - AVX_IFMA -
immintrin.h
- Arithmetic + + + + AVX_IFMA +
immintrin.h
+ Arithmetic
- - Multiply packed unsigned 52-bit integers in each 64-bit element of "__Y" and - "__Z" to form a 104-bit intermediate result. Add the low 52-bit unsigned integer from - the intermediate result with the corresponding unsigned 64-bit integer in "__X", and - store the results in "dst". - - - FOR j := 0 to 1 - i := j*64 - tmp[127:0] := ZeroExtend64(__Y[i+51:i]) * ZeroExtend64(__Z[i+51:i]) - dst[i+63:i] := __X[i+63:i] + ZeroExtend64(tmp[51:0]) - ENDFOR - dst[MAX:128] := 0 + + Multiply packed unsigned 52-bit integers in each 64-bit element of "__Y" and "__Z" to form a 104-bit intermediate result. Add the low 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "__X", and store the results in "dst". + + +FOR j := 0 to 1 + i := j*64 + tmp[127:0] := ZeroExtend64(__Y[i+51:i]) * ZeroExtend64(__Z[i+51:i]) + dst[i+63:i] := __X[i+63:i] + ZeroExtend64(tmp[51:0]) +ENDFOR +dst[MAX:128] := 0 - - - - AVX_IFMA -
immintrin.h
- Arithmetic -
- - - Multiply packed unsigned 52-bit integers in each 64-bit element of "__Y" and - "__Z" to form a 104-bit intermediate result. Add the high 52-bit unsigned integer from - the intermediate result with the corresponding unsigned 64-bit integer in "__X", and - store the results in "dst". - - - FOR j := 0 to 3 - i := j*64 - tmp[127:0] := ZeroExtend64(__Y[i+51:i]) * ZeroExtend64(__Z[i+51:i]) - dst[i+63:i] := __X[i+63:i] + ZeroExtend64(tmp[103:52]) - ENDFOR - dst[MAX:256] := 0 + + + + AVX_IFMA +
immintrin.h
+ Arithmetic +
+ + + Multiply packed unsigned 52-bit integers in each 64-bit element of "__Y" and "__Z" to form a 104-bit intermediate result. Add the high 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "__X", and store the results in "dst". + + +FOR j := 0 to 3 + i := j*64 + tmp[127:0] := ZeroExtend64(__Y[i+51:i]) * ZeroExtend64(__Z[i+51:i]) + dst[i+63:i] := __X[i+63:i] + ZeroExtend64(tmp[103:52]) +ENDFOR +dst[MAX:256] := 0 - - - - AVX_IFMA -
immintrin.h
- Arithmetic + + + + AVX_IFMA +
immintrin.h
+ Arithmetic
- - Multiply packed unsigned 52-bit integers in each 64-bit element of "__Y" and - "__Z" to form a 104-bit intermediate result. Add the low 52-bit unsigned integer from - the intermediate result with the corresponding unsigned 64-bit integer in "__X", and - store the results in "dst". - - - FOR j := 0 to 3 - i := j*64 - tmp[127:0] := ZeroExtend64(__Y[i+51:i]) * ZeroExtend64(__Z[i+51:i]) - dst[i+63:i] := __X[i+63:i] + ZeroExtend64(tmp[51:0]) - ENDFOR - dst[MAX:256] := 0 + + Multiply packed unsigned 52-bit integers in each 64-bit element of "__Y" and "__Z" to form a 104-bit intermediate result. Add the low 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "__X", and store the results in "dst". + + +FOR j := 0 to 3 + i := j*64 + tmp[127:0] := ZeroExtend64(__Y[i+51:i]) * ZeroExtend64(__Z[i+51:i]) + dst[i+63:i] := __X[i+63:i] + ZeroExtend64(tmp[51:0]) +ENDFOR +dst[MAX:256] := 0 - - - - AVX_IFMA -
immintrin.h
- Arithmetic + + + + AVX_IFMA +
immintrin.h
+ Arithmetic
- - Multiply packed unsigned 52-bit integers in each 64-bit element of "__Y" and - "__Z" to form a 104-bit intermediate result. Add the high 52-bit unsigned integer from - the intermediate result with the corresponding unsigned 64-bit integer in "__X", and - store the results in "dst". - - - FOR j := 0 to 1 - i := j*64 - tmp[127:0] := ZeroExtend64(__Y[i+51:i]) * ZeroExtend64(__Z[i+51:i]) - dst[i+63:i] := __X[i+63:i] + ZeroExtend64(tmp[103:52]) - ENDFOR - dst[MAX:128] := 0 + + Multiply packed unsigned 52-bit integers in each 64-bit element of "__Y" and "__Z" to form a 104-bit intermediate result. Add the high 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "__X", and store the results in "dst". + + +FOR j := 0 to 1 + i := j*64 + tmp[127:0] := ZeroExtend64(__Y[i+51:i]) * ZeroExtend64(__Z[i+51:i]) + dst[i+63:i] := __X[i+63:i] + ZeroExtend64(tmp[103:52]) +ENDFOR +dst[MAX:128] := 0 - - - - AVX_IFMA -
immintrin.h
- Arithmetic + + + + AVX_IFMA +
immintrin.h
+ Arithmetic
- - Multiply packed unsigned 52-bit integers in each 64-bit element of "__Y" and - "__Z" to form a 104-bit intermediate result. Add the low 52-bit unsigned integer from - the intermediate result with the corresponding unsigned 64-bit integer in "__X", and - store the results in "dst". - - - FOR j := 0 to 1 - i := j*64 - tmp[127:0] := ZeroExtend64(__Y[i+51:i]) * ZeroExtend64(__Z[i+51:i]) - dst[i+63:i] := __X[i+63:i] + ZeroExtend64(tmp[51:0]) - ENDFOR - dst[MAX:128] := 0 + + Multiply packed unsigned 52-bit integers in each 64-bit element of "__Y" and "__Z" to form a 104-bit intermediate result. Add the low 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "__X", and store the results in "dst". + + +FOR j := 0 to 1 + i := j*64 + tmp[127:0] := ZeroExtend64(__Y[i+51:i]) * ZeroExtend64(__Z[i+51:i]) + dst[i+63:i] := __X[i+63:i] + ZeroExtend64(tmp[51:0]) +ENDFOR +dst[MAX:128] := 0 - - - - AVX_IFMA -
immintrin.h
- Arithmetic -
- - + + + + AVX_IFMA +
immintrin.h
+ Arithmetic +
+ + - Convert scalar BF16 (16-bit) floating-point element stored at memory locations - starting at location "__A" to a single-precision (32-bit) floating-point, broadcast it - to packed single-precision (32-bit) floating-point elements, and store the results in - "dst". + Convert scalar BF16 (16-bit) floating-point element stored at memory locations starting at location "__A" to a single-precision (32-bit) floating-point, broadcast it to packed single-precision (32-bit) floating-point elements, and store the results in "dst". - b := Convert_BF16_To_FP32(MEM[__A+15:__A]) - FOR j := 0 to 7 - m := j*32 - dst[m+31:m] := b - ENDFOR - dst[MAX:256] := 0 +b := Convert_BF16_To_FP32(MEM[__A+15:__A]) +FOR j := 0 to 7 + m := j*32 + dst[m+31:m] := b +ENDFOR +dst[MAX:256] := 0 - - AVX_NE_CONVERT -
immintrin.h
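Convert_BF16_To_FP32 is cheap because BF16 is simply the high 16 bits of an f32 (same sign and exponent layout, truncated mantissa). A one-line sketch:

    /// Widen one BF16 value (raw bits) to f32 by shifting into the high half.
    fn bf16_to_f32(b: u16) -> f32 {
        f32::from_bits(u32::from(b) << 16)
    }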
- Convert + + AVX_NE_CONVERT +
immintrin.h
+ Convert
- Convert scalar half-precision (16-bit) floating-point element stored at memory - locations starting at location "__A" to a single-precision (32-bit) floating-point, - broadcast it to packed single-precision (32-bit) floating-point elements, and store the - results in "dst". + Convert scalar half-precision (16-bit) floating-point element stored at memory locations starting at location "__A" to a single-precision (32-bit) floating-point, broadcast it to packed single-precision (32-bit) floating-point elements, and store the results in "dst". - b := Convert_FP16_To_FP32(MEM[__A+15:__A]) - FOR j := 0 to 7 - m := j*32 - dst[m+31:m] := b - ENDFOR - dst[MAX:256] := 0 +b := Convert_FP16_To_FP32(MEM[__A+15:__A]) +FOR j := 0 to 7 + m := j*32 + dst[m+31:m] := b +ENDFOR +dst[MAX:256] := 0 - - AVX_NE_CONVERT -
immintrin.h
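Convert_FP16_To_FP32, by contrast, is a genuine IEEE binary16-to-binary32 widening (always exact). On nightly Rust it can be modelled with the f16 primitive this patch series builds on; this sketch assumes the unstable f16 feature is available:

    #![feature(f16)] // nightly-only, like the feature gate this series enables

    /// Widen one IEEE binary16 value (raw bits) to f32; the cast is lossless.
    fn fp16_to_f32(h: u16) -> f32 {
        f16::from_bits(h) as f32
    }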
- Convert + + AVX_NE_CONVERT +
immintrin.h
+ Convert
- Convert packed BF16 (16-bit) floating-point even-indexed elements stored at - memory locations starting at location "__A" to packed single-precision (32-bit) - floating-point elements, and store the results in "dst". + Convert packed BF16 (16-bit) floating-point even-indexed elements stored at memory locations starting at location "__A" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". - FOR j := 0 to 7 - m := j*32 - dst[m+31:m] := Convert_BF16_To_FP32(MEM[__A+m+15:__A+m]) - ENDFOR - dst[MAX:256] := 0 +FOR j := 0 to 7 + m := j*32 + dst[m+31:m] := Convert_BF16_To_FP32(MEM[__A+m+15:__A+m]) +ENDFOR +dst[MAX:256] := 0 - - AVX_NE_CONVERT -
immintrin.h
- Convert + + AVX_NE_CONVERT +
immintrin.h
+ Convert
- Convert packed half-precision (16-bit) floating-point even-indexed elements - stored at memory locations starting at location "__A" to packed single-precision - (32-bit) floating-point elements, and store the results in "dst". + Convert packed half-precision (16-bit) floating-point even-indexed elements stored at memory locations starting at location "__A" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". - FOR j := 0 to 7 - m := j*32 - dst[m+31:m] := Convert_FP16_To_FP32(MEM[__A+m+15:__A+m]) - ENDFOR - dst[MAX:256] := 0 +FOR j := 0 to 7 + m := j*32 + dst[m+31:m] := Convert_FP16_To_FP32(MEM[__A+m+15:__A+m]) +ENDFOR +dst[MAX:256] := 0 - AVX_NE_CONVERT -
immintrin.h
- Convert + AVX_NE_CONVERT +
immintrin.h
+ Convert
- Convert packed BF16 (16-bit) floating-point odd-indexed elements stored at - memory locations starting at location "__A" to packed single-precision (32-bit) - floating-point elements, and store the results in "dst". + Convert packed BF16 (16-bit) floating-point odd-indexed elements stored at memory locations starting at location "__A" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". - FOR j := 0 to 7 - m := j*32 - dst[m+31:m] := Convert_BF16_To_FP32(MEM[__A+m+31:__A+m+16]) - ENDFOR - dst[MAX:256] := 0 +FOR j := 0 to 7 + m := j*32 + dst[m+31:m] := Convert_BF16_To_FP32(MEM[__A+m+31:__A+m+16]) +ENDFOR +dst[MAX:256] := 0 - AVX_NE_CONVERT -
immintrin.h
- Convert + AVX_NE_CONVERT +
immintrin.h
+ Convert
- Convert packed half-precision (16-bit) floating-point odd-indexed elements - stored at memory locations starting at location "__A" to packed single-precision - (32-bit) floating-point elements, and store the results in "dst". + Convert packed half-precision (16-bit) floating-point odd-indexed elements stored at memory locations starting at location "__A" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". - FOR j := 0 to 7 - m := j*32 - dst[m+31:m] := Convert_FP16_To_FP32(MEM[__A+m+31:__A+m+16]) - ENDFOR - dst[MAX:256] := 0 +FOR j := 0 to 7 + m := j*32 + dst[m+31:m] := Convert_FP16_To_FP32(MEM[__A+m+31:__A+m+16]) +ENDFOR +dst[MAX:256] := 0 - AVX_NE_CONVERT -
immintrin.h
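The even-indexed and odd-indexed load-convert forms above read every other 16-bit element of the source: dword lane j is fed by element 2*j (bits m+15:m) in the even case and by element 2*j+1 (bits m+31:m+16) in the odd case. The index math, over the source viewed as a slice of raw 16-bit elements (illustrative helper):

    /// 16-bit source element feeding dword lane j of the even/odd forms.
    fn ne_convert_source_index(j: usize, odd: bool) -> usize {
        2 * j + usize::from(odd)
    }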
- Convert + AVX_NE_CONVERT +
immintrin.h
+ Convert
- Convert packed single-precision (32-bit) floating-point elements in "__A" to - packed BF16 (16-bit) floating-point elements, and store the results in "dst". - + Convert packed single-precision (32-bit) floating-point elements in "__A" to packed BF16 (16-bit) floating-point elements, and store the results in "dst". + - FOR j := 0 to 7 - dst.word[j] := Convert_FP32_To_BF16(__A.fp32[j]) - ENDFOR - dst[MAX:128] := 0 +FOR j := 0 to 7 + dst.word[j] := Convert_FP32_To_BF16(__A.fp32[j]) +ENDFOR +dst[MAX:128] := 0 - AVX_NE_CONVERT -
immintrin.h
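Convert_FP32_To_BF16 in these entries rounds to nearest-even before keeping the high 16 bits. A common bit-level sketch of that rounding; it is simplified in that it does not special-case NaN inputs, which a faithful model would need to quiet:

    /// Round an f32 to BF16 (returned as raw bits) with round-to-nearest-even.
    fn f32_to_bf16(x: f32) -> u16 {
        let bits = x.to_bits();
        // Add 0x7fff plus the LSB of the kept half so that ties round to even.
        let round = 0x7fff + ((bits >> 16) & 1);
        ((bits.wrapping_add(round)) >> 16) as u16 // NaN handling omitted
    }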
- Convert + AVX_NE_CONVERT +
immintrin.h
+ Convert
- Convert scalar BF16 (16-bit) floating-point element stored at memory locations - starting at location "__A" to a single-precision (32-bit) floating-point, broadcast it - to packed single-precision (32-bit) floating-point elements, and store the results in - "dst". + Convert scalar BF16 (16-bit) floating-point element stored at memory locations starting at location "__A" to a single-precision (32-bit) floating-point, broadcast it to packed single-precision (32-bit) floating-point elements, and store the results in "dst". - b := Convert_BF16_To_FP32(MEM[__A+15:__A]) - FOR j := 0 to 3 - m := j*32 - dst[m+31:m] := b - ENDFOR - dst[MAX:128] := 0 +b := Convert_BF16_To_FP32(MEM[__A+15:__A]) +FOR j := 0 to 3 + m := j*32 + dst[m+31:m] := b +ENDFOR +dst[MAX:128] := 0 - - AVX_NE_CONVERT -
immintrin.h
- Convert + + AVX_NE_CONVERT +
immintrin.h
+ Convert
- Convert scalar half-precision (16-bit) floating-point element stored at memory - locations starting at location "__A" to a single-precision (32-bit) floating-point, - broadcast it to packed single-precision (32-bit) floating-point elements, and store the - results in "dst". + Convert scalar half-precision (16-bit) floating-point element stored at memory locations starting at location "__A" to a single-precision (32-bit) floating-point, broadcast it to packed single-precision (32-bit) floating-point elements, and store the results in "dst". - b := Convert_FP16_To_FP32(MEM[__A+15:__A]) - FOR j := 0 to 3 - m := j*32 - dst[m+31:m] := b - ENDFOR - dst[MAX:128] := 0 +b := Convert_FP16_To_FP32(MEM[__A+15:__A]) +FOR j := 0 to 3 + m := j*32 + dst[m+31:m] := b +ENDFOR +dst[MAX:128] := 0 - - AVX_NE_CONVERT -
immintrin.h
- Convert + + AVX_NE_CONVERT +
immintrin.h
+ Convert
- Convert packed BF16 (16-bit) floating-point even-indexed elements stored at - memory locations starting at location "__A" to packed single-precision (32-bit) - floating-point elements, and store the results in "dst". + Convert packed BF16 (16-bit) floating-point even-indexed elements stored at memory locations starting at location "__A" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". - FOR j := 0 to 3 - m := j*32 - dst[m+31:m] := Convert_BF16_To_FP32(MEM[__A+m+15:__A+m]) - ENDFOR - dst[MAX:128] := 0 +FOR j := 0 to 3 + m := j*32 + dst[m+31:m] := Convert_BF16_To_FP32(MEM[__A+m+15:__A+m]) +ENDFOR +dst[MAX:128] := 0 - AVX_NE_CONVERT -
immintrin.h
- Convert + AVX_NE_CONVERT +
immintrin.h
+ Convert
- Convert packed half-precision (16-bit) floating-point even-indexed elements - stored at memory locations starting at location "__A" to packed single-precision - (32-bit) floating-point elements, and store the results in "dst". + Convert packed half-precision (16-bit) floating-point even-indexed elements stored at memory locations starting at location "__A" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". - FOR j := 0 to 3 - m := j*32 - dst[m+31:m] := Convert_FP16_To_FP32(MEM[__A+m+15:__A+m]) - ENDFOR - dst[MAX:128] := 0 +FOR j := 0 to 3 + m := j*32 + dst[m+31:m] := Convert_FP16_To_FP32(MEM[__A+m+15:__A+m]) +ENDFOR +dst[MAX:128] := 0 - AVX_NE_CONVERT -
immintrin.h
- Convert + AVX_NE_CONVERT +
immintrin.h
+ Convert
- Convert packed BF16 (16-bit) floating-point odd-indexed elements stored at - memory locations starting at location "__A" to packed single-precision (32-bit) - floating-point elements, and store the results in "dst". + Convert packed BF16 (16-bit) floating-point odd-indexed elements stored at memory locations starting at location "__A" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". - FOR j := 0 to 3 - m := j*32 - dst[m+31:m] := Convert_BF16_To_FP32(MEM[__A+m+31:__A+m+16]) - ENDFOR - dst[MAX:128] := 0 +FOR j := 0 to 3 + m := j*32 + dst[m+31:m] := Convert_BF16_To_FP32(MEM[__A+m+31:__A+m+16]) +ENDFOR +dst[MAX:128] := 0 - AVX_NE_CONVERT -
immintrin.h
- Convert + AVX_NE_CONVERT +
immintrin.h
+ Convert
- Convert packed half-precision (16-bit) floating-point odd-indexed elements - stored at memory locations starting at location "__A" to packed single-precision - (32-bit) floating-point elements, and store the results in "dst". + Convert packed half-precision (16-bit) floating-point odd-indexed elements stored at memory locations starting at location "__A" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". - FOR j := 0 to 3 - m := j*32 - dst[m+31:m] := Convert_FP16_To_FP32(MEM[__A+m+31:__A+m+16]) - ENDFOR - dst[MAX:128] := 0 +FOR j := 0 to 3 + m := j*32 + dst[m+31:m] := Convert_FP16_To_FP32(MEM[__A+m+31:__A+m+16]) +ENDFOR +dst[MAX:128] := 0 - AVX_NE_CONVERT -
immintrin.h
- Convert + AVX_NE_CONVERT +
immintrin.h
+ Convert
- Convert packed single-precision (32-bit) floating-point elements in "__A" to - packed BF16 (16-bit) floating-point elements, and store the results in "dst". - + Convert packed single-precision (32-bit) floating-point elements in "__A" to packed BF16 (16-bit) floating-point elements, and store the results in "dst". + - FOR j := 0 to 3 - dst.word[j] := Convert_FP32_To_BF16(__A.fp32[j]) - ENDFOR - dst[MAX:128] := 0 +FOR j := 0 to 3 + dst.word[j] := Convert_FP32_To_BF16(__A.fp32[j]) +ENDFOR +dst[MAX:128] := 0 - AVX_NE_CONVERT -
immintrin.h
- Convert + AVX_NE_CONVERT +
immintrin.h
+ Convert
- Convert packed single-precision (32-bit) floating-point elements in "__A" to - packed BF16 (16-bit) floating-point elements, and store the results in "dst". - + Convert packed single-precision (32-bit) floating-point elements in "__A" to packed BF16 (16-bit) floating-point elements, and store the results in "dst". + - FOR j := 0 to 7 - dst.word[j] := Convert_FP32_To_BF16(__A.fp32[j]) - ENDFOR - dst[MAX:128] := 0 +FOR j := 0 to 7 + dst.word[j] := Convert_FP32_To_BF16(__A.fp32[j]) +ENDFOR +dst[MAX:128] := 0 - AVX_NE_CONVERT -
immintrin.h
- Convert + AVX_NE_CONVERT +
immintrin.h
+ Convert
- Convert packed single-precision (32-bit) floating-point elements in "__A" to - packed BF16 (16-bit) floating-point elements, and store the results in "dst". - + Convert packed single-precision (32-bit) floating-point elements in "__A" to packed BF16 (16-bit) floating-point elements, and store the results in "dst". + - FOR j := 0 to 3 - dst.word[j] := Convert_FP32_To_BF16(__A.fp32[j]) - ENDFOR - dst[MAX:128] := 0 +FOR j := 0 to 3 + dst.word[j] := Convert_FP32_To_BF16(__A.fp32[j]) +ENDFOR +dst[MAX:128] := 0 - AVX_NE_CONVERT -
immintrin.h
- Convert + AVX_NE_CONVERT +
immintrin.h
+ Convert
- + - Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with - corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit - results. Sum these 4 results with the corresponding 32-bit integer in "src", and store - the packed 32-bit results in "dst". + Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst". - FOR j := 0 to 7 - tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) - tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) - tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) - tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) - dst.dword[j] := src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 - ENDFOR - dst[MAX:256] := 0 +FOR j := 0 to 7 + tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) + tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) + tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) + tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) + dst.dword[j] := src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 +ENDFOR +dst[MAX:256] := 0 - AVX_VNNI -
immintrin.h
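The VEX-encoded AVX_VNNI entries compute the same per-lane operation as the EVEX forms earlier in this file, just without masking and only at 128/256-bit widths. Iterating the earlier one-lane sketch over all eight dword lanes of a 256-bit dpbusd (hypothetical helper):

    /// Whole-vector model of the 256-bit dpbusd: 8 independent dword lanes.
    fn dpbusd_256(src: [i32; 8], a: [u8; 32], b: [i8; 32]) -> [i32; 8] {
        let mut dst = src;
        for j in 0..8 {
            for k in 0..4 {
                // Bytes 4*j .. 4*j+3 of a and b feed dword lane j.
                dst[j] = dst[j]
                    .wrapping_add(i32::from(a[4 * j + k]) * i32::from(b[4 * j + k]));
            }
        }
        dst
    }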
- Arithmetic + AVX_VNNI +
immintrin.h
+ Arithmetic
- Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with - corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit - results. Sum these 4 results with the corresponding 32-bit integer in "src" using signed - saturation, and store the packed 32-bit results in "dst". + Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst". - FOR j := 0 to 7 - tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) - tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) - tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) - tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) - dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) - ENDFOR - dst[MAX:256] := 0 +FOR j := 0 to 7 + tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) + tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) + tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) + tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) + dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) +ENDFOR +dst[MAX:256] := 0 - AVX_VNNI -
immintrin.h
- Arithmetic + AVX_VNNI +
immintrin.h
+ Arithmetic
- Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with - corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. - Sum these 2 results with the corresponding 32-bit integer in "src", and store the packed - 32-bit results in "dst". + Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst". - FOR j := 0 to 7 - tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) - tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) - dst.dword[j] := src.dword[j] + tmp1 + tmp2 - ENDFOR - dst[MAX:256] := 0 +FOR j := 0 to 7 + tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) + tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) + dst.dword[j] := src.dword[j] + tmp1 + tmp2 +ENDFOR +dst[MAX:256] := 0 - AVX_VNNI -
immintrin.h
- Arithmetic + AVX_VNNI +
immintrin.h
+ Arithmetic
- Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with - corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. - Sum these 2 results with the corresponding 32-bit integer in "src" using signed - saturation, and store the packed 32-bit results in "dst". + Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst". - FOR j := 0 to 7 - tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) - tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) - dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2) - ENDFOR - dst[MAX:256] := 0 +FOR j := 0 to 7 + tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) + tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) + dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2) +ENDFOR +dst[MAX:256] := 0 - AVX_VNNI -
immintrin.h
- Arithmetic + AVX_VNNI +
immintrin.h
+ Arithmetic
- Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with - corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit - results. Sum these 4 results with the corresponding 32-bit integer in "src", and store - the packed 32-bit results in "dst". + Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst". - FOR j := 0 to 3 - tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) - tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) - tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) - tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) - dst.dword[j] := src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 - ENDFOR - dst[MAX:128] := 0 +FOR j := 0 to 3 + tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) + tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) + tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) + tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) + dst.dword[j] := src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 +ENDFOR +dst[MAX:128] := 0 - AVX_VNNI -
immintrin.h
- Arithmetic + AVX_VNNI +
immintrin.h
+ Arithmetic
- Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with - corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit - results. Sum these 4 results with the corresponding 32-bit integer in "src" using signed - saturation, and store the packed 32-bit results in "dst". + Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst". - FOR j := 0 to 3 - tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) - tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) - tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) - tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) - dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) - ENDFOR - dst[MAX:128] := 0 +FOR j := 0 to 3 + tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) + tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) + tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) + tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) + dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) +ENDFOR +dst[MAX:128] := 0 - AVX_VNNI -
immintrin.h
- Arithmetic + AVX_VNNI +
immintrin.h
+ Arithmetic
- Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with - corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. - Sum these 2 results with the corresponding 32-bit integer in "src", and store the packed - 32-bit results in "dst". + Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst". - FOR j := 0 to 3 - tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) - tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) - dst.dword[j] := src.dword[j] + tmp1 + tmp2 - ENDFOR - dst[MAX:128] := 0 +FOR j := 0 to 3 + tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) + tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) + dst.dword[j] := src.dword[j] + tmp1 + tmp2 +ENDFOR +dst[MAX:128] := 0 - AVX_VNNI -
immintrin.h
- Arithmetic + AVX_VNNI +
immintrin.h
+ Arithmetic
- Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with - corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. - Sum these 2 results with the corresponding 32-bit integer in "src" using signed - saturation, and store the packed 32-bit results in "dst". + Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst". - FOR j := 0 to 3 - tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) - tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) - dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2) - ENDFOR - dst[MAX:128] := 0 +FOR j := 0 to 3 + tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) + tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) + dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2) +ENDFOR +dst[MAX:128] := 0 - AVX_VNNI -
immintrin.h
- Arithmetic + AVX_VNNI +
immintrin.h
+ Arithmetic
- + - Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with - corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit - results. Sum these 4 results with the corresponding 32-bit integer in "src", and store - the packed 32-bit results in "dst". + Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst". - FOR j := 0 to 7 - tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) - tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) - tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) - tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) - dst.dword[j] := src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 - ENDFOR - dst[MAX:256] := 0 +FOR j := 0 to 7 + tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) + tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) + tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) + tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) + dst.dword[j] := src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 +ENDFOR +dst[MAX:256] := 0 - AVX_VNNI -
immintrin.h
- Arithmetic + AVX_VNNI +
immintrin.h
+ Arithmetic
- Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with - corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit - results. Sum these 4 results with the corresponding 32-bit integer in "src" using signed - saturation, and store the packed 32-bit results in "dst". + Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst". - FOR j := 0 to 7 - tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) - tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) - tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) - tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) - dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) - ENDFOR - dst[MAX:256] := 0 +FOR j := 0 to 7 + tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) + tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) + tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) + tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) + dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) +ENDFOR +dst[MAX:256] := 0 - AVX_VNNI -
immintrin.h
- Arithmetic + AVX_VNNI +
immintrin.h
+ Arithmetic
- Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with - corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. - Sum these 2 results with the corresponding 32-bit integer in "src", and store the packed - 32-bit results in "dst". + Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst". - FOR j := 0 to 7 - tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) - tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) - dst.dword[j] := src.dword[j] + tmp1 + tmp2 - ENDFOR - dst[MAX:256] := 0 +FOR j := 0 to 7 + tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) + tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) + dst.dword[j] := src.dword[j] + tmp1 + tmp2 +ENDFOR +dst[MAX:256] := 0 - AVX_VNNI -
immintrin.h
- Arithmetic + AVX_VNNI +
immintrin.h
+ Arithmetic
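The 16-bit form pairs two sign-extended products per dword lane; a minimal scalar model (an i16 x i16 product cannot overflow i32, so plain multiplication is exact):

```rust
// One dword lane of VPDPWSSD: two i16 x i16 products widened to i32,
// accumulated with wrapping addition (no saturation in this form).
fn dpwssd_lane(src: i32, a: [i16; 2], b: [i16; 2]) -> i32 {
    src.wrapping_add((a[0] as i32) * (b[0] as i32))
        .wrapping_add((a[1] as i32) * (b[1] as i32))
}
```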
- Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with - corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. - Sum these 2 results with the corresponding 32-bit integer in "src" using signed - saturation, and store the packed 32-bit results in "dst". + Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst". - FOR j := 0 to 7 - tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) - tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) - dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2) - ENDFOR - dst[MAX:256] := 0 +FOR j := 0 to 7 + tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) + tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) + dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2) +ENDFOR +dst[MAX:256] := 0 - AVX_VNNI -
immintrin.h
- Arithmetic + AVX_VNNI +
immintrin.h
+ Arithmetic
- Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with - corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit - results. Sum these 4 results with the corresponding 32-bit integer in "src", and store - the packed 32-bit results in "dst". + Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst". - FOR j := 0 to 3 - tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) - tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) - tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) - tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) - dst.dword[j] := src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 - ENDFOR - dst[MAX:128] := 0 +FOR j := 0 to 3 + tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) + tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) + tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) + tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) + dst.dword[j] := src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 +ENDFOR +dst[MAX:128] := 0 - AVX_VNNI -
immintrin.h
- Arithmetic + AVX_VNNI +
immintrin.h
+ Arithmetic
- Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with - corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit - results. Sum these 4 results with the corresponding 32-bit integer in "src" using signed - saturation, and store the packed 32-bit results in "dst". + Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst". - FOR j := 0 to 3 - tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) - tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) - tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) - tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) - dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) - ENDFOR - dst[MAX:128] := 0 +FOR j := 0 to 3 + tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j])) + tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1])) + tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2])) + tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3])) + dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) +ENDFOR +dst[MAX:128] := 0 - AVX_VNNI -
immintrin.h
- Arithmetic + AVX_VNNI +
immintrin.h
+ Arithmetic
- Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with - corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. - Sum these 2 results with the corresponding 32-bit integer in "src", and store the packed - 32-bit results in "dst". + Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst". - FOR j := 0 to 3 - tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) - tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) - dst.dword[j] := src.dword[j] + tmp1 + tmp2 - ENDFOR - dst[MAX:128] := 0 +FOR j := 0 to 3 + tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) + tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) + dst.dword[j] := src.dword[j] + tmp1 + tmp2 +ENDFOR +dst[MAX:128] := 0 - AVX_VNNI -
immintrin.h
- Arithmetic + AVX_VNNI +
immintrin.h
+ Arithmetic
- Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with - corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. - Sum these 2 results with the corresponding 32-bit integer in "src" using signed - saturation, and store the packed 32-bit results in "dst". + Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst". - FOR j := 0 to 3 - tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) - tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) - dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2) - ENDFOR - dst[MAX:128] := 0 +FOR j := 0 to 3 + tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j]) + tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1]) + dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2) +ENDFOR +dst[MAX:128] := 0 - AVX_VNNI -
immintrin.h
- Arithmetic -
- - - - Multiply groups of 2 adjacent pairs of signed 16-bit integers in "__A" with - corresponding unsigned 16-bit integers in "__B", producing 2 intermediate signed 32-bit - results. Sum these 2 results with the corresponding 32-bit integer in "__W", and store - the packed 32-bit results in "dst". - - - FOR j := 0 to 7 - tmp1.dword := SignExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j]) - tmp2.dword := SignExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1]) - dst.dword[j] := __W.dword[j] + tmp1 + tmp2 - ENDFOR - dst[MAX:256] := 0 + AVX_VNNI +
immintrin.h
+ Arithmetic +
+ + + + Multiply groups of 2 adjacent pairs of signed 16-bit integers in "__A" with corresponding unsigned 16-bit integers in "__B", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "__W", and store the packed 32-bit results in "dst". + + +FOR j := 0 to 7 + tmp1.dword := SignExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j]) + tmp2.dword := SignExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1]) + dst.dword[j] := __W.dword[j] + tmp1 + tmp2 +ENDFOR +dst[MAX:256] := 0 - - - - AVX_VNNI_INT16 -
immintrin.h
- Arithmetic -
- - - Multiply groups of 2 adjacent pairs of signed 16-bit integers in "__A" with - corresponding unsigned 16-bit integers in "__B", producing 2 intermediate signed 32-bit - results. Sum these 2 results with the corresponding 32-bit integer in "__W" with signed - saturation, and store the packed 32-bit results in "dst". - - - FOR j := 0 to 7 - tmp1.dword := SignExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j]) - tmp2.dword := SignExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1]) - dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2) - ENDFOR - dst[MAX:256] := 0 - - - - AVX_VNNI_INT16 -
immintrin.h
- Arithmetic -
- - - Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in "__A" with - corresponding signed 16-bit integers in "__B", producing 2 intermediate signed 32-bit - results. Sum these 2 results with the corresponding 32-bit integer in "__W", and store - the packed 32-bit results in "dst". - - - FOR j := 0 to 7 - tmp1.dword := ZeroExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j]) - tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1]) - dst.dword[j] := __W.dword[j] + tmp1 + tmp2 - ENDFOR - dst[MAX:256] := 0 + + + + AVX_VNNI_INT16 +
immintrin.h
+ Arithmetic +
+ + + Multiply groups of 2 adjacent pairs of signed 16-bit integers in "__A" with corresponding unsigned 16-bit integers in "__B", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "__W" with signed saturation, and store the packed 32-bit results in "dst". + + +FOR j := 0 to 7 + tmp1.dword := SignExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j]) + tmp2.dword := SignExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1]) + dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2) +ENDFOR +dst[MAX:256] := 0 + + + + AVX_VNNI_INT16 +
immintrin.h
+ Arithmetic +
+ + + Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in "__A" with corresponding signed 16-bit integers in "__B", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "__W", and store the packed 32-bit results in "dst". + + +FOR j := 0 to 7 + tmp1.dword := ZeroExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j]) + tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1]) + dst.dword[j] := __W.dword[j] + tmp1 + tmp2 +ENDFOR +dst[MAX:256] := 0 - - - - AVX_VNNI_INT16 -
immintrin.h
- Arithmetic -
- - - Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in "__A" with - corresponding signed 16-bit integers in "__B", producing 2 intermediate signed 32-bit - results. Sum these 2 results with the corresponding 32-bit integer in "__W" with signed - saturation, and store the packed 32-bit results in "dst". - - - FOR j := 0 to 7 - tmp1.dword := ZeroExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j]) - tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1]) - dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2) - ENDFOR - dst[MAX:256] := 0 - - - - AVX_VNNI_INT16 -
immintrin.h
- Arithmetic -
- - - Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in "__A" with - corresponding unsigned 16-bit integers in "__B", producing 2 intermediate signed 32-bit - results. Sum these 2 results with the corresponding 32-bit integer in "__W", and store - the packed 32-bit results in "dst". - - - FOR j := 0 to 7 - tmp1.dword := ZeroExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j]) - tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1]) - dst.dword[j] := __W.dword[j] + tmp1 + tmp2 - ENDFOR - dst[MAX:256] := 0 + + + + AVX_VNNI_INT16 +
immintrin.h
+ Arithmetic +
+ + + Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in "__A" with corresponding signed 16-bit integers in "__B", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "__W" with signed saturation, and store the packed 32-bit results in "dst". + + +FOR j := 0 to 7 + tmp1.dword := ZeroExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j]) + tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1]) + dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2) +ENDFOR +dst[MAX:256] := 0 + + + + AVX_VNNI_INT16 +
immintrin.h
+ Arithmetic +
+ + + Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in "__A" with corresponding unsigned 16-bit integers in "__B", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "__W", and store the packed 32-bit results in "dst". + + +FOR j := 0 to 7 + tmp1.dword := ZeroExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j]) + tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1]) + dst.dword[j] := __W.dword[j] + tmp1 + tmp2 +ENDFOR +dst[MAX:256] := 0 - - - - AVX_VNNI_INT16 -
immintrin.h
- Arithmetic -
- - - Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in "__A" with - corresponding unsigned 16-bit integers in "__B", producing 2 intermediate signed 32-bit - results. Sum these 2 results with the corresponding 32-bit integer in "__W" with signed - saturation, and store the packed 32-bit results in "dst". - - - FOR j := 0 to 7 - tmp1.dword := ZeroExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j]) - tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1]) - dst.dword[j] := UNSIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2) - ENDFOR - dst[MAX:256] := 0 - - - - AVX_VNNI_INT16 -
immintrin.h
- Arithmetic -
- - - Multiply groups of 2 adjacent pairs of signed 16-bit integers in "__A" with - corresponding unsigned 16-bit integers in "__B", producing 2 intermediate signed 32-bit - results. Sum these 2 results with the corresponding 32-bit integer in "__W", and store - the packed 32-bit results in "dst". - - - FOR j := 0 to 3 - tmp1.dword := SignExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j]) - tmp2.dword := SignExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1]) - dst.dword[j] := __W.dword[j] + tmp1 + tmp2 - ENDFOR - dst[MAX:128] := 0 + + + + AVX_VNNI_INT16 +
immintrin.h
+ Arithmetic +
+ + + Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in "__A" with corresponding unsigned 16-bit integers in "__B", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "__W" with signed saturation, and store the packed 32-bit results in "dst". + + +FOR j := 0 to 7 + tmp1.dword := ZeroExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j]) + tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1]) + dst.dword[j] := UNSIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2) +ENDFOR +dst[MAX:256] := 0 + + + + AVX_VNNI_INT16 +
immintrin.h
+ Arithmetic +
+ + + Multiply groups of 2 adjacent pairs of signed 16-bit integers in "__A" with corresponding unsigned 16-bit integers in "__B", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "__W", and store the packed 32-bit results in "dst". + + +FOR j := 0 to 3 + tmp1.dword := SignExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j]) + tmp2.dword := SignExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1]) + dst.dword[j] := __W.dword[j] + tmp1 + tmp2 +ENDFOR +dst[MAX:128] := 0 - - - - AVX_VNNI_INT16 -
immintrin.h
- Arithmetic -
- - - Multiply groups of 2 adjacent pairs of signed 16-bit integers in "__A" with - corresponding unsigned 16-bit integers in "__B", producing 2 intermediate signed 32-bit - results. Sum these 2 results with the corresponding 32-bit integer in "__W" with signed - saturation, and store the packed 32-bit results in "dst". - - - FOR j := 0 to 3 - tmp1.dword := SignExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j]) - tmp2.dword := SignExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1]) - dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2) - ENDFOR - dst[MAX:128] := 0 - - - - AVX_VNNI_INT16 -
immintrin.h
- Arithmetic -
- - - Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in "__A" with - corresponding signed 16-bit integers in "__B", producing 2 intermediate signed 32-bit - results. Sum these 2 results with the corresponding 32-bit integer in "__W", and store - the packed 32-bit results in "dst". - - - FOR j := 0 to 3 - tmp1.dword := ZeroExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j]) - tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1]) - dst.dword[j] := __W.dword[j] + tmp1 + tmp2 - ENDFOR - dst[MAX:128] := 0 + + + + AVX_VNNI_INT16 +
immintrin.h
+ Arithmetic +
+ + + Multiply groups of 2 adjacent pairs of signed 16-bit integers in "__A" with corresponding unsigned 16-bit integers in "__B", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "__W" with signed saturation, and store the packed 32-bit results in "dst". + + +FOR j := 0 to 3 + tmp1.dword := SignExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j]) + tmp2.dword := SignExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1]) + dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2) +ENDFOR +dst[MAX:128] := 0 + + + + AVX_VNNI_INT16 +
immintrin.h
+ Arithmetic +
+ + + Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in "__A" with corresponding signed 16-bit integers in "__B", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "__W", and store the packed 32-bit results in "dst". + + +FOR j := 0 to 3 + tmp1.dword := ZeroExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j]) + tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1]) + dst.dword[j] := __W.dword[j] + tmp1 + tmp2 +ENDFOR +dst[MAX:128] := 0 - - - - AVX_VNNI_INT16 -
immintrin.h
- Arithmetic -
- - - Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in "__A" with - corresponding signed 16-bit integers in "__B", producing 2 intermediate signed 32-bit - results. Sum these 2 results with the corresponding 32-bit integer in "__W" with signed - saturation, and store the packed 32-bit results in "dst". - - - FOR j := 0 to 3 - tmp1.dword := ZeroExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j]) - tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1]) - dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2) - ENDFOR - dst[MAX:128] := 0 - - - - AVX_VNNI_INT16 -
immintrin.h
- Arithmetic -
- - - Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in "__A" with - corresponding unsigned 16-bit integers in "__B", producing 2 intermediate signed 32-bit - results. Sum these 2 results with the corresponding 32-bit integer in "__W", and store - the packed 32-bit results in "dst". - - - FOR j := 0 to 3 - tmp1.dword := ZeroExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j]) - tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1]) - dst.dword[j] := __W.dword[j] + tmp1 + tmp2 - ENDFOR - dst[MAX:128] := 0 + + + + AVX_VNNI_INT16 +
immintrin.h
+ Arithmetic +
+ + + Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in "__A" with corresponding signed 16-bit integers in "__B", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "__W" with signed saturation, and store the packed 32-bit results in "dst". + + +FOR j := 0 to 3 + tmp1.dword := ZeroExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j]) + tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1]) + dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2) +ENDFOR +dst[MAX:128] := 0 + + + + AVX_VNNI_INT16 +
immintrin.h
+ Arithmetic +
+ + + Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in "__A" with corresponding unsigned 16-bit integers in "__B", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "__W", and store the packed 32-bit results in "dst". + + +FOR j := 0 to 3 + tmp1.dword := ZeroExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j]) + tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1]) + dst.dword[j] := __W.dword[j] + tmp1 + tmp2 +ENDFOR +dst[MAX:128] := 0 - - - - AVX_VNNI_INT16 -
immintrin.h
- Arithmetic -
- - - Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in "__A" with - corresponding unsigned 16-bit integers in "__B", producing 2 intermediate signed 32-bit - results. Sum these 2 results with the corresponding 32-bit integer in "__W" with signed - saturation, and store the packed 32-bit results in "dst". - - - FOR j := 0 to 3 - tmp1.dword := ZeroExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j]) - tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1]) - dst.dword[j] := UNSIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2) - ENDFOR - dst[MAX:128] := 0 - - - - AVX_VNNI_INT16 -
immintrin.h
- Arithmetic -
- + + + + AVX_VNNI_INT16 +
immintrin.h
+ Arithmetic +
+ + + Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in "__A" with corresponding unsigned 16-bit integers in "__B", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "__W" with signed saturation, and store the packed 32-bit results in "dst". + + +FOR j := 0 to 3 + tmp1.dword := ZeroExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j]) + tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1]) + dst.dword[j] := UNSIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2) +ENDFOR +dst[MAX:128] := 0 + + + + AVX_VNNI_INT16 +
immintrin.h
+ Arithmetic +
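All of the AVX-VNNI-INT16 variants above differ only in which operand is zero- or sign-extended and in the final saturation. A hedged scalar model of the u16 x i16 ("wusd") lane; the other combinations just swap the two casts:

```rust
// One dword lane of the u16 x i16 form: ZeroExtend32(__A.word) *
// SignExtend32(__B.word). Both products fit in i32, and the final
// accumulation wraps in the non-saturating form.
fn dpwusd_lane(w: i32, a: [u16; 2], b: [i16; 2]) -> i32 {
    let tmp1 = (a[0] as i32) * (b[0] as i32);
    let tmp2 = (a[1] as i32) * (b[1] as i32);
    w.wrapping_add(tmp1).wrapping_add(tmp2)
}
```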
+ - Multiply groups of 4 adjacent pairs of signed 8-bit integers in "__A" with - corresponding signed 8-bit integers in "__B", producing 4 intermediate signed 16-bit - results. Sum these 4 results with the corresponding 32-bit integer in "__W", and store - the packed 32-bit results in "dst". + Multiply groups of 4 adjacent pairs of signed 8-bit integers in "__A" with corresponding signed 8-bit integers in "__B", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "__W", and store the packed 32-bit results in "dst". - FOR j := 0 to 7 - tmp1.word := SignExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j]) - tmp2.word := SignExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1]) - tmp3.word := SignExtend16(__A.byte[4*j+2]) * SignExtend16(__B.byte[4*j+2]) - tmp4.word := SignExtend16(__A.byte[4*j+3]) * SignExtend16(__B.byte[4*j+3]) - dst.dword[j] := __W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 - ENDFOR - dst[MAX:256] := 0 +FOR j := 0 to 7 + tmp1.word := SignExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j]) + tmp2.word := SignExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1]) + tmp3.word := SignExtend16(__A.byte[4*j+2]) * SignExtend16(__B.byte[4*j+2]) + tmp4.word := SignExtend16(__A.byte[4*j+3]) * SignExtend16(__B.byte[4*j+3]) + dst.dword[j] := __W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 +ENDFOR +dst[MAX:256] := 0 - AVX_VNNI_INT8 -
immintrin.h
- Arithmetic + AVX_VNNI_INT8 +
immintrin.h
+ Arithmetic
- Multiply groups of 4 adjacent pairs of signed 8-bit integers in "__A" with - corresponding signed 8-bit integers in "__B", producing 4 intermediate signed 16-bit - results. Sum these 4 results with the corresponding 32-bit integer in "__W" with signed - saturation, and store the packed 32-bit results in "dst". + Multiply groups of 4 adjacent pairs of signed 8-bit integers in "__A" with corresponding signed 8-bit integers in "__B", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "__W" with signed saturation, and store the packed 32-bit results in "dst". - FOR j := 0 to 7 - tmp1.word := SignExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j]) - tmp2.word := SignExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1]) - tmp3.word := SignExtend16(__A.byte[4*j+2]) * SignExtend16(__B.byte[4*j+2]) - tmp4.word := SignExtend16(__A.byte[4*j+3]) * SignExtend16(__B.byte[4*j+3]) - dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) - ENDFOR - dst[MAX:256] := 0 +FOR j := 0 to 7 + tmp1.word := SignExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j]) + tmp2.word := SignExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1]) + tmp3.word := SignExtend16(__A.byte[4*j+2]) * SignExtend16(__B.byte[4*j+2]) + tmp4.word := SignExtend16(__A.byte[4*j+3]) * SignExtend16(__B.byte[4*j+3]) + dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) +ENDFOR +dst[MAX:256] := 0 - AVX_VNNI_INT8 -
immintrin.h
- Arithmetic + AVX_VNNI_INT8 +
immintrin.h
+ Arithmetic
- Multiply groups of 4 adjacent pairs of signed 8-bit integers in "__A" with - corresponding unsigned 8-bit integers in "__B", producing 4 intermediate signed 16-bit - results. Sum these 4 results with the corresponding 32-bit integer in "__W", and store - the packed 32-bit results in "dst". + Multiply groups of 4 adjacent pairs of signed 8-bit integers in "__A" with corresponding unsigned 8-bit integers in "__B", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "__W", and store the packed 32-bit results in "dst". - FOR j := 0 to 7 - tmp1.word := Signed(SignExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j])) - tmp2.word := Signed(SignExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1])) - tmp3.word := Signed(SignExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2])) - tmp4.word := Signed(SignExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3])) - dst.dword[j] := __W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 - ENDFOR - dst[MAX:256] := 0 +FOR j := 0 to 7 + tmp1.word := Signed(SignExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j])) + tmp2.word := Signed(SignExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1])) + tmp3.word := Signed(SignExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2])) + tmp4.word := Signed(SignExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3])) + dst.dword[j] := __W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 +ENDFOR +dst[MAX:256] := 0 - AVX_VNNI_INT8 -
immintrin.h
- Arithmetic + AVX_VNNI_INT8 +
immintrin.h
+ Arithmetic
- Multiply groups of 4 adjacent pairs of signed 8-bit integers in "__A" with - corresponding unsigned 8-bit integers in "__B", producing 4 intermediate signed 16-bit - results. Sum these 4 results with the corresponding 32-bit integer in "__W" with signed - saturation, and store the packed 32-bit results in "dst". + Multiply groups of 4 adjacent pairs of signed 8-bit integers in "__A" with corresponding unsigned 8-bit integers in "__B", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "__W" with signed saturation, and store the packed 32-bit results in "dst". - FOR j := 0 to 7 - tmp1.word := Signed(SignExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j])) - tmp2.word := Signed(SignExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1])) - tmp3.word := Signed(SignExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2])) - tmp4.word := Signed(SignExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3])) - dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) - ENDFOR - dst[MAX:256] := 0 +FOR j := 0 to 7 + tmp1.word := Signed(SignExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j])) + tmp2.word := Signed(SignExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1])) + tmp3.word := Signed(SignExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2])) + tmp4.word := Signed(SignExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3])) + dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) +ENDFOR +dst[MAX:256] := 0 - AVX_VNNI_INT8 -
immintrin.h
- Arithmetic + AVX_VNNI_INT8 +
immintrin.h
+ Arithmetic
- Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "__A" with - corresponding unsigned 8-bit integers in "__B", producing 4 intermediate signed 16-bit - results. Sum these 4 results with the corresponding 32-bit integer in "__W", and store - the packed 32-bit results in "dst". + Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "__A" with corresponding unsigned 8-bit integers in "__B", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "__W", and store the packed 32-bit results in "dst". - FOR j := 0 to 7 - tmp1.word := ZeroExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j]) - tmp2.word := ZeroExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1]) - tmp3.word := ZeroExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2]) - tmp4.word := ZeroExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3]) - dst.dword[j] := __W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 - ENDFOR - dst[MAX:256] := 0 +FOR j := 0 to 7 + tmp1.word := ZeroExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j]) + tmp2.word := ZeroExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1]) + tmp3.word := ZeroExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2]) + tmp4.word := ZeroExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3]) + dst.dword[j] := __W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 +ENDFOR +dst[MAX:256] := 0 - AVX_VNNI_INT8 -
immintrin.h
- Arithmetic + AVX_VNNI_INT8 +
immintrin.h
+ Arithmetic
- Multiply groups of 4 adjacent pairs of signed 8-bit integers in "__A" with - corresponding unsigned 8-bit integers in "__B", producing 4 intermediate signed 16-bit - results. Sum these 4 results with the corresponding 32-bit integer in "__W" with - unsigned saturation, and store the packed 32-bit results in "dst". + Multiply groups of 4 adjacent pairs of signed 8-bit integers in "__A" with corresponding unsigned 8-bit integers in "__B", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "__W" with unsigned saturation, and store the packed 32-bit results in "dst". - FOR j := 0 to 7 - tmp1.word := ZeroExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j]) - tmp2.word := ZeroExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1]) - tmp3.word := ZeroExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2]) - tmp4.word := ZeroExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3]) - dst.dword[j] := UNSIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) - ENDFOR - dst[MAX:256] := 0 +FOR j := 0 to 7 + tmp1.word := ZeroExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j]) + tmp2.word := ZeroExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1]) + tmp3.word := ZeroExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2]) + tmp4.word := ZeroExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3]) + dst.dword[j] := UNSIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) +ENDFOR +dst[MAX:256] := 0 - AVX_VNNI_INT8 -
immintrin.h
- Arithmetic + AVX_VNNI_INT8 +
immintrin.h
+ Arithmetic
- Multiply groups of 4 adjacent pairs of signed 8-bit integers in "__A" with - corresponding signed 8-bit integers in "__B", producing 4 intermediate signed 16-bit - results. Sum these 4 results with the corresponding 32-bit integer in "__W", and store - the packed 32-bit results in "dst". + Multiply groups of 4 adjacent pairs of signed 8-bit integers in "__A" with corresponding signed 8-bit integers in "__B", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "__W", and store the packed 32-bit results in "dst". - FOR j := 0 to 3 - tmp1.word := SignExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j]) - tmp2.word := SignExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1]) - tmp3.word := SignExtend16(__A.byte[4*j+2]) * SignExtend16(__B.byte[4*j+2]) - tmp4.word := SignExtend16(__A.byte[4*j+3]) * SignExtend16(__B.byte[4*j+3]) - dst.dword[j] := __W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 - ENDFOR - dst[MAX:128] := 0 +FOR j := 0 to 3 + tmp1.word := SignExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j]) + tmp2.word := SignExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1]) + tmp3.word := SignExtend16(__A.byte[4*j+2]) * SignExtend16(__B.byte[4*j+2]) + tmp4.word := SignExtend16(__A.byte[4*j+3]) * SignExtend16(__B.byte[4*j+3]) + dst.dword[j] := __W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 +ENDFOR +dst[MAX:128] := 0 - AVX_VNNI_INT8 -
immintrin.h
- Arithmetic + AVX_VNNI_INT8 +
immintrin.h
+ Arithmetic
- Multiply groups of 4 adjacent pairs of signed 8-bit integers in "__A" with - corresponding signed 8-bit integers in "__B", producing 4 intermediate signed 16-bit - results. Sum these 4 results with the corresponding 32-bit integer in "__W" with signed - saturation, and store the packed 32-bit results in "dst". + Multiply groups of 4 adjacent pairs of signed 8-bit integers in "__A" with corresponding signed 8-bit integers in "__B", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "__W" with signed saturation, and store the packed 32-bit results in "dst". - FOR j := 0 to 3 - tmp1.word := SignExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j]) - tmp2.word := SignExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1]) - tmp3.word := SignExtend16(__A.byte[4*j+2]) * SignExtend16(__B.byte[4*j+2]) - tmp4.word := SignExtend16(__A.byte[4*j+3]) * SignExtend16(__B.byte[4*j+3]) - dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) - ENDFOR - dst[MAX:128] := 0 +FOR j := 0 to 3 + tmp1.word := SignExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j]) + tmp2.word := SignExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1]) + tmp3.word := SignExtend16(__A.byte[4*j+2]) * SignExtend16(__B.byte[4*j+2]) + tmp4.word := SignExtend16(__A.byte[4*j+3]) * SignExtend16(__B.byte[4*j+3]) + dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) +ENDFOR +dst[MAX:128] := 0 - AVX_VNNI_INT8 -
immintrin.h
- Arithmetic + AVX_VNNI_INT8 +
immintrin.h
+ Arithmetic
- Multiply groups of 4 adjacent pairs of signed 8-bit integers in "__A" with - corresponding unsigned 8-bit integers in "__B", producing 4 intermediate signed 16-bit - results. Sum these 4 results with the corresponding 32-bit integer in "__W", and store - the packed 32-bit results in "dst". + Multiply groups of 4 adjacent pairs of signed 8-bit integers in "__A" with corresponding unsigned 8-bit integers in "__B", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "__W", and store the packed 32-bit results in "dst". - FOR j := 0 to 3 - tmp1.word := Signed(SignExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j])) - tmp2.word := Signed(SignExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1])) - tmp3.word := Signed(SignExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2])) - tmp4.word := Signed(SignExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3])) - dst.dword[j] := __W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 - ENDFOR - dst[MAX:128] := 0 +FOR j := 0 to 3 + tmp1.word := Signed(SignExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j])) + tmp2.word := Signed(SignExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1])) + tmp3.word := Signed(SignExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2])) + tmp4.word := Signed(SignExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3])) + dst.dword[j] := __W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 +ENDFOR +dst[MAX:128] := 0 - AVX_VNNI_INT8 -
immintrin.h
- Arithmetic + AVX_VNNI_INT8 +
immintrin.h
+ Arithmetic
- Multiply groups of 4 adjacent pairs of signed 8-bit integers in "__A" with - corresponding unsigned 8-bit integers in "__B", producing 4 intermediate signed 16-bit - results. Sum these 4 results with the corresponding 32-bit integer in "__W" with signed - saturation, and store the packed 32-bit results in "dst". + Multiply groups of 4 adjacent pairs of signed 8-bit integers in "__A" with corresponding unsigned 8-bit integers in "__B", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "__W" with signed saturation, and store the packed 32-bit results in "dst". - FOR j := 0 to 3 - tmp1.word := Signed(SignExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j])) - tmp2.word := Signed(SignExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1])) - tmp3.word := Signed(SignExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2])) - tmp4.word := Signed(SignExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3])) - dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) - ENDFOR - dst[MAX:128] := 0 +FOR j := 0 to 3 + tmp1.word := Signed(SignExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j])) + tmp2.word := Signed(SignExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1])) + tmp3.word := Signed(SignExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2])) + tmp4.word := Signed(SignExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3])) + dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) +ENDFOR +dst[MAX:128] := 0 - AVX_VNNI_INT8 -
immintrin.h
- Arithmetic + AVX_VNNI_INT8 +
immintrin.h
+ Arithmetic
- Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "__A" with - corresponding unsigned 8-bit integers in "__B", producing 4 intermediate signed 16-bit - results. Sum these 4 results with the corresponding 32-bit integer in "__W", and store - the packed 32-bit results in "dst". + Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "__A" with corresponding unsigned 8-bit integers in "__B", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "__W", and store the packed 32-bit results in "dst". - FOR j := 0 to 3 - tmp1.word := ZeroExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j]) - tmp2.word := ZeroExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1]) - tmp3.word := ZeroExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2]) - tmp4.word := ZeroExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3]) - dst.dword[j] := __W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 - ENDFOR - dst[MAX:128] := 0 +FOR j := 0 to 3 + tmp1.word := ZeroExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j]) + tmp2.word := ZeroExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1]) + tmp3.word := ZeroExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2]) + tmp4.word := ZeroExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3]) + dst.dword[j] := __W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 +ENDFOR +dst[MAX:128] := 0 - AVX_VNNI_INT8 -
immintrin.h
- Arithmetic + AVX_VNNI_INT8 +
immintrin.h
+ Arithmetic
- Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "__A" with - corresponding unsigned 8-bit integers in "__B", producing 4 intermediate signed 16-bit - results. Sum these 4 results with the corresponding 32-bit integer in "__W" with - unsigned saturation, and store the packed 32-bit results in "dst". + Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "__A" with corresponding unsigned 8-bit integers in "__B", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "__W" with unsigned saturation, and store the packed 32-bit results in "dst". - FOR j := 0 to 3 - tmp1.word := ZeroExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j]) - tmp2.word := ZeroExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1]) - tmp3.word := ZeroExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2]) - tmp4.word := ZeroExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3]) - dst.dword[j] := UNSIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) - ENDFOR - dst[MAX:128] := 0 +FOR j := 0 to 3 + tmp1.word := ZeroExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j]) + tmp2.word := ZeroExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1]) + tmp3.word := ZeroExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2]) + tmp4.word := ZeroExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3]) + dst.dword[j] := UNSIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) +ENDFOR +dst[MAX:128] := 0 - AVX_VNNI_INT8 -
immintrin.h
- Arithmetic -
- - - - - - - Extract contiguous bits from unsigned 32-bit integer "a", and store the result - in "dst". Extract the number of bits specified by "len", starting at the bit specified - by "start". - - tmp[511:0] := a - dst[31:0] := ZeroExtend32(tmp[(start[7:0] + len[7:0] - 1):start[7:0]]) - - - BMI1 -
immintrin.h
- Bit Manipulation + AVX_VNNI_INT8 +
immintrin.h
+ Arithmetic +
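For the u8 x u8 entries with UNSIGNED_DWORD_SATURATE, every intermediate is non-negative, so a scalar model can stay in unsigned arithmetic throughout (a sketch, not the stdarch binding):

```rust
// One dword lane of the u8 x u8 saturating form: four zero-extended
// products summed into a u32 accumulator, clamped at u32::MAX.
fn dpbuuds_lane(w: u32, a: [u8; 4], b: [u8; 4]) -> u32 {
    let mut sum = w as u64;
    for k in 0..4 {
        sum += (a[k] as u64) * (b[k] as u64); // ZeroExtend16 * ZeroExtend16
    }
    sum.min(u32::MAX as u64) as u32 // UNSIGNED_DWORD_SATURATE
}
```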
+ + + + + + + Extract contiguous bits from unsigned 32-bit integer "a", and store the result in "dst". Extract the number of bits specified by "len", starting at the bit specified by "start". + +tmp[511:0] := a +dst[31:0] := ZeroExtend32(tmp[(start[7:0] + len[7:0] - 1):start[7:0]]) + + + BMI1 +
immintrin.h
+ Bit Manipulation
- - - - Extract contiguous bits from unsigned 32-bit integer "a", and store the result - in "dst". Extract the number of bits specified by bits 15:8 of "control", starting at - the bit specified by bits 0:7 of "control". - - start := control[7:0] - len := control[15:8] - tmp[511:0] := a - dst[31:0] := ZeroExtend32(tmp[(start[7:0] + len[7:0] - 1):start[7:0]]) - - - BMI1 -
immintrin.h
- Bit Manipulation + + + + Extract contiguous bits from unsigned 32-bit integer "a", and store the result in "dst". Extract the number of bits specified by bits 15:8 of "control", starting at the bit specified by bits 0:7 of "control". + +start := control[7:0] +len := control[15:8] +tmp[511:0] := a +dst[31:0] := ZeroExtend32(tmp[(start[7:0] + len[7:0] - 1):start[7:0]]) + + + BMI1 +
immintrin.h
+ Bit Manipulation
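The 512-bit tmp in the BEXTR pseudocode only says that reads beyond the top of "a" see zeros; under that reading, a plain u32 model looks like this:

```rust
// BEXTR on u32: take `len` bits of `a` starting at bit `start`.
// Only the low byte of each control value participates.
fn bextr_u32(a: u32, start: u32, len: u32) -> u32 {
    let (start, len) = (start & 0xff, len & 0xff);
    if start >= 32 || len == 0 {
        return 0; // the selected field lies entirely above a's bits
    }
    let shifted = a >> start;
    if len >= 32 { shifted } else { shifted & ((1u32 << len) - 1) }
}
```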
- - - - - Extract contiguous bits from unsigned 64-bit integer "a", and store the result - in "dst". Extract the number of bits specified by "len", starting at the bit specified - by "start". - - tmp[511:0] := a - dst[63:0] := ZeroExtend64(tmp[(start[7:0] + len[7:0] - 1):start[7:0]]) - - - BMI1 -
immintrin.h
- Bit Manipulation + + + + + Extract contiguous bits from unsigned 64-bit integer "a", and store the result in "dst". Extract the number of bits specified by "len", starting at the bit specified by "start". + +tmp[511:0] := a +dst[63:0] := ZeroExtend64(tmp[(start[7:0] + len[7:0] - 1):start[7:0]]) + + + BMI1 +
immintrin.h
+ Bit Manipulation
- - - - Extract contiguous bits from unsigned 64-bit integer "a", and store the result - in "dst". Extract the number of bits specified by bits 15:8 of "control", starting at - the bit specified by bits 0:7 of "control".. - - start := control[7:0] - len := control[15:8] - tmp[511:0] := a - dst[63:0] := ZeroExtend64(tmp[(start[7:0] + len[7:0] - 1):start[7:0]]) - - - BMI1 -
immintrin.h
- Bit Manipulation
 + 
 + 
 + 
 + Extract contiguous bits from unsigned 64-bit integer "a", and store the result in "dst". Extract the number of bits specified by bits 15:8 of "control", starting at the bit specified by bits 0:7 of "control".
 +
+start := control[7:0]
+len := control[15:8]
+tmp[511:0] := a
+dst[63:0] := ZeroExtend64(tmp[(start[7:0] + len[7:0] - 1):start[7:0]])
 +
 +
 + BMI1 +
immintrin.h
+ Bit Manipulation
- - - Extract the lowest set bit from unsigned 32-bit integer "a" and set the - corresponding bit in "dst". All other bits in "dst" are zeroed, and all bits are zeroed - if no bits are set in "a". - - dst := (-a) AND a - - - BMI1 -
immintrin.h
- Bit Manipulation + + + Extract the lowest set bit from unsigned 32-bit integer "a" and set the corresponding bit in "dst". All other bits in "dst" are zeroed, and all bits are zeroed if no bits are set in "a". + +dst := (-a) AND a + + + BMI1 +
immintrin.h
+ Bit Manipulation
- - - Extract the lowest set bit from unsigned 64-bit integer "a" and set the - corresponding bit in "dst". All other bits in "dst" are zeroed, and all bits are zeroed - if no bits are set in "a". - - dst := (-a) AND a - - - BMI1 -
immintrin.h
- Bit Manipulation + + + Extract the lowest set bit from unsigned 64-bit integer "a" and set the corresponding bit in "dst". All other bits in "dst" are zeroed, and all bits are zeroed if no bits are set in "a". + +dst := (-a) AND a + + + BMI1 +
immintrin.h
+ Bit Manipulation
- - - Set all the lower bits of "dst" up to and including the lowest set bit in - unsigned 32-bit integer "a". - - dst := (a - 1) XOR a - - - BMI1 -
immintrin.h
- Bit Manipulation + + + Set all the lower bits of "dst" up to and including the lowest set bit in unsigned 32-bit integer "a". + +dst := (a - 1) XOR a + + + BMI1 +
immintrin.h
+ Bit Manipulation
- - - Set all the lower bits of "dst" up to and including the lowest set bit in - unsigned 64-bit integer "a". - - dst := (a - 1) XOR a - - - BMI1 -
immintrin.h
- Bit Manipulation + + + Set all the lower bits of "dst" up to and including the lowest set bit in unsigned 64-bit integer "a". + +dst := (a - 1) XOR a + + + BMI1 +
immintrin.h
+ Bit Manipulation
- - - Copy all bits from unsigned 32-bit integer "a" to "dst", and reset (set to 0) - the bit in "dst" that corresponds to the lowest set bit in "a". - - dst := (a - 1) AND a - - - BMI1 -
immintrin.h
- Bit Manipulation + + + Copy all bits from unsigned 32-bit integer "a" to "dst", and reset (set to 0) the bit in "dst" that corresponds to the lowest set bit in "a". + +dst := (a - 1) AND a + + + BMI1 +
immintrin.h
+ Bit Manipulation
- - - Copy all bits from unsigned 64-bit integer "a" to "dst", and reset (set to 0) - the bit in "dst" that corresponds to the lowest set bit in "a". - - dst := (a - 1) AND a - - - BMI1 -
immintrin.h
- Bit Manipulation + + + Copy all bits from unsigned 64-bit integer "a" to "dst", and reset (set to 0) the bit in "dst" that corresponds to the lowest set bit in "a". + +dst := (a - 1) AND a + + + BMI1 +
immintrin.h
+ Bit Manipulation
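The BLSI, BLSMSK, and BLSR identities above translate directly to Rust, with wrapping ops standing in for the two's-complement arithmetic:

```rust
fn blsi(a: u32) -> u32 { a.wrapping_neg() & a }    // isolate lowest set bit
fn blsmsk(a: u32) -> u32 { a.wrapping_sub(1) ^ a } // mask up to and including it
fn blsr(a: u32) -> u32 { a.wrapping_sub(1) & a }   // clear lowest set bit
```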
- - - - Compute the bitwise NOT of 32-bit integer "a" and then AND with b, and store - the results in dst. - - dst[31:0] := ((NOT a[31:0]) AND b[31:0]) - - - BMI1 -
immintrin.h
- Bit Manipulation + + + + Compute the bitwise NOT of 32-bit integer "a" and then AND with b, and store the results in dst. + +dst[31:0] := ((NOT a[31:0]) AND b[31:0]) + + + BMI1 +
immintrin.h
+ Bit Manipulation
- - - - Compute the bitwise NOT of 64-bit integer "a" and then AND with b, and store - the results in dst. - - dst[63:0] := ((NOT a[63:0]) AND b[63:0]) - - - BMI1 -
immintrin.h
- Bit Manipulation + + + + Compute the bitwise NOT of 64-bit integer "a" and then AND with b, and store the results in dst. + +dst[63:0] := ((NOT a[63:0]) AND b[63:0]) + + + BMI1 +
immintrin.h
+ Bit Manipulation
- - - Count the number of trailing zero bits in unsigned 16-bit integer "a", and - return that count in "dst". - - tmp := 0 - dst := 0 - DO WHILE ((tmp < 16) AND a[tmp] == 0) - tmp := tmp + 1 - dst := dst + 1 - OD - - - BMI1 -
immintrin.h
- Bit Manipulation + + + Count the number of trailing zero bits in unsigned 16-bit integer "a", and return that count in "dst". + +tmp := 0 +dst := 0 +DO WHILE ((tmp < 16) AND a[tmp] == 0) + tmp := tmp + 1 + dst := dst + 1 +OD + + + BMI1 +
immintrin.h
+ Bit Manipulation
- - - Count the number of trailing zero bits in unsigned 32-bit integer "a", and - return that count in "dst". - - tmp := 0 - dst := 0 - DO WHILE ((tmp < 32) AND a[tmp] == 0) - tmp := tmp + 1 - dst := dst + 1 - OD - - - BMI1 -
immintrin.h
- Bit Manipulation + + + Count the number of trailing zero bits in unsigned 32-bit integer "a", and return that count in "dst". + +tmp := 0 +dst := 0 +DO WHILE ((tmp < 32) AND a[tmp] == 0) + tmp := tmp + 1 + dst := dst + 1 +OD + + + BMI1 +
immintrin.h
+ Bit Manipulation
- - - Count the number of trailing zero bits in unsigned 64-bit integer "a", and - return that count in "dst". - - tmp := 0 - dst := 0 - DO WHILE ((tmp < 64) AND a[tmp] == 0) - tmp := tmp + 1 - dst := dst + 1 - OD - - - BMI1 -
immintrin.h
- Bit Manipulation + + + Count the number of trailing zero bits in unsigned 64-bit integer "a", and return that count in "dst". + +tmp := 0 +dst := 0 +DO WHILE ((tmp < 64) AND a[tmp] == 0) + tmp := tmp + 1 + dst := dst + 1 +OD + + + BMI1 +
immintrin.h
+ Bit Manipulation
- - - Count the number of trailing zero bits in unsigned 32-bit integer "a", and - return that count in "dst". - - tmp := 0 - dst := 0 - DO WHILE ((tmp < 32) AND a[tmp] == 0) - tmp := tmp + 1 - dst := dst + 1 - OD - - - BMI1 -
immintrin.h
- Bit Manipulation + + + Count the number of trailing zero bits in unsigned 32-bit integer "a", and return that count in "dst". + +tmp := 0 +dst := 0 +DO WHILE ((tmp < 32) AND a[tmp] == 0) + tmp := tmp + 1 + dst := dst + 1 +OD + + + BMI1 +
immintrin.h
+ Bit Manipulation
- - - Count the number of trailing zero bits in unsigned 64-bit integer "a", and - return that count in "dst". - - tmp := 0 - dst := 0 - DO WHILE ((tmp < 64) AND a[tmp] == 0) - tmp := tmp + 1 - dst := dst + 1 - OD - - - BMI1 -
immintrin.h
- Bit Manipulation -
- - - - - - - Copy all bits from unsigned 32-bit integer "a" to "dst", and reset (set to 0) - the high bits in "dst" starting at "index". - - n := index[7:0] - dst := a - IF (n < 32) - dst[31:n] := 0 - FI - - - BMI2 -
immintrin.h
- Bit Manipulation + + + Count the number of trailing zero bits in unsigned 64-bit integer "a", and return that count in "dst". + +tmp := 0 +dst := 0 +DO WHILE ((tmp < 64) AND a[tmp] == 0) + tmp := tmp + 1 + dst := dst + 1 +OD + + + BMI1 +
immintrin.h
+ Bit Manipulation +
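The DO WHILE loop in these TZCNT entries is exactly Rust's trailing_zeros, including the all-zero case, which yields the full operand width:

```rust
fn tzcnt_u32(a: u32) -> u32 {
    // Same as the pseudocode loop: probe bits upward from bit 0;
    // when a == 0 every probe fails and the count is 32.
    a.trailing_zeros() // e.g. 0b1010_0000 -> 5, and 0 -> 32
}
```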
+ + + + + + + Copy all bits from unsigned 32-bit integer "a" to "dst", and reset (set to 0) the high bits in "dst" starting at "index". + +n := index[7:0] +dst := a +IF (n < 32) + dst[31:n] := 0 +FI + + + BMI2 +
immintrin.h
+ Bit Manipulation
- - - - Copy all bits from unsigned 64-bit integer "a" to "dst", and reset (set to 0) - the high bits in "dst" starting at "index". - - n := index[7:0] - dst := a - IF (n < 64) - dst[63:n] := 0 - FI - - - BMI2 -
immintrin.h
- Bit Manipulation + + + + Copy all bits from unsigned 64-bit integer "a" to "dst", and reset (set to 0) the high bits in "dst" starting at "index". + +n := index[7:0] +dst := a +IF (n < 64) + dst[63:n] := 0 +FI + + + BMI2 +
immintrin.h
+ Bit Manipulation
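BZHI reduces to a mask when the index is in range; a u32 sketch of the pseudocode above:

```rust
// BZHI: zero the bits of `a` at positions index[7:0] and above.
fn bzhi_u32(a: u32, index: u32) -> u32 {
    let n = index & 0xff; // n := index[7:0]
    if n < 32 { a & ((1u32 << n) - 1) } else { a }
}
```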
- - - - Deposit contiguous low bits from unsigned 32-bit integer "a" to "dst" at the - corresponding bit locations specified by "mask"; all other bits in "dst" are set to - zero. - - tmp := a - dst := 0 - m := 0 - k := 0 - DO WHILE m < 32 - IF mask[m] == 1 - dst[m] := tmp[k] - k := k + 1 - FI - m := m + 1 - OD - - - BMI2 -
immintrin.h
- Bit Manipulation + + + + Deposit contiguous low bits from unsigned 32-bit integer "a" to "dst" at the corresponding bit locations specified by "mask"; all other bits in "dst" are set to zero. + +tmp := a +dst := 0 +m := 0 +k := 0 +DO WHILE m < 32 + IF mask[m] == 1 + dst[m] := tmp[k] + k := k + 1 + FI + m := m + 1 +OD + + + BMI2 +
immintrin.h
+ Bit Manipulation
- - - - Deposit contiguous low bits from unsigned 64-bit integer "a" to "dst" at the - corresponding bit locations specified by "mask"; all other bits in "dst" are set to - zero. - - tmp := a - dst := 0 - m := 0 - k := 0 - DO WHILE m < 64 - IF mask[m] == 1 - dst[m] := tmp[k] - k := k + 1 - FI - m := m + 1 - OD - - - BMI2 -
immintrin.h
- Bit Manipulation + + + + Deposit contiguous low bits from unsigned 64-bit integer "a" to "dst" at the corresponding bit locations specified by "mask"; all other bits in "dst" are set to zero. + +tmp := a +dst := 0 +m := 0 +k := 0 +DO WHILE m < 64 + IF mask[m] == 1 + dst[m] := tmp[k] + k := k + 1 + FI + m := m + 1 +OD + + + BMI2 +
immintrin.h
+ Bit Manipulation
- - - - Extract bits from unsigned 32-bit integer "a" at the corresponding bit - locations specified by "mask" to contiguous low bits in "dst"; the remaining upper bits - in "dst" are set to zero. - - tmp := a - dst := 0 - m := 0 - k := 0 - DO WHILE m < 32 - IF mask[m] == 1 - dst[k] := tmp[m] - k := k + 1 - FI - m := m + 1 - OD - - - BMI2 -
immintrin.h
- Bit Manipulation + + + + Extract bits from unsigned 32-bit integer "a" at the corresponding bit locations specified by "mask" to contiguous low bits in "dst"; the remaining upper bits in "dst" are set to zero. + +tmp := a +dst := 0 +m := 0 +k := 0 +DO WHILE m < 32 + IF mask[m] == 1 + dst[k] := tmp[m] + k := k + 1 + FI + m := m + 1 +OD + + + BMI2 +
immintrin.h
+ Bit Manipulation
- - - - Extract bits from unsigned 64-bit integer "a" at the corresponding bit - locations specified by "mask" to contiguous low bits in "dst"; the remaining upper bits - in "dst" are set to zero. - - tmp := a - dst := 0 - m := 0 - k := 0 - DO WHILE m < 64 - IF mask[m] == 1 - dst[k] := tmp[m] - k := k + 1 - FI - m := m + 1 - OD - - - BMI2 -
immintrin.h
- Bit Manipulation + + + + Extract bits from unsigned 64-bit integer "a" at the corresponding bit locations specified by "mask" to contiguous low bits in "dst"; the remaining upper bits in "dst" are set to zero. + +tmp := a +dst := 0 +m := 0 +k := 0 +DO WHILE m < 64 + IF mask[m] == 1 + dst[k] := tmp[m] + k := k + 1 + FI + m := m + 1 +OD + + + BMI2 +
immintrin.h
+ Bit Manipulation
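The PDEP and PEXT loops transcribe almost verbatim into Rust; the 32-bit forms are shown, and the 64-bit forms only widen the types:

```rust
// PDEP: scatter the low bits of `a` to the set-bit positions of `mask`.
fn pdep_u32(a: u32, mask: u32) -> u32 {
    let (mut dst, mut k) = (0u32, 0u32);
    for m in 0..32 {
        if mask & (1u32 << m) != 0 {
            dst |= ((a >> k) & 1) << m; // deposit next low bit of a at position m
            k += 1;
        }
    }
    dst
}

// PEXT: gather the bits of `a` at the set-bit positions of `mask`
// into the contiguous low bits of the result.
fn pext_u32(a: u32, mask: u32) -> u32 {
    let (mut dst, mut k) = (0u32, 0u32);
    for m in 0..32 {
        if mask & (1u32 << m) != 0 {
            dst |= ((a >> m) & 1) << k; // pack bit m of a into the next low bit
            k += 1;
        }
    }
    dst
}
```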
- - - - - Multiply unsigned 32-bit integers "a" and "b", store the low 32-bits of the - result in "dst", and store the high 32-bits in "hi". This does not read or write - arithmetic flags. - - dst[31:0] := (a * b)[31:0] - MEM[hi+31:hi] := (a * b)[63:32] - - - BMI2 -
immintrin.h
- Arithmetic + + + + + Multiply unsigned 32-bit integers "a" and "b", store the low 32-bits of the result in "dst", and store the high 32-bits in "hi". This does not read or write arithmetic flags. + +dst[31:0] := (a * b)[31:0] +MEM[hi+31:hi] := (a * b)[63:32] + + + BMI2 +
immintrin.h
+ Arithmetic
- - - - - Multiply unsigned 64-bit integers "a" and "b", store the low 64-bits of the - result in "dst", and store the high 64-bits in "hi". This does not read or write - arithmetic flags. - - dst[63:0] := (a * b)[63:0] - MEM[hi+63:hi] := (a * b)[127:64] - - - BMI2 -
immintrin.h
- Arithmetic -
- - - - - - Increment the shadow stack pointer by 4 times the value specified in bits [7:0] - of "a". - - SSP := SSP + a[7:0] * 4 - - - CET_SS -
immintrin.h
- Miscellaneous + + + + + Multiply unsigned 64-bit integers "a" and "b", store the low 64-bits of the result in "dst", and store the high 64-bits in "hi". This does not read or write arithmetic flags. + +dst[63:0] := (a * b)[63:0] +MEM[hi+63:hi] := (a * b)[127:64] + + + BMI2 +
immintrin.h
+ Arithmetic +
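MULX is an ordinary widening multiply followed by a split; the 32-bit form in Rust (the 64-bit form would go through u128):

```rust
// MULX semantics for the 32-bit operands: full 64-bit product,
// returned as (low half, high half); flags are untouched by design.
fn mulx_u32(a: u32, b: u32) -> (u32, u32) {
    let wide = (a as u64) * (b as u64);
    (wide as u32, (wide >> 32) as u32)
}
```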
+ + + + + + Increment the shadow stack pointer by 4 times the value specified in bits [7:0] of "a". + +SSP := SSP + a[7:0] * 4 + + + CET_SS +
immintrin.h
+ Miscellaneous
- - - Increment the shadow stack pointer by 8 times the value specified in bits [7:0] - of "a". - - SSP := SSP + a[7:0] * 8 - - - CET_SS -
immintrin.h
- Miscellaneous + + + Increment the shadow stack pointer by 8 times the value specified in bits [7:0] of "a". + +SSP := SSP + a[7:0] * 8 + + + CET_SS +
immintrin.h
+ Miscellaneous
- - - Read the low 32-bits of the current shadow stack pointer, and store the result - in "dst". - dst := SSP[31:0] - - - CET_SS -
immintrin.h
- Miscellaneous + + + Read the low 32-bits of the current shadow stack pointer, and store the result in "dst". + dst := SSP[31:0] + + + CET_SS +
immintrin.h
+ Miscellaneous
- - - Read the current shadow stack pointer, and store the result in "dst". - dst := SSP[63:0] - - - CET_SS -
immintrin.h
- Miscellaneous + + + Read the current shadow stack pointer, and store the result in "dst". + dst := SSP[63:0] + + + CET_SS +
immintrin.h
+ Miscellaneous
- - - Save the previous shadow stack pointer context. - - CET_SS -
immintrin.h
- Miscellaneous + + + Save the previous shadow stack pointer context. + + CET_SS +
immintrin.h
+ Miscellaneous
- - - Restore the saved shadow stack pointer from the shadow stack restore token - previously created on shadow stack by saveprevssp. - - CET_SS -
immintrin.h
- Miscellaneous + + + Restore the saved shadow stack pointer from the shadow stack restore token previously created on shadow stack by saveprevssp. + + CET_SS +
immintrin.h
+ Miscellaneous
- - - - Write 32-bit value in "val" to a shadow stack page in memory specified by "p". - - CET_SS -
immintrin.h
- Miscellaneous + + + + Write 32-bit value in "val" to a shadow stack page in memory specified by "p". + + CET_SS +
immintrin.h
+ Miscellaneous
- - - - Write 64-bit value in "val" to a shadow stack page in memory specified by "p". - - CET_SS -
immintrin.h
- Miscellaneous + + + + Write 64-bit value in "val" to a shadow stack page in memory specified by "p". + + CET_SS +
immintrin.h
+ Miscellaneous
- - - - Write 32-bit value in "val" to a user shadow stack page in memory specified by - "p". - - CET_SS -
immintrin.h
- Miscellaneous + + + + Write 32-bit value in "val" to a user shadow stack page in memory specified by "p". + + CET_SS +
immintrin.h
+ Miscellaneous
- - - - Write 64-bit value in "val" to a user shadow stack page in memory specified by - "p". - - CET_SS -
immintrin.h
- Miscellaneous + + + + Write 64-bit value in "val" to a user shadow stack page in memory specified by "p". + + CET_SS +
immintrin.h
+ Miscellaneous
- - - Mark shadow stack pointed to by IA32_PL0_SSP as busy. - - CET_SS -
immintrin.h
- Miscellaneous + + + Mark shadow stack pointed to by IA32_PL0_SSP as busy. + + CET_SS +
immintrin.h
+ Miscellaneous
- - - Mark shadow stack pointed to by "p" as not busy. - - CET_SS -
immintrin.h
- Miscellaneous + + + Mark shadow stack pointed to by "p" as not busy. + + CET_SS +
immintrin.h
+ Miscellaneous
- - - If CET is enabled, read the low 32-bits of the current shadow stack pointer, - and store the result in "dst". Otherwise return 0. - dst := SSP[31:0] - - - CET_SS -
immintrin.h
- Miscellaneous + + + If CET is enabled, read the low 32-bits of the current shadow stack pointer, and store the result in "dst". Otherwise return 0. + dst := SSP[31:0] + + + CET_SS +
immintrin.h
+ Miscellaneous
- - - - If CET is enabled, read the current shadow stack pointer, and store the result - in "dst". Otherwise return 0. - dst := SSP[63:0] - - - CET_SS -
immintrin.h
- Miscellaneous + + + + If CET is enabled, read the current shadow stack pointer, and store the result in "dst". Otherwise return 0. + dst := SSP[63:0] + + + CET_SS +
immintrin.h
+ Miscellaneous
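The shadow-stack reads above have no Rust intrinsic in this tree; assuming inline assembly is acceptable, the _get_ssp semantics can be modelled like this (a sketch, not a supported API):

    #[cfg(target_arch = "x86_64")]
    unsafe fn get_ssp() -> u64 {
        use core::arch::asm;
        // RDSSPQ executes as a NOP when CET_SS is not enabled, so the
        // pre-zeroed register comes back unchanged, matching "return 0".
        let mut ssp: u64 = 0;
        asm!("rdsspq {}", inout(reg) ssp, options(nomem, nostack));
        ssp
    }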
- - - Increment the shadow stack pointer by 4 times the value specified in bits [7:0] - of "a". - - SSP := SSP + a[7:0] * 4 - - - CET_SS -
immintrin.h
- Miscellaneous -
- - - - - Hint to hardware that the cache line that contains "p" should be demoted from - the cache closest to the processor core to a level more distant from the processor core. - - CLDEMOTE -
immintrin.h
- Miscellaneous -
- - - - - - Invalidate and flush the cache line that contains "p" from all levels of the - cache hierarchy. - - CLFLUSHOPT -
immintrin.h
- General Support -
- - - - - - Write back to memory the cache line that contains "p" from any level of the - cache hierarchy in the cache coherence domain. - - CLWB -
immintrin.h
- General Support -
- - + + + Increment the shadow stack pointer by 4 times the value specified in bits [7:0] of "a". + +SSP := SSP + a[7:0] * 4 + + + CET_SS +
immintrin.h
+ Miscellaneous +
+ + + + + Hint to hardware that the cache line that contains "p" should be demoted from the cache closest to the processor core to a level more distant from the processor core. + + CLDEMOTE +
immintrin.h
+ Miscellaneous +
+ + + + + + Invalidate and flush the cache line that contains "p" from all levels of the cache hierarchy. + + CLFLUSHOPT +
immintrin.h
+ General Support +
+ + + + + + Write back to memory the cache line that contains "p" from any level of the cache hierarchy in the cache coherence domain. + + CLWB +
immintrin.h
+ General Support +
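Stable Rust exposes _mm_clflush (SSE2) but, as far as I can tell, no CLFLUSHOPT/CLWB wrappers; a hedged inline-assembly sketch of the flush described above, assuming the CPU advertises the corresponding CPUID bit:

    #[cfg(target_arch = "x86_64")]
    unsafe fn flush_line(p: *const u8) {
        use core::arch::asm;
        // Invalidate the cache line containing `p` from all levels of the
        // hierarchy; swap the mnemonic for "clwb" to write the line back
        // without invalidating it.
        asm!("clflushopt byte ptr [{}]", in(reg) p, options(nostack, preserves_flags));
    }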
+ + - - - - - - Compares the value from the memory "__A" with the value of "__B". If the - specified condition "__D" is met, then add the third operand "__C" to the "__A" and - write it into "__A", else the value of "__A" is unchanged. The return value is the - original value of "__A". - CASE (__D[3:0]) OF - 0: OP := _CMPCCX_O - 1: OP := _CMPCCX_NO - 2: OP := _CMPCCX_B - 3: OP := _CMPCCX_NB - 4: OP := _CMPCCX_Z - 5: OP := _CMPCCX_NZ - 6: OP := _CMPCCX_BE - 7: OP := _CMPCCX_NBE - 8: OP := _CMPCCX_S - 9: OP := _CMPCCX_NS - 10: OP := _CMPCCX_P - 11: OP := _CMPCCX_NP - 12: OP := _CMPCCX_L - 13: OP := _CMPCCX_NL - 14: OP := _CMPCCX_LE - 15: OP := _CMPCCX_NLE - ESAC - tmp1 := LOAD_LOCK(__A) - tmp2 := tmp1 + __C - IF (tmp1[31:0] OP __B[31:0]) - STORE_UNLOCK(__A, tmp2) - ELSE - STORE_UNLOCK(__A, tmp1) - FI - dst[31:0] := tmp1[31:0] - - - - - - - - - - - - - - - - - - CMPCCXADD -
immintrin.h
- Arithmetic + + + + + + Compares the value from the memory "__A" with the value of "__B". If the specified condition "__D" is met, then add the third operand "__C" to the "__A" and write it into "__A", else the value of "__A" is unchanged. The return value is the original value of "__A". + CASE (__D[3:0]) OF +0: OP := _CMPCCX_O +1: OP := _CMPCCX_NO +2: OP := _CMPCCX_B +3: OP := _CMPCCX_NB +4: OP := _CMPCCX_Z +5: OP := _CMPCCX_NZ +6: OP := _CMPCCX_BE +7: OP := _CMPCCX_NBE +8: OP := _CMPCCX_S +9: OP := _CMPCCX_NS +10: OP := _CMPCCX_P +11: OP := _CMPCCX_NP +12: OP := _CMPCCX_L +13: OP := _CMPCCX_NL +14: OP := _CMPCCX_LE +15: OP := _CMPCCX_NLE +ESAC +tmp1 := LOAD_LOCK(__A) +tmp2 := tmp1 + __C +IF (tmp1[31:0] OP __B[31:0]) + STORE_UNLOCK(__A, tmp2) +ELSE + STORE_UNLOCK(__A, tmp1) +FI +dst[31:0] := tmp1[31:0] + + + + + + + + + + + + + + + + + + CMPCCXADD +
immintrin.h
+ Arithmetic
- - - - - - Compares the value from the memory "__A" with the value of "__B". If the - specified condition "__D" is met, then add the third operand "__C" to the "__A" and - write it into "__A", else the value of "__A" is unchanged. The return value is the - original value of "__A". - CASE (__D[3:0]) OF - 0: OP := _CMPCCX_O - 1: OP := _CMPCCX_NO - 2: OP := _CMPCCX_B - 3: OP := _CMPCCX_NB - 4: OP := _CMPCCX_Z - 5: OP := _CMPCCX_NZ - 6: OP := _CMPCCX_BE - 7: OP := _CMPCCX_NBE - 8: OP := _CMPCCX_S - 9: OP := _CMPCCX_NS - 10: OP := _CMPCCX_P - 11: OP := _CMPCCX_NP - 12: OP := _CMPCCX_L - 13: OP := _CMPCCX_NL - 14: OP := _CMPCCX_LE - 15: OP := _CMPCCX_NLE - ESAC - tmp1 := LOAD_LOCK(__A) - tmp2 := tmp1 + __C - IF (tmp1[63:0] OP __B[63:0]) - STORE_UNLOCK(__A, tmp2) - ELSE - STORE_UNLOCK(__A, tmp1) - FI - dst[63:0] := tmp1[63:0] - - - - - - - - - - - - - - - - - - CMPCCXADD -
immintrin.h
- Arithmetic + + + + + + Compares the value from the memory "__A" with the value of "__B". If the specified condition "__D" is met, then add the third operand "__C" to the "__A" and write it into "__A", else the value of "__A" is unchanged. The return value is the original value of "__A". + CASE (__D[3:0]) OF +0: OP := _CMPCCX_O +1: OP := _CMPCCX_NO +2: OP := _CMPCCX_B +3: OP := _CMPCCX_NB +4: OP := _CMPCCX_Z +5: OP := _CMPCCX_NZ +6: OP := _CMPCCX_BE +7: OP := _CMPCCX_NBE +8: OP := _CMPCCX_S +9: OP := _CMPCCX_NS +10: OP := _CMPCCX_P +11: OP := _CMPCCX_NP +12: OP := _CMPCCX_L +13: OP := _CMPCCX_NL +14: OP := _CMPCCX_LE +15: OP := _CMPCCX_NLE +ESAC +tmp1 := LOAD_LOCK(__A) +tmp2 := tmp1 + __C +IF (tmp1[63:0] OP __B[63:0]) + STORE_UNLOCK(__A, tmp2) +ELSE + STORE_UNLOCK(__A, tmp1) +FI +dst[63:0] := tmp1[63:0] + + + + + + + + + + + + + + + + + + CMPCCXADD +
immintrin.h
+ Arithmetic
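CMPCCXADD has no Rust intrinsic in this tree; the data flow of the 32-bit form, specialized to the _CMPCCX_Z (equal) condition, can be modelled in plain, non-atomic Rust (the real instruction performs the whole sequence as one locked memory operation):

    // Hypothetical scalar model; names are illustrative only.
    fn cmpccxadd_z(a: &mut i32, b: i32, c: i32) -> i32 {
        let old = *a;                  // tmp1 := LOAD_LOCK(__A)
        if old == b {                  // tmp1[31:0] OP __B[31:0], OP = _CMPCCX_Z
            *a = old.wrapping_add(c);  // STORE_UNLOCK(__A, tmp1 + __C)
        }
        old                            // dst := original value of __A
    }

Note that in the pseudocode the store happens on both paths (the ELSE arm rewrites the old value), which matters for memory ordering but not for the result.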
- - - - Starting with the initial value in "crc", accumulates a CRC32 value for - unsigned 8-bit integer "v", and stores the result in "dst". - tmp1[7:0] := v[0:7] // bit reflection - tmp2[31:0] := crc[0:31] // bit reflection - tmp3[39:0] := tmp1[7:0] << 32 - tmp4[39:0] := tmp2[31:0] << 8 - tmp5[39:0] := tmp3[39:0] XOR tmp4[39:0] - tmp6[31:0] := MOD2(tmp5[39:0], 0x11EDC6F41) // remainder from polynomial division - modulus 2 - dst[31:0] := tmp6[0:31] // bit reflection - - - CRC32 -
nmmintrin.h
- Cryptography + + + + Starting with the initial value in "crc", accumulates a CRC32 value for unsigned 8-bit integer "v", and stores the result in "dst". + tmp1[7:0] := v[0:7] // bit reflection +tmp2[31:0] := crc[0:31] // bit reflection +tmp3[39:0] := tmp1[7:0] << 32 +tmp4[39:0] := tmp2[31:0] << 8 +tmp5[39:0] := tmp3[39:0] XOR tmp4[39:0] +tmp6[31:0] := MOD2(tmp5[39:0], 0x11EDC6F41) // remainder from polynomial division modulus 2 +dst[31:0] := tmp6[0:31] // bit reflection + + + CRC32 +
nmmintrin.h
+ Cryptography
- - - - Starting with the initial value in "crc", accumulates a CRC32 value for - unsigned 16-bit integer "v", and stores the result in "dst". - tmp1[15:0] := v[0:15] // bit reflection - tmp2[31:0] := crc[0:31] // bit reflection - tmp3[47:0] := tmp1[15:0] << 32 - tmp4[47:0] := tmp2[31:0] << 16 - tmp5[47:0] := tmp3[47:0] XOR tmp4[47:0] - tmp6[31:0] := MOD2(tmp5[47:0], 0x11EDC6F41) // remainder from polynomial division - modulus 2 - dst[31:0] := tmp6[0:31] // bit reflection - - - CRC32 -
nmmintrin.h
- Cryptography + + + + Starting with the initial value in "crc", accumulates a CRC32 value for unsigned 16-bit integer "v", and stores the result in "dst". + tmp1[15:0] := v[0:15] // bit reflection +tmp2[31:0] := crc[0:31] // bit reflection +tmp3[47:0] := tmp1[15:0] << 32 +tmp4[47:0] := tmp2[31:0] << 16 +tmp5[47:0] := tmp3[47:0] XOR tmp4[47:0] +tmp6[31:0] := MOD2(tmp5[47:0], 0x11EDC6F41) // remainder from polynomial division modulus 2 +dst[31:0] := tmp6[0:31] // bit reflection + + + CRC32 +
nmmintrin.h
+ Cryptography
- - - - Starting with the initial value in "crc", accumulates a CRC32 value for - unsigned 32-bit integer "v", and stores the result in "dst". - tmp1[31:0] := v[0:31] // bit reflection - tmp2[31:0] := crc[0:31] // bit reflection - tmp3[63:0] := tmp1[31:0] << 32 - tmp4[63:0] := tmp2[31:0] << 32 - tmp5[63:0] := tmp3[63:0] XOR tmp4[63:0] - tmp6[31:0] := MOD2(tmp5[63:0], 0x11EDC6F41) // remainder from polynomial division - modulus 2 - dst[31:0] := tmp6[0:31] // bit reflection - - - CRC32 -
nmmintrin.h
- Cryptography + + + + Starting with the initial value in "crc", accumulates a CRC32 value for unsigned 32-bit integer "v", and stores the result in "dst". + tmp1[31:0] := v[0:31] // bit reflection +tmp2[31:0] := crc[0:31] // bit reflection +tmp3[63:0] := tmp1[31:0] << 32 +tmp4[63:0] := tmp2[31:0] << 32 +tmp5[63:0] := tmp3[63:0] XOR tmp4[63:0] +tmp6[31:0] := MOD2(tmp5[63:0], 0x11EDC6F41) // remainder from polynomial division modulus 2 +dst[31:0] := tmp6[0:31] // bit reflection + + + CRC32 +
nmmintrin.h
+ Cryptography
- - - - Starting with the initial value in "crc", accumulates a CRC32 value for - unsigned 64-bit integer "v", and stores the result in "dst". - tmp1[63:0] := v[0:63] // bit reflection - tmp2[31:0] := crc[0:31] // bit reflection - tmp3[95:0] := tmp1[31:0] << 32 - tmp4[95:0] := tmp2[63:0] << 64 - tmp5[95:0] := tmp3[95:0] XOR tmp4[95:0] - tmp6[31:0] := MOD2(tmp5[95:0], 0x11EDC6F41) // remainder from polynomial division - modulus 2 - dst[31:0] := tmp6[0:31] // bit reflection - - - CRC32 -
nmmintrin.h
- Cryptography -
- - - - - - - Reads 64-byte command pointed by "__src", formats 64-byte enqueue store data, - and performs 64-byte enqueue store to memory pointed by "__dst". This intrinsics may - only be used in User mode. - - ENQCMD -
immintrin.h
- Unknown + + + + Starting with the initial value in "crc", accumulates a CRC32 value for unsigned 64-bit integer "v", and stores the result in "dst". + tmp1[63:0] := v[0:63] // bit reflection +tmp2[31:0] := crc[0:31] // bit reflection +tmp3[95:0] := tmp1[63:0] << 32 +tmp4[95:0] := tmp2[31:0] << 64 +tmp5[95:0] := tmp3[95:0] XOR tmp4[95:0] +tmp6[31:0] := MOD2(tmp5[95:0], 0x11EDC6F41) // remainder from polynomial division modulus 2 +dst[31:0] := tmp6[0:31] // bit reflection + + + CRC32 +
nmmintrin.h
+ Cryptography +
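All four accumulators above map to Rust's _mm_crc32_u8/_u16/_u32/_u64 under the sse4.2 target feature; a byte-at-a-time fold over a slice, as a usage sketch:

    #[cfg(target_arch = "x86_64")]
    #[target_feature(enable = "sse4.2")]
    unsafe fn crc32c(mut crc: u32, data: &[u8]) -> u32 {
        use core::arch::x86_64::_mm_crc32_u8;
        for &byte in data {
            // One step of the bit-reflected polynomial division by
            // 0x11EDC6F41 described above.
            crc = _mm_crc32_u8(crc, byte);
        }
        crc
    }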
+ + + + + + + Reads 64-byte command pointed by "__src", formats 64-byte enqueue store data, and performs 64-byte enqueue store to memory pointed by "__dst". This intrinsic may only be used in User mode. + + ENQCMD +
immintrin.h
+ Unknown
- - - - Reads 64-byte command pointed by "__src", formats 64-byte enqueue store data, - and performs 64-byte enqueue store to memory pointed by "__dst" This intrinsic may only - be used in Privileged mode. - - ENQCMD -
immintrin.h
- Unknown -
- - - - - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - single-precision (32-bit) floating-point elements, and store the results in "dst". - - FOR j := 0 to 7 - i := j*32 - m := j*16 - dst[i+31:i] := Convert_FP16_To_FP32(a[m+15:m]) - ENDFOR - dst[MAX:256] := 0 - - - F16C -
immintrin.h
- Convert + + + + Reads 64-byte command pointed by "__src", formats 64-byte enqueue store data, and performs 64-byte enqueue store to memory pointed by "__dst". This intrinsic may only be used in Privileged mode. + + ENQCMD +
immintrin.h
+ Unknown +
+ + + + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + m := j*16 + dst[i+31:i] := Convert_FP16_To_FP32(a[m+15:m]) +ENDFOR +dst[MAX:256] := 0 + + + F16C +
immintrin.h
+ Convert
- - - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed half-precision (16-bit) floating-point elements, and store the results in "dst". - [round_imm_note] - - FOR j := 0 to 7 - i := 16*j - l := 32*j - dst[i+15:i] := Convert_FP32_To_FP16(a[l+31:l]) - ENDFOR - dst[MAX:128] := 0 - - - F16C -
immintrin.h
- Convert + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst". + [round_imm_note] + +FOR j := 0 to 7 + i := 16*j + l := 32*j + dst[i+15:i] := Convert_FP32_To_FP16(a[l+31:l]) +ENDFOR +dst[MAX:128] := 0 + + + F16C +
immintrin.h
+ Convert
- - - Convert packed half-precision (16-bit) floating-point elements in "a" to packed - single-precision (32-bit) floating-point elements, and store the results in "dst". - - FOR j := 0 to 3 - i := j*32 - m := j*16 - dst[i+31:i] := Convert_FP16_To_FP32(a[m+15:m]) - ENDFOR - dst[MAX:128] := 0 - - - F16C -
immintrin.h
- Convert + + + Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + m := j*16 + dst[i+31:i] := Convert_FP16_To_FP32(a[m+15:m]) +ENDFOR +dst[MAX:128] := 0 + + + F16C +
immintrin.h
+ Convert
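Both F16C directions are available from Rust; a round-trip sketch through packed f16, assuming round-to-nearest for the narrowing step:

    use core::arch::x86_64::{
        __m128, _mm_cvtph_ps, _mm_cvtps_ph, _MM_FROUND_TO_NEAREST_INT,
    };

    #[target_feature(enable = "f16c")]
    unsafe fn roundtrip_f16(v: __m128) -> __m128 {
        // Narrow four f32 lanes to packed f16, then widen back; values
        // exactly representable in f16 survive the trip unchanged.
        let packed = _mm_cvtps_ph::<_MM_FROUND_TO_NEAREST_INT>(v);
        _mm_cvtph_ps(packed)
    }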
- - - - Convert packed single-precision (32-bit) floating-point elements in "a" to - packed half-precision (16-bit) floating-point elements, and store the results in "dst". - [round_imm_note] - - FOR j := 0 to 3 - i := 16*j - l := 32*j - dst[i+15:i] := Convert_FP32_To_FP16(a[l+31:l]) - ENDFOR - dst[MAX:64] := 0 - - - F16C -
immintrin.h
- Convert -
- - - - - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", add the intermediate result to packed elements in "c", and store the results in - "dst". - - FOR j := 0 to 1 - i := j*64 - dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] - ENDFOR - dst[MAX:128] := 0 - - - - - FMA -
immintrin.h
- Arithmetic + + + + Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst". + [round_imm_note] + +FOR j := 0 to 3 + i := 16*j + l := 32*j + dst[i+15:i] := Convert_FP32_To_FP16(a[l+31:l]) +ENDFOR +dst[MAX:64] := 0 + + + F16C +
immintrin.h
+ Convert +
+ + + + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] +ENDFOR +dst[MAX:128] := 0 + + + + + FMA +
immintrin.h
+ Arithmetic
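In Rust this entry is _mm_fmadd_pd; the point of the fused form is the single rounding of a*b + c per lane, e.g.:

    use core::arch::x86_64::{__m128d, _mm_fmadd_pd, _mm_set_pd};

    #[target_feature(enable = "fma")]
    unsafe fn fmadd_demo() -> __m128d {
        let a = _mm_set_pd(2.0, 3.0); // _mm_set_pd takes (high, low)
        let b = _mm_set_pd(10.0, 10.0);
        let c = _mm_set_pd(1.0, 1.0);
        // low lane: 3.0*10.0 + 1.0 = 31.0; high lane: 2.0*10.0 + 1.0 = 21.0,
        // each rounded once rather than after both the mul and the add.
        _mm_fmadd_pd(a, b, c)
    }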
- - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", add the intermediate result to packed elements in "c", and store the results in - "dst". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] - ENDFOR - dst[MAX:256] := 0 - - - - - FMA -
immintrin.h
- Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] +ENDFOR +dst[MAX:256] := 0 + + + + + FMA +
immintrin.h
+ Arithmetic
- - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", add the intermediate result to packed elements in "c", and store the results in - "dst". - - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] - ENDFOR - dst[MAX:128] := 0 - - - - - FMA -
immintrin.h
- Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] +ENDFOR +dst[MAX:128] := 0 + + + + + FMA +
immintrin.h
+ Arithmetic
- - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", add the intermediate result to packed elements in "c", and store the results in - "dst". - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] - ENDFOR - dst[MAX:256] := 0 - - - - - FMA -
immintrin.h
- Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] +ENDFOR +dst[MAX:256] := 0 + + + + + FMA +
immintrin.h
+ Arithmetic
- - - - - Multiply the lower double-precision (64-bit) floating-point elements in "a" and - "b", and add the intermediate result to the lower element in "c". Store the result in - the lower element of "dst", and copy the upper element from "a" to the upper element of - "dst". - - dst[63:0] := (a[63:0] * b[63:0]) + c[63:0] - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - - - FMA -
immintrin.h
- Arithmetic + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := (a[63:0] * b[63:0]) + c[63:0] +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + + FMA +
immintrin.h
+ Arithmetic
- - - - - Multiply the lower single-precision (32-bit) floating-point elements in "a" and - "b", and add the intermediate result to the lower element in "c". Store the result in - the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper - elements of "dst". - - dst[31:0] := (a[31:0] * b[31:0]) + c[31:0] - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - - - FMA -
immintrin.h
- Arithmetic + + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := (a[31:0] * b[31:0]) + c[31:0] +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + + + FMA +
immintrin.h
+ Arithmetic
- - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", alternatively add and subtract packed elements in "c" to/from the intermediate - result, and store the results in "dst". - - FOR j := 0 to 1 - i := j*64 - IF ((j & 1) == 0) - dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] - ELSE - dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - - - FMA -
immintrin.h
- Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + IF ((j & 1) == 0) + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + + + FMA +
immintrin.h
+ Arithmetic
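The even-subtract/odd-add alternation above is the building block of interleaved complex multiplication; the Rust spelling is _mm_fmaddsub_pd:

    use core::arch::x86_64::{__m128d, _mm_fmaddsub_pd};

    #[target_feature(enable = "fma")]
    unsafe fn fmaddsub(a: __m128d, b: __m128d, c: __m128d) -> __m128d {
        // lane 0 (even): a*b - c; lane 1 (odd): a*b + c
        _mm_fmaddsub_pd(a, b, c)
    }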
- - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", alternatively add and subtract packed elements in "c" to/from the intermediate - result, and store the results in "dst". - - FOR j := 0 to 3 - i := j*64 - IF ((j & 1) == 0) - dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] - ELSE - dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - - - FMA -
immintrin.h
- Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + IF ((j & 1) == 0) + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + ELSE + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + + + FMA +
immintrin.h
+ Arithmetic
- - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", alternatively add and subtract packed elements in "c" to/from the intermediate - result, and store the results in "dst". - - FOR j := 0 to 3 - i := j*32 - IF ((j & 1) == 0) - dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] - ELSE - dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - - - FMA -
immintrin.h
- Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + IF ((j & 1) == 0) + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + + + FMA +
immintrin.h
+ Arithmetic
- - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", alternatively add and subtract packed elements in "c" to/from the intermediate - result, and store the results in "dst". - - FOR j := 0 to 7 - i := j*32 - IF ((j & 1) == 0) - dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] - ELSE - dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - - - FMA -
immintrin.h
- Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + IF ((j & 1) == 0) + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + ELSE + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + + + FMA +
immintrin.h
+ Arithmetic
- - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", subtract packed elements in "c" from the intermediate result, and store the results - in "dst". - - FOR j := 0 to 1 - i := j*64 - dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] - ENDFOR - dst[MAX:128] := 0 - - - - - FMA -
immintrin.h
- Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] +ENDFOR +dst[MAX:128] := 0 + + + + + FMA +
immintrin.h
+ Arithmetic
- - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", subtract packed elements in "c" from the intermediate result, and store the results - in "dst". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] - ENDFOR - dst[MAX:256] := 0 - - - - - FMA -
immintrin.h
- Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] +ENDFOR +dst[MAX:256] := 0 + + + + + FMA +
immintrin.h
+ Arithmetic
- - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", subtract packed elements in "c" from the intermediate result, and store the results - in "dst". - - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] - ENDFOR - dst[MAX:128] := 0 - - - - - FMA -
immintrin.h
- Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] +ENDFOR +dst[MAX:128] := 0 + + + + + FMA +
immintrin.h
+ Arithmetic
- - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", subtract packed elements in "c" from the intermediate result, and store the results - in "dst". - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] - ENDFOR - dst[MAX:256] := 0 - - - - - FMA -
immintrin.h
- Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] +ENDFOR +dst[MAX:256] := 0 + + + + + FMA +
immintrin.h
+ Arithmetic
- - - - - Multiply the lower double-precision (64-bit) floating-point elements in "a" and - "b", and subtract the lower element in "c" from the intermediate result. Store the - result in the lower element of "dst", and copy the upper element from "a" to the upper - element of "dst". - - dst[63:0] := (a[63:0] * b[63:0]) - c[63:0] - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - - - FMA -
immintrin.h
- Arithmetic + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := (a[63:0] * b[63:0]) - c[63:0] +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + + FMA +
immintrin.h
+ Arithmetic
- - - - - Multiply the lower single-precision (32-bit) floating-point elements in "a" and - "b", and subtract the lower element in "c" from the intermediate result. Store the - result in the lower element of "dst", and copy the upper 3 packed elements from "a" to - the upper elements of "dst". - - dst[31:0] := (a[31:0] * b[31:0]) - c[31:0] - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - - - FMA -
immintrin.h
- Arithmetic + + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := (a[31:0] * b[31:0]) - c[31:0] +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + + + FMA +
immintrin.h
+ Arithmetic
- - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", alternatively subtract and add packed elements in "c" from/to the intermediate - result, and store the results in "dst". - - FOR j := 0 to 1 - i := j*64 - IF ((j & 1) == 0) - dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] - ELSE - dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - - - FMA -
immintrin.h
- Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + IF ((j & 1) == 0) + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + + + FMA +
immintrin.h
+ Arithmetic
- - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", alternatively subtract and add packed elements in "c" from/to the intermediate - result, and store the results in "dst". - - FOR j := 0 to 3 - i := j*64 - IF ((j & 1) == 0) - dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] - ELSE - dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - - - FMA -
immintrin.h
- Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + IF ((j & 1) == 0) + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i] + ELSE + dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + + + FMA +
immintrin.h
+ Arithmetic
- - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", alternatively subtract and add packed elements in "c" from/to the intermediate - result, and store the results in "dst". - - FOR j := 0 to 3 - i := j*32 - IF ((j & 1) == 0) - dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] - ELSE - dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] - FI - ENDFOR - dst[MAX:128] := 0 - - - - - FMA -
immintrin.h
- Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + IF ((j & 1) == 0) + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + FI +ENDFOR +dst[MAX:128] := 0 + + + + + FMA +
immintrin.h
+ Arithmetic
- - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", alternatively subtract and add packed elements in "c" from/to the intermediate - result, and store the results in "dst". - - FOR j := 0 to 7 - i := j*32 - IF ((j & 1) == 0) - dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] - ELSE - dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] - FI - ENDFOR - dst[MAX:256] := 0 - - - - - FMA -
immintrin.h
- Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + IF ((j & 1) == 0) + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i] + ELSE + dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i] + FI +ENDFOR +dst[MAX:256] := 0 + + + + + FMA +
immintrin.h
+ Arithmetic
- - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", add the negated intermediate result to packed elements in "c", and store the - results in "dst". - - FOR j := 0 to 1 - i := j*64 - dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i] - ENDFOR - dst[MAX:128] := 0 - - - - - FMA -
immintrin.h
- Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i] +ENDFOR +dst[MAX:128] := 0 + + + + + FMA +
immintrin.h
+ Arithmetic
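The sign conventions across this family are easy to misread: fmadd is a*b + c, fmsub is a*b - c, fnmadd is c - a*b, and fnmsub is -(a*b) - c. For the entry above, in Rust:

    use core::arch::x86_64::{__m128d, _mm_fnmadd_pd};

    #[target_feature(enable = "fma")]
    unsafe fn residual(a: __m128d, b: __m128d, c: __m128d) -> __m128d {
        // c - a*b per lane; the canonical Newton-Raphson refinement step.
        _mm_fnmadd_pd(a, b, c)
    }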
- - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", add the negated intermediate result to packed elements in "c", and store the - results in "dst". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i] - ENDFOR - dst[MAX:256] := 0 - - - - - FMA -
immintrin.h
- Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i] +ENDFOR +dst[MAX:256] := 0 + + + + + FMA +
immintrin.h
+ Arithmetic
- - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", add the negated intermediate result to packed elements in "c", and store the - results in "dst". - - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i] - ENDFOR - dst[MAX:128] := 0 - - - - - FMA -
immintrin.h
- Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i] +ENDFOR +dst[MAX:128] := 0 + + + + + FMA +
immintrin.h
+ Arithmetic
- - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", add the negated intermediate result to packed elements in "c", and store the - results in "dst". - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i] - ENDFOR - dst[MAX:256] := 0 - - - - - FMA -
immintrin.h
- Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i] +ENDFOR +dst[MAX:256] := 0 + + + + + FMA +
immintrin.h
+ Arithmetic
- - - - - Multiply the lower double-precision (64-bit) floating-point elements in "a" and - "b", and add the negated intermediate result to the lower element in "c". Store the - result in the lower element of "dst", and copy the upper element from "a" to the upper - element of "dst". - - dst[63:0] := -(a[63:0] * b[63:0]) + c[63:0] - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - - - FMA -
immintrin.h
- Arithmetic + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := -(a[63:0] * b[63:0]) + c[63:0] +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + + FMA +
immintrin.h
+ Arithmetic
- - - - - Multiply the lower single-precision (32-bit) floating-point elements in "a" and - "b", and add the negated intermediate result to the lower element in "c". Store the - result in the lower element of "dst", and copy the upper 3 packed elements from "a" to - the upper elements of "dst". - - dst[31:0] := -(a[31:0] * b[31:0]) + c[31:0] - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - - - FMA -
immintrin.h
- Arithmetic + + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := -(a[31:0] * b[31:0]) + c[31:0] +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + + + FMA +
immintrin.h
+ Arithmetic
- - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", subtract packed elements in "c" from the negated intermediate result, and store the - results in "dst". - - FOR j := 0 to 1 - i := j*64 - dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i] - ENDFOR - dst[MAX:128] := 0 - - - - - FMA -
immintrin.h
- Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i] +ENDFOR +dst[MAX:128] := 0 + + + + + FMA +
immintrin.h
+ Arithmetic
- - - - - Multiply packed double-precision (64-bit) floating-point elements in "a" and - "b", subtract packed elements in "c" from the negated intermediate result, and store the - results in "dst". - - FOR j := 0 to 3 - i := j*64 - dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i] - ENDFOR - dst[MAX:256] := 0 - - - - - FMA -
immintrin.h
- Arithmetic + + + + + Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst". + +FOR j := 0 to 3 + i := j*64 + dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i] +ENDFOR +dst[MAX:256] := 0 + + + + + FMA +
immintrin.h
+ Arithmetic
- - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", subtract packed elements in "c" from the negated intermediate result, and store the - results in "dst". - - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i] - ENDFOR - dst[MAX:128] := 0 - - - - - FMA -
immintrin.h
- Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i] +ENDFOR +dst[MAX:128] := 0 + + + + + FMA +
immintrin.h
+ Arithmetic
- - - - - Multiply packed single-precision (32-bit) floating-point elements in "a" and - "b", subtract packed elements in "c" from the negated intermediate result, and store the - results in "dst". - - FOR j := 0 to 7 - i := j*32 - dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i] - ENDFOR - dst[MAX:256] := 0 - - - - - FMA -
immintrin.h
- Arithmetic + + + + + Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst". + +FOR j := 0 to 7 + i := j*32 + dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i] +ENDFOR +dst[MAX:256] := 0 + + + + + FMA +
immintrin.h
+ Arithmetic
- - - - - Multiply the lower double-precision (64-bit) floating-point elements in "a" and - "b", and subtract the lower element in "c" from the negated intermediate result. Store - the result in the lower element of "dst", and copy the upper element from "a" to the - upper element of "dst". - - dst[63:0] := -(a[63:0] * b[63:0]) - c[63:0] - dst[127:64] := a[127:64] - dst[MAX:128] := 0 - - - - - FMA -
immintrin.h
- Arithmetic + + + + + Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := -(a[63:0] * b[63:0]) - c[63:0] +dst[127:64] := a[127:64] +dst[MAX:128] := 0 + + + + + FMA +
immintrin.h
+ Arithmetic
- - - - - Multiply the lower single-precision (32-bit) floating-point elements in "a" and - "b", and subtract the lower element in "c" from the negated intermediate result. Store - the result in the lower element of "dst", and copy the upper 3 packed elements from "a" - to the upper elements of "dst". - - dst[31:0] := -(a[31:0] * b[31:0]) - c[31:0] - dst[127:32] := a[127:32] - dst[MAX:128] := 0 - - - - - FMA -
immintrin.h
- Arithmetic -
- - - - - Read the FS segment base register and store the 32-bit result in "dst". - dst[31:0] := FS_Segment_Base_Register - dst[63:32] := 0 - - - FSGSBASE -
immintrin.h
- General Support + + + + + Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := -(a[31:0] * b[31:0]) - c[31:0] +dst[127:32] := a[127:32] +dst[MAX:128] := 0 + + + + + FMA +
immintrin.h
+ Arithmetic +
+ + + + + Read the FS segment base register and store the 32-bit result in "dst". + dst[31:0] := FS_Segment_Base_Register +dst[63:32] := 0 + + + FSGSBASE +
immintrin.h
+ General Support
- - Read the FS segment base register and store the 64-bit result in "dst". - dst[63:0] := FS_Segment_Base_Register - - - FSGSBASE -
immintrin.h
- General Support + + Read the FS segment base register and store the 64-bit result in "dst". + dst[63:0] := FS_Segment_Base_Register + + + FSGSBASE +
immintrin.h
+ General Support
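Rust exposes no FSGSBASE intrinsics, so a hedged inline-assembly sketch of the 64-bit read; RDFSBASE requires the OS to have set CR4.FSGSBASE and faults otherwise:

    #[cfg(target_arch = "x86_64")]
    unsafe fn read_fs_base() -> u64 {
        use core::arch::asm;
        let base: u64;
        // Reads the FS segment base directly from user mode when enabled.
        asm!("rdfsbase {}", out(reg) base, options(nomem, nostack));
        base
    }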
- - Read the GS segment base register and store the 32-bit result in "dst". - dst[31:0] := GS_Segment_Base_Register - dst[63:32] := 0 - - - FSGSBASE -
immintrin.h
- General Support + + Read the GS segment base register and store the 32-bit result in "dst". + dst[31:0] := GS_Segment_Base_Register +dst[63:32] := 0 + + + FSGSBASE +
immintrin.h
+ General Support
- - Read the GS segment base register and store the 64-bit result in "dst". - dst[63:0] := GS_Segment_Base_Register - - - FSGSBASE -
immintrin.h
- General Support + + Read the GS segment base register and store the 64-bit result in "dst". + dst[63:0] := GS_Segment_Base_Register + + + FSGSBASE +
immintrin.h
+ General Support
- - - Write the unsigned 32-bit integer "a" to the FS segment base register. - - FS_Segment_Base_Register[31:0] := a[31:0] - FS_Segment_Base_Register[63:32] := 0 - - - FSGSBASE -
immintrin.h
- General Support + + + Write the unsigned 32-bit integer "a" to the FS segment base register. + +FS_Segment_Base_Register[31:0] := a[31:0] +FS_Segment_Base_Register[63:32] := 0 + + + FSGSBASE +
immintrin.h
+ General Support
- - - Write the unsigned 64-bit integer "a" to the FS segment base register. - - FS_Segment_Base_Register[63:0] := a[63:0] - - - FSGSBASE -
immintrin.h
- General Support + + + Write the unsigned 64-bit integer "a" to the FS segment base register. + +FS_Segment_Base_Register[63:0] := a[63:0] + + + FSGSBASE +
immintrin.h
+ General Support
- - - Write the unsigned 32-bit integer "a" to the GS segment base register. - - GS_Segment_Base_Register[31:0] := a[31:0] - GS_Segment_Base_Register[63:32] := 0 - - - FSGSBASE -
immintrin.h
- General Support + + + Write the unsigned 32-bit integer "a" to the GS segment base register. + +GS_Segment_Base_Register[31:0] := a[31:0] +GS_Segment_Base_Register[63:32] := 0 + + + FSGSBASE +
immintrin.h
+ General Support
- - - Write the unsigned 64-bit integer "a" to the GS segment base register. - - GS_Segment_Base_Register[63:0] := a[63:0] - - - FSGSBASE -
immintrin.h
- General Support -
- - - - - - Reload the x87 FPU, MMX technology, XMM, and MXCSR registers from the 512-byte - memory image at "mem_addr". This data should have been written to memory previously - using the FXSAVE instruction, and in the same format as required by the operating mode. - "mem_addr" must be aligned on a 16-byte boundary. - state_x87_fpu_mmx_sse := fxrstor(MEM[mem_addr+512*8:mem_addr]) - - - FXSR -
immintrin.h
- OS-Targeted + + + Write the unsigned 64-bit integer "a" to the GS segment base register. + +GS_Segment_Base_Register[63:0] := a[63:0] + + + FSGSBASE +
immintrin.h
+ General Support +
+ + + + + + Reload the x87 FPU, MMX technology, XMM, and MXCSR registers from the 512-byte memory image at "mem_addr". This data should have been written to memory previously using the FXSAVE instruction, and in the same format as required by the operating mode. "mem_addr" must be aligned on a 16-byte boundary. + state_x87_fpu_mmx_sse := fxrstor(MEM[mem_addr+512*8:mem_addr]) + + + FXSR +
immintrin.h
+ OS-Targeted
- - - Reload the x87 FPU, MMX technology, XMM, and MXCSR registers from the 512-byte - memory image at "mem_addr". This data should have been written to memory previously - using the FXSAVE64 instruction, and in the same format as required by the operating - mode. "mem_addr" must be aligned on a 16-byte boundary. - state_x87_fpu_mmx_sse := fxrstor64(MEM[mem_addr+512*8:mem_addr]) - - - FXSR -
immintrin.h
- OS-Targeted + + + Reload the x87 FPU, MMX technology, XMM, and MXCSR registers from the 512-byte memory image at "mem_addr". This data should have been written to memory previously using the FXSAVE64 instruction, and in the same format as required by the operating mode. "mem_addr" must be aligned on a 16-byte boundary. + state_x87_fpu_mmx_sse := fxrstor64(MEM[mem_addr+512*8:mem_addr]) + + + FXSR +
immintrin.h
+ OS-Targeted
- - - Save the current state of the x87 FPU, MMX technology, XMM, and MXCSR registers - to a 512-byte memory location at "mem_addr". The layout of the 512-byte region depends - on the operating mode. Bytes [511:464] are available for software use and will not be - overwritten by the processor. - MEM[mem_addr+512*8:mem_addr] := fxsave(state_x87_fpu_mmx_sse) - - - FXSR -
immintrin.h
- OS-Targeted + + + Save the current state of the x87 FPU, MMX technology, XMM, and MXCSR registers to a 512-byte memory location at "mem_addr". The layout of the 512-byte region depends on the operating mode. Bytes [511:464] are available for software use and will not be overwritten by the processor. + MEM[mem_addr+512*8:mem_addr] := fxsave(state_x87_fpu_mmx_sse) + + + FXSR +
immintrin.h
+ OS-Targeted
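Rust exposes these as _fxsave/_fxrstor over a raw byte pointer; the 16-byte alignment requirement maps naturally onto #[repr(align)]. A sketch:

    use core::arch::x86_64::{_fxrstor, _fxsave};

    // 512-byte save image; the instructions require 16-byte alignment.
    #[repr(C, align(16))]
    struct FxsaveArea([u8; 512]);

    #[target_feature(enable = "fxsr")]
    unsafe fn save_restore_roundtrip() {
        let mut area = FxsaveArea([0; 512]);
        _fxsave(area.0.as_mut_ptr()); // dump x87/MMX/XMM/MXCSR state
        // ... code that clobbers the FP state would go here ...
        _fxrstor(area.0.as_ptr());    // reload the saved image
    }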
- - - Save the current state of the x87 FPU, MMX technology, XMM, and MXCSR registers - to a 512-byte memory location at "mem_addr". The layout of the 512-byte region depends - on the operating mode. Bytes [511:464] are available for software use and will not be - overwritten by the processor. - MEM[mem_addr+512*8:mem_addr] := fxsave64(state_x87_fpu_mmx_sse) - - - FXSR -
immintrin.h
- OS-Targeted -
- - - - - - - - Multiply the packed 8-bit integers in "a" and "b" in the finite field GF(2^8), - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). The field GF(2^8) is represented in polynomial - representation with the reduction polynomial x^8 + x^4 + x^3 + x + 1. - - DEFINE gf2p8mul_byte(src1byte, src2byte) { - tword := 0 - FOR i := 0 to 7 - IF src2byte.bit[i] + + + Save the current state of the x87 FPU, MMX technology, XMM, and MXCSR registers to a 512-byte memory location at "mem_addr". The layout of the 512-byte region depends on the operating mode. Bytes [511:464] are available for software use and will not be overwritten by the processor. + MEM[mem_addr+512*8:mem_addr] := fxsave64(state_x87_fpu_mmx_sse) + + + FXSR +
immintrin.h
+ OS-Targeted +
+ + + + + + + + Multiply the packed 8-bit integers in "a" and "b" in the finite field GF(2^8), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The field GF(2^8) is represented in polynomial representation with the reduction polynomial x^8 + x^4 + x^3 + x + 1. + +DEFINE gf2p8mul_byte(src1byte, src2byte) { + tword := 0 + FOR i := 0 to 7 + IF src2byte.bit[i] tword := tword XOR (src1byte << i) - FI - ENDFOR - FOR i := 14 downto 8 - p := 0x11B << (i-8) - IF tword.bit[i] + FI + ENDFOR + FOR i := 14 downto 8 + p := 0x11B << (i-8) + IF tword.bit[i] tword := tword XOR p - FI - ENDFOR - RETURN tword.byte[0] - } - FOR j := 0 TO 63 - IF k[j] - dst.byte[j] := gf2p8mul_byte(a.byte[j], b.byte[j]) - ELSE - dst.byte[j] := 0 - FI - ENDFOR - dst[MAX:512] := 0 - - - GFNI - AVX512F -
immintrin.h
- Arithmetic + FI + ENDFOR + RETURN tword.byte[0] +} +FOR j := 0 TO 63 + IF k[j] + dst.byte[j] := gf2p8mul_byte(a.byte[j], b.byte[j]) + ELSE + dst.byte[j] := 0 + FI +ENDFOR +dst[MAX:512] := 0 + + + GFNI + AVX512F +
immintrin.h
+ Arithmetic
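The gf2p8mul_byte helper above ports directly to scalar code, which is handy as a lane-by-lane reference when testing the packed forms; a Rust model:

    // Scalar model of gf2p8mul_byte: carryless multiply in GF(2)[x], then
    // reduce by x^8 + x^4 + x^3 + x + 1 (0x11B).
    fn gf2p8mul_byte(a: u8, b: u8) -> u8 {
        let mut tword: u16 = 0;
        for i in 0..8 {
            if (b >> i) & 1 == 1 {
                tword ^= (a as u16) << i;     // carryless partial product
            }
        }
        for i in (8..=14).rev() {
            if (tword >> i) & 1 == 1 {
                tword ^= 0x11Bu16 << (i - 8); // clear bit i via the polynomial
            }
        }
        tword as u8
    }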
- - - - - - Multiply the packed 8-bit integers in "a" and "b" in the finite field GF(2^8), - and store the results in "dst" using writemask "k" (elements are copied from "src"" when - the corresponding mask bit is not set). The field GF(2^8) is represented in polynomial - representation with the reduction polynomial x^8 + x^4 + x^3 + x + 1. - - DEFINE gf2p8mul_byte(src1byte, src2byte) { - tword := 0 - FOR i := 0 to 7 - IF src2byte.bit[i] + + + + + + Multiply the packed 8-bit integers in "a" and "b" in the finite field GF(2^8), and store the results in "dst" using writemask "k" (elements are copied from "src"" when the corresponding mask bit is not set). The field GF(2^8) is represented in polynomial representation with the reduction polynomial x^8 + x^4 + x^3 + x + 1. + +DEFINE gf2p8mul_byte(src1byte, src2byte) { + tword := 0 + FOR i := 0 to 7 + IF src2byte.bit[i] tword := tword XOR (src1byte << i) - FI - ENDFOR - FOR i := 14 downto 8 - p := 0x11B << (i-8) - IF tword.bit[i] + FI + ENDFOR + FOR i := 14 downto 8 + p := 0x11B << (i-8) + IF tword.bit[i] tword := tword XOR p - FI - ENDFOR - RETURN tword.byte[0] - } - FOR j := 0 TO 63 - IF k[j] - dst.byte[j] := gf2p8mul_byte(a.byte[j], b.byte[j]) - ELSE - dst.byte[j] := src.byte[j] - FI - ENDFOR - dst[MAX:512] := 0 - - - GFNI - AVX512F -
immintrin.h
- Arithmetic + FI + ENDFOR + RETURN tword.byte[0] +} +FOR j := 0 TO 63 + IF k[j] + dst.byte[j] := gf2p8mul_byte(a.byte[j], b.byte[j]) + ELSE + dst.byte[j] := src.byte[j] + FI +ENDFOR +dst[MAX:512] := 0 +
+ + GFNI + AVX512F +
immintrin.h
+ Arithmetic
- - - - Multiply the packed 8-bit integers in "a" and "b" in the finite field GF(2^8), - and store the results in "dst". The field GF(2^8) is represented in polynomial - representation with the reduction polynomial x^8 + x^4 + x^3 + x + 1. - - DEFINE gf2p8mul_byte(src1byte, src2byte) { - tword := 0 - FOR i := 0 to 7 - IF src2byte.bit[i] + + + + Multiply the packed 8-bit integers in "a" and "b" in the finite field GF(2^8), and store the results in "dst". The field GF(2^8) is represented in polynomial representation with the reduction polynomial x^8 + x^4 + x^3 + x + 1. + +DEFINE gf2p8mul_byte(src1byte, src2byte) { + tword := 0 + FOR i := 0 to 7 + IF src2byte.bit[i] tword := tword XOR (src1byte << i) - FI - ENDFOR - FOR i := 14 downto 8 - p := 0x11B << (i-8) - IF tword.bit[i] + FI + ENDFOR + FOR i := 14 downto 8 + p := 0x11B << (i-8) + IF tword.bit[i] tword := tword XOR p - FI - ENDFOR - RETURN tword.byte[0] - } - FOR j := 0 TO 63 - dst.byte[j] := gf2p8mul_byte(a.byte[j], b.byte[j]) - ENDFOR - dst[MAX:512] := 0 - - - GFNI - AVX512F -
immintrin.h
- Arithmetic + FI + ENDFOR + RETURN tword.byte[0] +} +FOR j := 0 TO 63 + dst.byte[j] := gf2p8mul_byte(a.byte[j], b.byte[j]) +ENDFOR +dst[MAX:512] := 0 +
+ + GFNI + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - Compute an affine transformation in the Galois Field 2^8. An affine - transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, - "x" represents an 8-bit vector, and "b" is a constant immediate byte. Store the packed - 8-bit results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - DEFINE parity(x) { - t := 0 - FOR i := 0 to 7 - t := t XOR x.bit[i] - ENDFOR - RETURN t - } - DEFINE affine_byte(tsrc2qw, src1byte, imm8) { - FOR i := 0 to 7 - retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND src1byte) XOR imm8.bit[i] - ENDFOR - RETURN retbyte - } - FOR j := 0 TO 7 - FOR i := 0 to 7 - IF k[j*8+i] + + + + + + Compute an affine transformation in the Galois Field 2^8. An affine transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, "x" represents an 8-bit vector, and "b" is a constant immediate byte. Store the packed 8-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE parity(x) { + t := 0 + FOR i := 0 to 7 + t := t XOR x.bit[i] + ENDFOR + RETURN t +} +DEFINE affine_byte(tsrc2qw, src1byte, imm8) { + FOR i := 0 to 7 + retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND src1byte) XOR imm8.bit[i] + ENDFOR + RETURN retbyte +} +FOR j := 0 TO 7 + FOR i := 0 to 7 + IF k[j*8+i] dst.qword[j].byte[i] := affine_byte(A.qword[j], x.qword[j].byte[i], b) - ELSE + ELSE dst.qword[j].byte[i] := 0 - FI - ENDFOR - ENDFOR - dst[MAX:512] := 0 - - - GFNI - AVX512F -
immintrin.h
- Arithmetic + FI + ENDFOR +ENDFOR +dst[MAX:512] := 0 +
+ + GFNI + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - - Compute an affine transformation in the Galois Field 2^8. An affine - transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, - "x" represents an 8-bit vector, and "b" is a constant immediate byte. Store the packed - 8-bit results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - DEFINE parity(x) { - t := 0 - FOR i := 0 to 7 - t := t XOR x.bit[i] - ENDFOR - RETURN t - } - DEFINE affine_byte(tsrc2qw, src1byte, imm8) { - FOR i := 0 to 7 - retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND src1byte) XOR imm8.bit[i] - ENDFOR - RETURN retbyte - } - FOR j := 0 TO 7 - FOR i := 0 to 7 - IF k[j*8+i] + + + + + + + Compute an affine transformation in the Galois Field 2^8. An affine transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, "x" represents an 8-bit vector, and "b" is a constant immediate byte. Store the packed 8-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE parity(x) { + t := 0 + FOR i := 0 to 7 + t := t XOR x.bit[i] + ENDFOR + RETURN t +} +DEFINE affine_byte(tsrc2qw, src1byte, imm8) { + FOR i := 0 to 7 + retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND src1byte) XOR imm8.bit[i] + ENDFOR + RETURN retbyte +} +FOR j := 0 TO 7 + FOR i := 0 to 7 + IF k[j*8+i] dst.qword[j].byte[i] := affine_byte(A.qword[j], x.qword[j].byte[i], b) - ELSE + ELSE dst.qword[j].byte[i] := src.qword[j].byte[i] - FI - ENDFOR - ENDFOR - dst[MAX:512] := 0 - - - GFNI - AVX512F -
immintrin.h
- Arithmetic + FI + ENDFOR +ENDFOR +dst[MAX:512] := 0 +
+ + GFNI + AVX512F +
immintrin.h
+ Arithmetic
- - - - - Compute an affine transformation in the Galois Field 2^8. An affine - transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, - "x" represents an 8-bit vector, and "b" is a constant immediate byte. Store the packed - 8-bit results in "dst". - - DEFINE parity(x) { - t := 0 - FOR i := 0 to 7 - t := t XOR x.bit[i] - ENDFOR - RETURN t - } - DEFINE affine_byte(tsrc2qw, src1byte, imm8) { - FOR i := 0 to 7 - retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND src1byte) XOR imm8.bit[i] - ENDFOR - RETURN retbyte - } - FOR j := 0 TO 7 - FOR i := 0 to 7 - dst.qword[j].byte[i] := affine_byte(A.qword[j], x.qword[j].byte[i], b) - ENDFOR - ENDFOR - dst[MAX:512] := 0 - - - GFNI - AVX512F -
immintrin.h
- Arithmetic + + + + + Compute an affine transformation in the Galois Field 2^8. An affine transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, "x" represents an 8-bit vector, and "b" is a constant immediate byte. Store the packed 8-bit results in "dst". + +DEFINE parity(x) { + t := 0 + FOR i := 0 to 7 + t := t XOR x.bit[i] + ENDFOR + RETURN t +} +DEFINE affine_byte(tsrc2qw, src1byte, imm8) { + FOR i := 0 to 7 + retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND src1byte) XOR imm8.bit[i] + ENDFOR + RETURN retbyte +} +FOR j := 0 TO 7 + FOR i := 0 to 7 + dst.qword[j].byte[i] := affine_byte(A.qword[j], x.qword[j].byte[i], b) + ENDFOR +ENDFOR +dst[MAX:512] := 0 + + + GFNI + AVX512F +
immintrin.h
+ Arithmetic
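Likewise, affine_byte reduces to one parity computation per result bit; a scalar Rust model, with a_qword holding the 8x8 bit matrix exactly as the pseudocode's tsrc2qw:

    // retbyte.bit[i] = parity(A.byte[7-i] AND x) XOR b.bit[i]
    fn affine_byte(a_qword: u64, x: u8, b: u8) -> u8 {
        let mut ret = 0u8;
        for i in 0..8 {
            let row = (a_qword >> ((7 - i) * 8)) as u8;      // tsrc2qw.byte[7-i]
            let parity = ((row & x).count_ones() & 1) as u8; // XOR of the ANDed bits
            ret |= (parity ^ ((b >> i) & 1)) << i;
        }
        ret
    }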
- - - - - - Compute an inverse affine transformation in the Galois Field 2^8. An affine - transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, - "x" represents an 8-bit vector, and "b" is a constant immediate byte. The inverse of the - 8-bit values in "x" is defined with respect to the reduction polynomial x^8 + x^4 + x^3 - + x + 1. Store the packed 8-bit results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - DEFINE parity(x) { - t := 0 - FOR i := 0 to 7 - t := t XOR x.bit[i] - ENDFOR - RETURN t - } - DEFINE affine_inverse_byte(tsrc2qw, src1byte, imm8) { - FOR i := 0 to 7 - retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND inverse(src1byte)) XOR imm8.bit[i] - ENDFOR - RETURN retbyte - } - FOR j := 0 TO 7 - FOR i := 0 to 7 - IF k[j*8+i] + + + + + + Compute an inverse affine transformation in the Galois Field 2^8. An affine transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, "x" represents an 8-bit vector, and "b" is a constant immediate byte. The inverse of the 8-bit values in "x" is defined with respect to the reduction polynomial x^8 + x^4 + x^3 + x + 1. Store the packed 8-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + DEFINE parity(x) { + t := 0 + FOR i := 0 to 7 + t := t XOR x.bit[i] + ENDFOR + RETURN t +} +DEFINE affine_inverse_byte(tsrc2qw, src1byte, imm8) { + FOR i := 0 to 7 + retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND inverse(src1byte)) XOR imm8.bit[i] + ENDFOR + RETURN retbyte +} +FOR j := 0 TO 7 + FOR i := 0 to 7 + IF k[j*8+i] dst.qword[j].byte[i] := affine_inverse_byte(A.qword[j], x.qword[j].byte[i], b) - ELSE + ELSE dst.qword[j].byte[i] := 0 - FI - ENDFOR - ENDFOR - dst[MAX:512] := 0 - - - GFNI - AVX512F -
immintrin.h
- Arithmetic + FI + ENDFOR +ENDFOR +dst[MAX:512] := 0 +
+ + GFNI + AVX512F +
immintrin.h
+ Arithmetic
- - - - - - - Compute an inverse affine transformation in the Galois Field 2^8. An affine - transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, - "x" represents an 8-bit vector, and "b" is a constant immediate byte. The inverse of the - 8-bit values in "x" is defined with respect to the reduction polynomial x^8 + x^4 + x^3 - + x + 1. Store the packed 8-bit results in "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). - DEFINE parity(x) { - t := 0 - FOR i := 0 to 7 - t := t XOR x.bit[i] - ENDFOR - RETURN t - } - DEFINE affine_inverse_byte(tsrc2qw, src1byte, imm8) { - FOR i := 0 to 7 - retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND inverse(src1byte)) XOR imm8.bit[i] - ENDFOR - RETURN retbyte - } - FOR j := 0 TO 7 - FOR i := 0 to 7 - IF k[j*8+i] + + + + + + + Compute an inverse affine transformation in the Galois Field 2^8. An affine transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, "x" represents an 8-bit vector, and "b" is a constant immediate byte. The inverse of the 8-bit values in "x" is defined with respect to the reduction polynomial x^8 + x^4 + x^3 + x + 1. Store the packed 8-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + DEFINE parity(x) { + t := 0 + FOR i := 0 to 7 + t := t XOR x.bit[i] + ENDFOR + RETURN t +} +DEFINE affine_inverse_byte(tsrc2qw, src1byte, imm8) { + FOR i := 0 to 7 + retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND inverse(src1byte)) XOR imm8.bit[i] + ENDFOR + RETURN retbyte +} +FOR j := 0 TO 7 + FOR i := 0 to 7 + IF k[j*8+i] dst.qword[j].byte[i] := affine_inverse_byte(A.qword[j], x.qword[j].byte[i], b) - ELSE + ELSE dst.qword[j].byte[i] := src.qword[j].byte[b] - FI - ENDFOR - ENDFOR - dst[MAX:512] := 0 - - - GFNI - AVX512F -
immintrin.h
- Arithmetic + FI + ENDFOR +ENDFOR +dst[MAX:512] := 0 +
+ + GFNI + AVX512F +
immintrin.h
+ Arithmetic
- - - - - Compute an inverse affine transformation in the Galois Field 2^8. An affine - transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, - "x" represents an 8-bit vector, and "b" is a constant immediate byte. The inverse of the - 8-bit values in "x" is defined with respect to the reduction polynomial x^8 + x^4 + x^3 - + x + 1. Store the packed 8-bit results in "dst". - DEFINE parity(x) { - t := 0 - FOR i := 0 to 7 - t := t XOR x.bit[i] - ENDFOR - RETURN t - } - DEFINE affine_inverse_byte(tsrc2qw, src1byte, imm8) { - FOR i := 0 to 7 - retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND inverse(src1byte)) XOR imm8.bit[i] - ENDFOR - RETURN retbyte - } - FOR j := 0 TO 7 - FOR i := 0 to 7 - dst.qword[j].byte[i] := affine_inverse_byte(A.qword[j], x.qword[j].byte[i], b) - ENDFOR - ENDFOR - dst[MAX:512] := 0 - - - GFNI - AVX512F -
immintrin.h
- Arithmetic -
- - - - - - - - Multiply the packed 8-bit integers in "a" and "b" in the finite field GF(2^8), - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). The field GF(2^8) is represented in polynomial - representation with the reduction polynomial x^8 + x^4 + x^3 + x + 1. - - DEFINE gf2p8mul_byte(src1byte, src2byte) { - tword := 0 - FOR i := 0 to 7 - IF src2byte.bit[i] + + + + + Compute an inverse affine transformation in the Galois Field 2^8. An affine transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, "x" represents an 8-bit vector, and "b" is a constant immediate byte. The inverse of the 8-bit values in "x" is defined with respect to the reduction polynomial x^8 + x^4 + x^3 + x + 1. Store the packed 8-bit results in "dst". + DEFINE parity(x) { + t := 0 + FOR i := 0 to 7 + t := t XOR x.bit[i] + ENDFOR + RETURN t +} +DEFINE affine_inverse_byte(tsrc2qw, src1byte, imm8) { + FOR i := 0 to 7 + retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND inverse(src1byte)) XOR imm8.bit[i] + ENDFOR + RETURN retbyte +} +FOR j := 0 TO 7 + FOR i := 0 to 7 + dst.qword[j].byte[i] := affine_inverse_byte(A.qword[j], x.qword[j].byte[i], b) + ENDFOR +ENDFOR +dst[MAX:512] := 0 + + + GFNI + AVX512F +
immintrin.h
+ Arithmetic +
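Since the pseudocode above leans on an undefined inverse() helper, a pure-Rust sketch of one byte lane follows (illustrative only; every name below is invented for this note, and the multiply simply repeats the shift-and-reduce loop from the gf2p8mul entries further down). It defines inverse(0) = 0, as the instruction does, and self-checks that the identity matrix with b = 0 returns the plain field inverse.

// Sketch of affine_inverse_byte for a single lane (assumed semantics from
// the pseudocode above; not the stdarch implementation).
fn parity(x: u8) -> u8 {
    (x.count_ones() & 1) as u8
}

// Shift-and-reduce multiply in GF(2^8) mod x^8 + x^4 + x^3 + x + 1 (0x11B),
// mirroring the gf2p8mul_byte pseudocode that appears further down.
fn gf_mul(a: u8, b: u8) -> u8 {
    let mut t: u16 = 0;
    for i in 0..8 {
        if (b >> i) & 1 == 1 {
            t ^= (a as u16) << i;
        }
    }
    for i in (8..=14).rev() {
        if (t >> i) & 1 == 1 {
            t ^= 0x11Bu16 << (i - 8);
        }
    }
    t as u8
}

// inverse() from the pseudocode: the multiplicative inverse in GF(2^8),
// found here by brute force; the instruction maps 0 to 0.
fn gf_inverse(x: u8) -> u8 {
    if x == 0 {
        return 0;
    }
    (1..=255u8).find(|&y| gf_mul(x, y) == 1).unwrap()
}

fn affine_inverse_byte(a: u64, x: u8, b: u8) -> u8 {
    let inv = gf_inverse(x);
    let mut ret = 0u8;
    for i in 0..8 {
        let row = (a >> (8 * (7 - i))) as u8; // tsrc2qw.byte[7-i]
        ret |= (parity(row & inv) ^ ((b >> i) & 1)) << i;
    }
    ret
}

fn main() {
    // With byte[7-i] == 1 << i (qword 0x0102040810204080) and b == 0 the
    // affine step is the identity, so the result is the field inverse.
    const IDENTITY: u64 = 0x0102_0408_1020_4080;
    for x in 1..=255u8 {
        assert_eq!(gf_mul(x, affine_inverse_byte(IDENTITY, x, 0)), 1);
    }
}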
+ + + + + + + + Multiply the packed 8-bit integers in "a" and "b" in the finite field GF(2^8), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The field GF(2^8) is represented in polynomial representation with the reduction polynomial x^8 + x^4 + x^3 + x + 1. + +DEFINE gf2p8mul_byte(src1byte, src2byte) { + tword := 0 + FOR i := 0 to 7 + IF src2byte.bit[i] tword := tword XOR (src1byte << i) - FI - ENDFOR - FOR i := 14 downto 8 - p := 0x11B << (i-8) - IF tword.bit[i] + FI + ENDFOR + FOR i := 14 downto 8 + p := 0x11B << (i-8) + IF tword.bit[i] tword := tword XOR p - FI - ENDFOR - RETURN tword.byte[0] - } - FOR j := 0 TO 31 - IF k[j] - dst.byte[j] := gf2p8mul_byte(a.byte[j], b.byte[j]) - ELSE - dst.byte[j] := 0 - FI - ENDFOR - dst[MAX:256] := 0 - - - GFNI - AVX512VL -
immintrin.h
- Arithmetic + FI + ENDFOR + RETURN tword.byte[0] +} +FOR j := 0 TO 31 + IF k[j] + dst.byte[j] := gf2p8mul_byte(a.byte[j], b.byte[j]) + ELSE + dst.byte[j] := 0 + FI +ENDFOR +dst[MAX:256] := 0 + + + GFNI + AVX512VL +
immintrin.h
+ Arithmetic
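The six gf2p8mul entries in this group differ only in how unselected lanes are filled. A small Rust sketch of that selection logic follows (names are illustrative, not stdarch APIs; the per-byte operation is passed in as f, with XOR standing in for gf2p8mul_byte in the self-check).

// Masked-lane semantics shared by the _maskz_ (zeromask) and _mask_
// (writemask) forms, modeled on 32 byte lanes (a sketch, not stdarch).
fn maskz_bytes(k: u32, a: [u8; 32], b: [u8; 32], f: fn(u8, u8) -> u8) -> [u8; 32] {
    let mut dst = [0u8; 32]; // unselected lanes are zeroed
    for j in 0..32 {
        if (k >> j) & 1 == 1 {
            dst[j] = f(a[j], b[j]);
        }
    }
    dst
}

fn mask_bytes(src: [u8; 32], k: u32, a: [u8; 32], b: [u8; 32], f: fn(u8, u8) -> u8) -> [u8; 32] {
    let mut dst = src; // unselected lanes are copied from src
    for j in 0..32 {
        if (k >> j) & 1 == 1 {
            dst[j] = f(a[j], b[j]);
        }
    }
    dst
}

fn main() {
    let a = [0x57u8; 32];
    let b = [0x83u8; 32];
    let r = maskz_bytes(0x0000_0003, a, b, |x, y| x ^ y); // XOR as a stand-in op
    assert_eq!(&r[..3], &[0xD4, 0xD4, 0x00]); // lanes 0 and 1 selected, lane 2 zeroed
}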
- - - - - - Multiply the packed 8-bit integers in "a" and "b" in the finite field GF(2^8), - and store the results in "dst" using writemask "k" (elements are copied from "src"" when - the corresponding mask bit is not set). The field GF(2^8) is represented in polynomial - representation with the reduction polynomial x^8 + x^4 + x^3 + x + 1. - - DEFINE gf2p8mul_byte(src1byte, src2byte) { - tword := 0 - FOR i := 0 to 7 - IF src2byte.bit[i] + + + + + + Multiply the packed 8-bit integers in "a" and "b" in the finite field GF(2^8), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The field GF(2^8) is represented in polynomial representation with the reduction polynomial x^8 + x^4 + x^3 + x + 1. +
+DEFINE gf2p8mul_byte(src1byte, src2byte) {
+ tword := 0
+ FOR i := 0 to 7
+ IF src2byte.bit[i]
tword := tword XOR (src1byte << i)
- FI
- ENDFOR
- FOR i := 14 downto 8
- p := 0x11B << (i-8)
- IF tword.bit[i]
+ FI
+ ENDFOR
+ FOR i := 14 downto 8
+ p := 0x11B << (i-8)
+ IF tword.bit[i]
tword := tword XOR p
- FI
- ENDFOR
- RETURN tword.byte[0]
- }
- FOR j := 0 TO 31
- IF k[j]
- dst.byte[j] := gf2p8mul_byte(a.byte[j], b.byte[j])
- ELSE
- dst.byte[j] := src.byte[j]
- FI
- ENDFOR
- dst[MAX:256] := 0
-
-
- GFNI
- AVX512VL
-
immintrin.h
- Arithmetic + FI + ENDFOR + RETURN tword.byte[0] +} +FOR j := 0 TO 31 + IF k[j] + dst.byte[j] := gf2p8mul_byte(a.byte[j], b.byte[j]) + ELSE + dst.byte[j] := src.byte[j] + FI +ENDFOR +dst[MAX:256] := 0 +
+ + GFNI + AVX512VL +
immintrin.h
+ Arithmetic
- - - - Multiply the packed 8-bit integers in "a" and "b" in the finite field GF(2^8), - and store the results in "dst". The field GF(2^8) is represented in polynomial - representation with the reduction polynomial x^8 + x^4 + x^3 + x + 1. - - DEFINE gf2p8mul_byte(src1byte, src2byte) { - tword := 0 - FOR i := 0 to 7 - IF src2byte.bit[i] + + + + Multiply the packed 8-bit integers in "a" and "b" in the finite field GF(2^8), and store the results in "dst". The field GF(2^8) is represented in polynomial representation with the reduction polynomial x^8 + x^4 + x^3 + x + 1. + +DEFINE gf2p8mul_byte(src1byte, src2byte) { + tword := 0 + FOR i := 0 to 7 + IF src2byte.bit[i] tword := tword XOR (src1byte << i) - FI - ENDFOR - FOR i := 14 downto 8 - p := 0x11B << (i-8) - IF tword.bit[i] + FI + ENDFOR + FOR i := 14 downto 8 + p := 0x11B << (i-8) + IF tword.bit[i] tword := tword XOR p - FI - ENDFOR - RETURN tword.byte[0] - } - FOR j := 0 TO 31 - dst.byte[j] := gf2p8mul_byte(a.byte[j], b.byte[j]) - ENDFOR - dst[MAX:256] := 0 - - - GFNI - AVX512VL -
immintrin.h
- Arithmetic + FI + ENDFOR + RETURN tword.byte[0] +} +FOR j := 0 TO 31 + dst.byte[j] := gf2p8mul_byte(a.byte[j], b.byte[j]) +ENDFOR +dst[MAX:256] := 0 +
+ + GFNI + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Multiply the packed 8-bit integers in "a" and "b" in the finite field GF(2^8), - and store the results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). The field GF(2^8) is represented in polynomial - representation with the reduction polynomial x^8 + x^4 + x^3 + x + 1. - - DEFINE gf2p8mul_byte(src1byte, src2byte) { - tword := 0 - FOR i := 0 to 7 - IF src2byte.bit[i] + + + + + Multiply the packed 8-bit integers in "a" and "b" in the finite field GF(2^8), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The field GF(2^8) is represented in polynomial representation with the reduction polynomial x^8 + x^4 + x^3 + x + 1. + +DEFINE gf2p8mul_byte(src1byte, src2byte) { + tword := 0 + FOR i := 0 to 7 + IF src2byte.bit[i] tword := tword XOR (src1byte << i) - FI - ENDFOR - FOR i := 14 downto 8 - p := 0x11B << (i-8) - IF tword.bit[i] + FI + ENDFOR + FOR i := 14 downto 8 + p := 0x11B << (i-8) + IF tword.bit[i] tword := tword XOR p - FI - ENDFOR - RETURN tword.byte[0] - } - FOR j := 0 TO 15 - IF k[j] - dst.byte[j] := gf2p8mul_byte(a.byte[j], b.byte[j]) - ELSE - dst.byte[j] := 0 - FI - ENDFOR - dst[MAX:128] := 0 - - - GFNI - AVX512VL -
immintrin.h
- Arithmetic + FI + ENDFOR + RETURN tword.byte[0] +} +FOR j := 0 TO 15 + IF k[j] + dst.byte[j] := gf2p8mul_byte(a.byte[j], b.byte[j]) + ELSE + dst.byte[j] := 0 + FI +ENDFOR +dst[MAX:128] := 0 +
+ + GFNI + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Multiply the packed 8-bit integers in "a" and "b" in the finite field GF(2^8), - and store the results in "dst" using writemask "k" (elements are copied from "src"" when - the corresponding mask bit is not set). The field GF(2^8) is represented in polynomial - representation with the reduction polynomial x^8 + x^4 + x^3 + x + 1. - - DEFINE gf2p8mul_byte(src1byte, src2byte) { - tword := 0 - FOR i := 0 to 7 - IF src2byte.bit[i] + + + + + + Multiply the packed 8-bit integers in "a" and "b" in the finite field GF(2^8), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The field GF(2^8) is represented in polynomial representation with the reduction polynomial x^8 + x^4 + x^3 + x + 1. +
+DEFINE gf2p8mul_byte(src1byte, src2byte) {
+ tword := 0
+ FOR i := 0 to 7
+ IF src2byte.bit[i]
tword := tword XOR (src1byte << i)
- FI
- ENDFOR
- FOR i := 14 downto 8
- p := 0x11B << (i-8)
- IF tword.bit[i]
+ FI
+ ENDFOR
+ FOR i := 14 downto 8
+ p := 0x11B << (i-8)
+ IF tword.bit[i]
tword := tword XOR p
- FI
- ENDFOR
- RETURN tword.byte[0]
- }
- FOR j := 0 TO 15
- IF k[j]
- dst.byte[j] := gf2p8mul_byte(a.byte[j], b.byte[j])
- ELSE
- dst.byte[j] := src.byte[j]
- FI
- ENDFOR
- dst[MAX:128] := 0
-
-
- GFNI
- AVX512VL
-
immintrin.h
- Arithmetic + FI + ENDFOR + RETURN tword.byte[0] +} +FOR j := 0 TO 15 + IF k[j] + dst.byte[j] := gf2p8mul_byte(a.byte[j], b.byte[j]) + ELSE + dst.byte[j] := src.byte[j] + FI +ENDFOR +dst[MAX:128] := 0 +
+ + GFNI + AVX512VL +
immintrin.h
+ Arithmetic
- - - - Multiply the packed 8-bit integers in "a" and "b" in the finite field GF(2^8), - and store the results in "dst". The field GF(2^8) is represented in polynomial - representation with the reduction polynomial x^8 + x^4 + x^3 + x + 1. - - DEFINE gf2p8mul_byte(src1byte, src2byte) { - tword := 0 - FOR i := 0 to 7 - IF src2byte.bit[i] + + + + Multiply the packed 8-bit integers in "a" and "b" in the finite field GF(2^8), and store the results in "dst". The field GF(2^8) is represented in polynomial representation with the reduction polynomial x^8 + x^4 + x^3 + x + 1. + +DEFINE gf2p8mul_byte(src1byte, src2byte) { + tword := 0 + FOR i := 0 to 7 + IF src2byte.bit[i] tword := tword XOR (src1byte << i) - FI - ENDFOR - FOR i := 14 downto 8 - p := 0x11B << (i-8) - IF tword.bit[i] + FI + ENDFOR + FOR i := 14 downto 8 + p := 0x11B << (i-8) + IF tword.bit[i] tword := tword XOR p - FI - ENDFOR - RETURN tword.byte[0] - } - FOR j := 0 TO 15 - dst.byte[j] := gf2p8mul_byte(a.byte[j], b.byte[j]) - ENDFOR - dst[MAX:128] := 0 - - - GFNI - AVX512VL -
immintrin.h
- Arithmetic + FI + ENDFOR + RETURN tword.byte[0] +} +FOR j := 0 TO 15 + dst.byte[j] := gf2p8mul_byte(a.byte[j], b.byte[j]) +ENDFOR +dst[MAX:128] := 0 +
+ + GFNI + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Compute an affine transformation in the Galois Field 2^8. An affine - transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, - "x" represents an 8-bit vector, and "b" is a constant immediate byte. Store the packed - 8-bit results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - DEFINE parity(x) { - t := 0 - FOR i := 0 to 7 - t := t XOR x.bit[i] - ENDFOR - RETURN t - } - DEFINE affine_byte(tsrc2qw, src1byte, imm8) { - FOR i := 0 to 7 - retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND src1byte) XOR imm8.bit[i] - ENDFOR - RETURN retbyte - } - FOR j := 0 TO 3 - FOR i := 0 to 7 - IF k[j*8+i] + + + + + + Compute an affine transformation in the Galois Field 2^8. An affine transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, "x" represents an 8-bit vector, and "b" is a constant immediate byte. Store the packed 8-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE parity(x) { + t := 0 + FOR i := 0 to 7 + t := t XOR x.bit[i] + ENDFOR + RETURN t +} +DEFINE affine_byte(tsrc2qw, src1byte, imm8) { + FOR i := 0 to 7 + retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND src1byte) XOR imm8.bit[i] + ENDFOR + RETURN retbyte +} +FOR j := 0 TO 3 + FOR i := 0 to 7 + IF k[j*8+i] dst.qword[j].byte[i] := affine_byte(A.qword[j], x.qword[j].byte[i], b) - ELSE + ELSE dst.qword[j].byte[i] := 0 - FI - ENDFOR - ENDFOR - dst[MAX:256] := 0 - - - GFNI - AVX512VL -
immintrin.h
- Arithmetic + FI + ENDFOR +ENDFOR +dst[MAX:256] := 0 +
+ + GFNI + AVX512VL +
immintrin.h
+ Arithmetic
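For the forward affine transform, a one-lane Rust sketch may help (illustrative only; the two matrix constants below are derived from the byte[7-i] ordering in the pseudocode, not taken from this patch). It self-checks that 0x0102040810204080 acts as the identity matrix and that 0x8040201008040201 reverses the bits of each byte, a common GFNI idiom.

// affine_byte for one lane: bit i of the result is the parity of
// (A.byte[7-i] AND x), XORed with bit i of the immediate b.
fn affine_byte(a: u64, x: u8, b: u8) -> u8 {
    let mut ret = 0u8;
    for i in 0..8 {
        let row = (a >> (8 * (7 - i))) as u8; // tsrc2qw.byte[7-i]
        let bit = ((row & x).count_ones() as u8 & 1) ^ ((b >> i) & 1);
        ret |= bit << i;
    }
    ret
}

fn main() {
    for x in 0..=255u8 {
        // byte[7-i] == 1 << i: picks bit i of x unchanged (identity).
        assert_eq!(affine_byte(0x0102_0408_1020_4080, x, 0), x);
        // byte[7-i] == 0x80 >> i: picks bit 7-i of x (per-byte bit reverse).
        assert_eq!(affine_byte(0x8040_2010_0804_0201, x, 0), x.reverse_bits());
    }
}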
- - - - - - - Compute an affine transformation in the Galois Field 2^8. An affine - transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, - "x" represents an 8-bit vector, and "b" is a constant immediate byte. Store the packed - 8-bit results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - DEFINE parity(x) { - t := 0 - FOR i := 0 to 7 - t := t XOR x.bit[i] - ENDFOR - RETURN t - } - DEFINE affine_byte(tsrc2qw, src1byte, imm8) { - FOR i := 0 to 7 - retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND src1byte) XOR imm8.bit[i] - ENDFOR - RETURN retbyte - } - FOR j := 0 TO 3 - FOR i := 0 to 7 - IF k[j*8+i] + + + + + + + Compute an affine transformation in the Galois Field 2^8. An affine transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, "x" represents an 8-bit vector, and "b" is a constant immediate byte. Store the packed 8-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE parity(x) { + t := 0 + FOR i := 0 to 7 + t := t XOR x.bit[i] + ENDFOR + RETURN t +} +DEFINE affine_byte(tsrc2qw, src1byte, imm8) { + FOR i := 0 to 7 + retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND src1byte) XOR imm8.bit[i] + ENDFOR + RETURN retbyte +} +FOR j := 0 TO 3 + FOR i := 0 to 7 + IF k[j*8+i] dst.qword[j].byte[i] := affine_byte(A.qword[j], x.qword[j].byte[i], b) - ELSE + ELSE dst.qword[j].byte[i] := src.qword[j].byte[i] - FI - ENDFOR - ENDFOR - dst[MAX:256] := 0 - - - GFNI - AVX512VL -
immintrin.h
- Arithmetic + FI + ENDFOR +ENDFOR +dst[MAX:256] := 0 +
+ + GFNI + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Compute an affine transformation in the Galois Field 2^8. An affine - transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, - "x" represents an 8-bit vector, and "b" is a constant immediate byte. Store the packed - 8-bit results in "dst". - - DEFINE parity(x) { - t := 0 - FOR i := 0 to 7 - t := t XOR x.bit[i] - ENDFOR - RETURN t - } - DEFINE affine_byte(tsrc2qw, src1byte, imm8) { - FOR i := 0 to 7 - retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND src1byte) XOR imm8.bit[i] - ENDFOR - RETURN retbyte - } - FOR j := 0 TO 3 - FOR i := 0 to 7 - dst.qword[j].byte[i] := affine_byte(A.qword[j], x.qword[j].byte[i], b) - ENDFOR - ENDFOR - dst[MAX:256] := 0 - - - GFNI - AVX512VL -
immintrin.h
- Arithmetic + + + + + Compute an affine transformation in the Galois Field 2^8. An affine transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, "x" represents an 8-bit vector, and "b" is a constant immediate byte. Store the packed 8-bit results in "dst". + +DEFINE parity(x) { + t := 0 + FOR i := 0 to 7 + t := t XOR x.bit[i] + ENDFOR + RETURN t +} +DEFINE affine_byte(tsrc2qw, src1byte, imm8) { + FOR i := 0 to 7 + retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND src1byte) XOR imm8.bit[i] + ENDFOR + RETURN retbyte +} +FOR j := 0 TO 3 + FOR i := 0 to 7 + dst.qword[j].byte[i] := affine_byte(A.qword[j], x.qword[j].byte[i], b) + ENDFOR +ENDFOR +dst[MAX:256] := 0 + + + GFNI + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Compute an affine transformation in the Galois Field 2^8. An affine - transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, - "x" represents an 8-bit vector, and "b" is a constant immediate byte. Store the packed - 8-bit results in "dst" using zeromask "k" (elements are zeroed out when the - corresponding mask bit is not set). - - DEFINE parity(x) { - t := 0 - FOR i := 0 to 7 - t := t XOR x.bit[i] - ENDFOR - RETURN t - } - DEFINE affine_byte(tsrc2qw, src1byte, imm8) { - FOR i := 0 to 7 - retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND src1byte) XOR imm8.bit[i] - ENDFOR - RETURN retbyte - } - FOR j := 0 TO 1 - FOR i := 0 to 7 - IF k[j*8+i] + + + + + + Compute an affine transformation in the Galois Field 2^8. An affine transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, "x" represents an 8-bit vector, and "b" is a constant immediate byte. Store the packed 8-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + +DEFINE parity(x) { + t := 0 + FOR i := 0 to 7 + t := t XOR x.bit[i] + ENDFOR + RETURN t +} +DEFINE affine_byte(tsrc2qw, src1byte, imm8) { + FOR i := 0 to 7 + retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND src1byte) XOR imm8.bit[i] + ENDFOR + RETURN retbyte +} +FOR j := 0 TO 1 + FOR i := 0 to 7 + IF k[j*8+i] dst.qword[j].byte[i] := affine_byte(A.qword[j], x.qword[j].byte[i], b) - ELSE + ELSE dst.qword[j].byte[i] := 0 - FI - ENDFOR - ENDFOR - dst[MAX:128] := 0 - - - GFNI - AVX512VL -
immintrin.h
- Arithmetic + FI + ENDFOR +ENDFOR +dst[MAX:128] := 0 +
+ + GFNI + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - - Compute an affine transformation in the Galois Field 2^8. An affine - transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, - "x" represents an 8-bit vector, and "b" is a constant immediate byte. Store the packed - 8-bit results in "dst" using writemask "k" (elements are copied from "src" when the - corresponding mask bit is not set). - - DEFINE parity(x) { - t := 0 - FOR i := 0 to 7 - t := t XOR x.bit[i] - ENDFOR - RETURN t - } - DEFINE affine_byte(tsrc2qw, src1byte, imm8) { - FOR i := 0 to 7 - retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND src1byte) XOR imm8.bit[i] - ENDFOR - RETURN retbyte - } - FOR j := 0 TO 1 - FOR i := 0 to 7 - IF k[j*8+i] + + + + + + + Compute an affine transformation in the Galois Field 2^8. An affine transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, "x" represents an 8-bit vector, and "b" is a constant immediate byte. Store the packed 8-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + +DEFINE parity(x) { + t := 0 + FOR i := 0 to 7 + t := t XOR x.bit[i] + ENDFOR + RETURN t +} +DEFINE affine_byte(tsrc2qw, src1byte, imm8) { + FOR i := 0 to 7 + retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND src1byte) XOR imm8.bit[i] + ENDFOR + RETURN retbyte +} +FOR j := 0 TO 1 + FOR i := 0 to 7 + IF k[j*8+i] dst.qword[j].byte[i] := affine_byte(A.qword[j], x.qword[j].byte[i], b) - ELSE + ELSE dst.qword[j].byte[i] := src.qword[j].byte[i] - FI - ENDFOR - ENDFOR - dst[MAX:128] := 0 - - - GFNI - AVX512VL -
immintrin.h
- Arithmetic + FI + ENDFOR +ENDFOR +dst[MAX:128] := 0 +
+ + GFNI + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Compute an affine transformation in the Galois Field 2^8. An affine - transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, - "x" represents an 8-bit vector, and "b" is a constant immediate byte. Store the packed - 8-bit results in "dst". - - DEFINE parity(x) { - t := 0 - FOR i := 0 to 7 - t := t XOR x.bit[i] - ENDFOR - RETURN t - } - DEFINE affine_byte(tsrc2qw, src1byte, imm8) { - FOR i := 0 to 7 - retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND src1byte) XOR imm8.bit[i] - ENDFOR - RETURN retbyte - } - FOR j := 0 TO 1 - FOR i := 0 to 7 - dst.qword[j].byte[i] := affine_byte(A.qword[j], x.qword[j].byte[i], b) - ENDFOR - ENDFOR - dst[MAX:128] := 0 - - - GFNI - AVX512VL -
immintrin.h
- Arithmetic + + + + + Compute an affine transformation in the Galois Field 2^8. An affine transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, "x" represents an 8-bit vector, and "b" is a constant immediate byte. Store the packed 8-bit results in "dst". + +DEFINE parity(x) { + t := 0 + FOR i := 0 to 7 + t := t XOR x.bit[i] + ENDFOR + RETURN t +} +DEFINE affine_byte(tsrc2qw, src1byte, imm8) { + FOR i := 0 to 7 + retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND src1byte) XOR imm8.bit[i] + ENDFOR + RETURN retbyte +} +FOR j := 0 TO 1 + FOR i := 0 to 7 + dst.qword[j].byte[i] := affine_byte(A.qword[j], x.qword[j].byte[i], b) + ENDFOR +ENDFOR +dst[MAX:128] := 0 + + + GFNI + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Compute an inverse affine transformation in the Galois Field 2^8. An affine - transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, - "x" represents an 8-bit vector, and "b" is a constant immediate byte. The inverse of the - 8-bit values in "x" is defined with respect to the reduction polynomial x^8 + x^4 + x^3 - + x + 1. Store the packed 8-bit results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - DEFINE parity(x) { - t := 0 - FOR i := 0 to 7 - t := t XOR x.bit[i] - ENDFOR - RETURN t - } - DEFINE affine_inverse_byte(tsrc2qw, src1byte, imm8) { - FOR i := 0 to 7 - retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND inverse(src1byte)) XOR imm8.bit[i] - ENDFOR - RETURN retbyte - } - FOR j := 0 TO 3 - FOR i := 0 to 7 - IF k[j*8+i] + + + + + + Compute an inverse affine transformation in the Galois Field 2^8. An affine transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, "x" represents an 8-bit vector, and "b" is a constant immediate byte. The inverse of the 8-bit values in "x" is defined with respect to the reduction polynomial x^8 + x^4 + x^3 + x + 1. Store the packed 8-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + DEFINE parity(x) { + t := 0 + FOR i := 0 to 7 + t := t XOR x.bit[i] + ENDFOR + RETURN t +} +DEFINE affine_inverse_byte(tsrc2qw, src1byte, imm8) { + FOR i := 0 to 7 + retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND inverse(src1byte)) XOR imm8.bit[i] + ENDFOR + RETURN retbyte +} +FOR j := 0 TO 3 + FOR i := 0 to 7 + IF k[j*8+i] dst.qword[j].byte[i] := affine_inverse_byte(A.qword[j], x.qword[j].byte[i], b) - ELSE + ELSE dst.qword[j].byte[i] := 0 - FI - ENDFOR - ENDFOR - dst[MAX:256] := 0 - - - GFNI - AVX512VL -
immintrin.h
- Arithmetic + FI + ENDFOR +ENDFOR +dst[MAX:256] := 0 +
+ + GFNI + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - - Compute an inverse affine transformation in the Galois Field 2^8. An affine - transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, - "x" represents an 8-bit vector, and "b" is a constant immediate byte. The inverse of the - 8-bit values in "x" is defined with respect to the reduction polynomial x^8 + x^4 + x^3 - + x + 1. Store the packed 8-bit results in "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). - DEFINE parity(x) { - t := 0 - FOR i := 0 to 7 - t := t XOR x.bit[i] - ENDFOR - RETURN t - } - DEFINE affine_inverse_byte(tsrc2qw, src1byte, imm8) { - FOR i := 0 to 7 - retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND inverse(src1byte)) XOR imm8.bit[i] - ENDFOR - RETURN retbyte - } - FOR j := 0 TO 3 - FOR i := 0 to 7 - IF k[j*8+i] + + + + + + + Compute an inverse affine transformation in the Galois Field 2^8. An affine transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, "x" represents an 8-bit vector, and "b" is a constant immediate byte. The inverse of the 8-bit values in "x" is defined with respect to the reduction polynomial x^8 + x^4 + x^3 + x + 1. Store the packed 8-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + DEFINE parity(x) { + t := 0 + FOR i := 0 to 7 + t := t XOR x.bit[i] + ENDFOR + RETURN t +} +DEFINE affine_inverse_byte(tsrc2qw, src1byte, imm8) { + FOR i := 0 to 7 + retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND inverse(src1byte)) XOR imm8.bit[i] + ENDFOR + RETURN retbyte +} +FOR j := 0 TO 3 + FOR i := 0 to 7 + IF k[j*8+i] dst.qword[j].byte[i] := affine_inverse_byte(A.qword[j], x.qword[j].byte[i], b) - ELSE + ELSE dst.qword[j].byte[i] := src.qword[j].byte[i] - FI - ENDFOR - ENDFOR - dst[MAX:256] := 0 - - - GFNI - AVX512VL -
immintrin.h
- Arithmetic + FI + ENDFOR +ENDFOR +dst[MAX:256] := 0 +
+ + GFNI + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Compute an inverse affine transformation in the Galois Field 2^8. An affine - transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, - "x" represents an 8-bit vector, and "b" is a constant immediate byte. The inverse of the - 8-bit values in "x" is defined with respect to the reduction polynomial x^8 + x^4 + x^3 - + x + 1. Store the packed 8-bit results in "dst". - DEFINE parity(x) { - t := 0 - FOR i := 0 to 7 - t := t XOR x.bit[i] - ENDFOR - RETURN t - } - DEFINE affine_inverse_byte(tsrc2qw, src1byte, imm8) { - FOR i := 0 to 7 - retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND inverse(src1byte)) XOR imm8.bit[i] - ENDFOR - RETURN retbyte - } - FOR j := 0 TO 3 - FOR i := 0 to 7 - dst.qword[j].byte[i] := affine_inverse_byte(A.qword[j], x.qword[j].byte[i], b) - ENDFOR - ENDFOR - dst[MAX:256] := 0 - - - GFNI - AVX512VL -
immintrin.h
- Arithmetic + + + + + Compute an inverse affine transformation in the Galois Field 2^8. An affine transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, "x" represents an 8-bit vector, and "b" is a constant immediate byte. The inverse of the 8-bit values in "x" is defined with respect to the reduction polynomial x^8 + x^4 + x^3 + x + 1. Store the packed 8-bit results in "dst". + DEFINE parity(x) { + t := 0 + FOR i := 0 to 7 + t := t XOR x.bit[i] + ENDFOR + RETURN t +} +DEFINE affine_inverse_byte(tsrc2qw, src1byte, imm8) { + FOR i := 0 to 7 + retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND inverse(src1byte)) XOR imm8.bit[i] + ENDFOR + RETURN retbyte +} +FOR j := 0 TO 3 + FOR i := 0 to 7 + dst.qword[j].byte[i] := affine_inverse_byte(A.qword[j], x.qword[j].byte[i], b) + ENDFOR +ENDFOR +dst[MAX:256] := 0 + + + GFNI + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - Compute an inverse affine transformation in the Galois Field 2^8. An affine - transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, - "x" represents an 8-bit vector, and "b" is a constant immediate byte. The inverse of the - 8-bit values in "x" is defined with respect to the reduction polynomial x^8 + x^4 + x^3 - + x + 1. Store the packed 8-bit results in "dst" using zeromask "k" (elements are zeroed - out when the corresponding mask bit is not set). - DEFINE parity(x) { - t := 0 - FOR i := 0 to 7 - t := t XOR x.bit[i] - ENDFOR - RETURN t - } - DEFINE affine_inverse_byte(tsrc2qw, src1byte, imm8) { - FOR i := 0 to 7 - retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND inverse(src1byte)) XOR imm8.bit[i] - ENDFOR - RETURN retbyte - } - FOR j := 0 TO 1 - FOR i := 0 to 7 - IF k[j*8+i] + + + + + + Compute an inverse affine transformation in the Galois Field 2^8. An affine transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, "x" represents an 8-bit vector, and "b" is a constant immediate byte. The inverse of the 8-bit values in "x" is defined with respect to the reduction polynomial x^8 + x^4 + x^3 + x + 1. Store the packed 8-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). + DEFINE parity(x) { + t := 0 + FOR i := 0 to 7 + t := t XOR x.bit[i] + ENDFOR + RETURN t +} +DEFINE affine_inverse_byte(tsrc2qw, src1byte, imm8) { + FOR i := 0 to 7 + retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND inverse(src1byte)) XOR imm8.bit[i] + ENDFOR + RETURN retbyte +} +FOR j := 0 TO 1 + FOR i := 0 to 7 + IF k[j*8+i] dst.qword[j].byte[i] := affine_inverse_byte(A.qword[j], x.qword[j].byte[i], b) - ELSE + ELSE dst.qword[j].byte[i] := 0 - FI - ENDFOR - ENDFOR - dst[MAX:128] := 0 - - - GFNI - AVX512VL -
immintrin.h
- Arithmetic + FI + ENDFOR +ENDFOR +dst[MAX:128] := 0 +
+ + GFNI + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - - - Compute an inverse affine transformation in the Galois Field 2^8. An affine - transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, - "x" represents an 8-bit vector, and "b" is a constant immediate byte. The inverse of the - 8-bit values in "x" is defined with respect to the reduction polynomial x^8 + x^4 + x^3 - + x + 1. Store the packed 8-bit results in "dst" using writemask "k" (elements are - copied from "src" when the corresponding mask bit is not set). - DEFINE parity(x) { - t := 0 - FOR i := 0 to 7 - t := t XOR x.bit[i] - ENDFOR - RETURN t - } - DEFINE affine_inverse_byte(tsrc2qw, src1byte, imm8) { - FOR i := 0 to 7 - retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND inverse(src1byte)) XOR imm8.bit[i] - ENDFOR - RETURN retbyte - } - FOR j := 0 TO 1 - FOR i := 0 to 7 - IF k[j*8+i] + + + + + + + Compute an inverse affine transformation in the Galois Field 2^8. An affine transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, "x" represents an 8-bit vector, and "b" is a constant immediate byte. The inverse of the 8-bit values in "x" is defined with respect to the reduction polynomial x^8 + x^4 + x^3 + x + 1. Store the packed 8-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). + DEFINE parity(x) { + t := 0 + FOR i := 0 to 7 + t := t XOR x.bit[i] + ENDFOR + RETURN t +} +DEFINE affine_inverse_byte(tsrc2qw, src1byte, imm8) { + FOR i := 0 to 7 + retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND inverse(src1byte)) XOR imm8.bit[i] + ENDFOR + RETURN retbyte +} +FOR j := 0 TO 1 + FOR i := 0 to 7 + IF k[j*8+i] dst.qword[j].byte[i] := affine_inverse_byte(A.qword[j], x.qword[j].byte[i], b) - ELSE + ELSE dst.qword[j].byte[i] := src.qword[j].byte[i] - FI - ENDFOR - ENDFOR - dst[MAX:128] := 0 - - - GFNI - AVX512VL -
immintrin.h
- Arithmetic + FI + ENDFOR +ENDFOR +dst[MAX:128] := 0 +
+ + GFNI + AVX512VL +
immintrin.h
+ Arithmetic
- - - - - Compute an inverse affine transformation in the Galois Field 2^8. An affine - transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, - "x" represents an 8-bit vector, and "b" is a constant immediate byte. The inverse of the - 8-bit values in "x" is defined with respect to the reduction polynomial x^8 + x^4 + x^3 - + x + 1. Store the packed 8-bit results in "dst". - DEFINE parity(x) { - t := 0 - FOR i := 0 to 7 - t := t XOR x.bit[i] - ENDFOR - RETURN t - } - DEFINE affine_inverse_byte(tsrc2qw, src1byte, imm8) { - FOR i := 0 to 7 - retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND inverse(src1byte)) XOR imm8.bit[i] - ENDFOR - RETURN retbyte - } - FOR j := 0 TO 1 - FOR i := 0 to 7 - dst.qword[j].byte[i] := affine_inverse_byte(A.qword[j], x.qword[j].byte[i], b) - ENDFOR - ENDFOR - dst[MAX:128] := 0 - - - GFNI - AVX512VL -
immintrin.h
- Arithmetic -
- - - - - - Provides a hint to the processor to selectively reset the prediction history of - the current logical processor specified by a signed 32-bit integer "__eax". - - HRESET -
immintrin.h
- General Support -
- - - - - - Invalidate mappings in the Translation Lookaside Buffers (TLBs) and - paging-structure caches for the processor context identifier (PCID) specified by - "descriptor" based on the invalidation type specified in "type". - The PCID "descriptor" is specified as a 16-byte memory operand (with no alignment - restrictions) where bits [11:0] specify the PCID, and bits [127:64] specify the linear - address; bits [63:12] are reserved. - The types supported are: - 0) Individual-address invalidation: If "type" is 0, the logical processor invalidates - mappings for a single linear address and tagged with the PCID specified in "descriptor", - except global translations. The instruction may also invalidate global translations, - mappings for other linear addresses, or mappings tagged with other PCIDs. - 1) Single-context invalidation: If "type" is 1, the logical processor invalidates all - mappings tagged with the PCID specified in "descriptor" except global translations. In - some cases, it may invalidate mappings for other PCIDs as well. - 2) All-context invalidation: If "type" is 2, the logical processor invalidates all - mappings tagged with any PCID. - 3) All-context invalidation, retaining global translations: If "type" is 3, the logical - processor invalidates all mappings tagged with any PCID except global translations, - ignoring "descriptor". The instruction may also invalidate global translations as well. - - CASE type[1:0] OF - 0: // individual-address invalidation retaining global translations - OP_PCID := MEM[descriptor+11:descriptor] - ADDR := MEM[descriptor+127:descriptor+64] - BREAK - 1: // single PCID invalidation retaining globals - OP_PCID := MEM[descriptor+11:descriptor] - // invalidate all mappings tagged with OP_PCID except global translations - BREAK - 2: // all PCID invalidation - // invalidate all mappings tagged with any PCID - BREAK - 3: // all PCID invalidation retaining global translations - // invalidate all mappings tagged with any PCID except global translations - BREAK - ESAC - - - INVPCID -
immintrin.h
- OS-Targeted -
- - - + + + + + Compute an inverse affine transformation in the Galois Field 2^8. An affine transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, "x" represents an 8-bit vector, and "b" is a constant immediate byte. The inverse of the 8-bit values in "x" is defined with respect to the reduction polynomial x^8 + x^4 + x^3 + x + 1. Store the packed 8-bit results in "dst". + DEFINE parity(x) { + t := 0 + FOR i := 0 to 7 + t := t XOR x.bit[i] + ENDFOR + RETURN t +} +DEFINE affine_inverse_byte(tsrc2qw, src1byte, imm8) { + FOR i := 0 to 7 + retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND inverse(src1byte)) XOR imm8.bit[i] + ENDFOR + RETURN retbyte +} +FOR j := 0 TO 1 + FOR i := 0 to 7 + dst.qword[j].byte[i] := affine_inverse_byte(A.qword[j], x.qword[j].byte[i], b) + ENDFOR +ENDFOR +dst[MAX:128] := 0 + + + GFNI + AVX512VL +
immintrin.h
+ Arithmetic +
+ + + + + + Provides a hint to the processor to selectively reset the prediction history of the current logical processor specified by a signed 32-bit integer "__eax". + + HRESET +
immintrin.h
+ General Support +
+ + + + + + Invalidate mappings in the Translation Lookaside Buffers (TLBs) and paging-structure caches for the processor context identifier (PCID) specified by "descriptor" based on the invalidation type specified in "type". + The PCID "descriptor" is specified as a 16-byte memory operand (with no alignment restrictions) where bits [11:0] specify the PCID, and bits [127:64] specify the linear address; bits [63:12] are reserved. + The types supported are: + 0) Individual-address invalidation: If "type" is 0, the logical processor invalidates mappings for a single linear address and tagged with the PCID specified in "descriptor", except global translations. The instruction may also invalidate global translations, mappings for other linear addresses, or mappings tagged with other PCIDs. + 1) Single-context invalidation: If "type" is 1, the logical processor invalidates all mappings tagged with the PCID specified in "descriptor" except global translations. In some cases, it may invalidate mappings for other PCIDs as well. + 2) All-context invalidation: If "type" is 2, the logical processor invalidates all mappings tagged with any PCID. + 3) All-context invalidation, retaining global translations: If "type" is 3, the logical processor invalidates all mappings tagged with any PCID except global translations, ignoring "descriptor". The instruction may also invalidate global translations as well. + +CASE type[1:0] OF +0: // individual-address invalidation retaining global translations + OP_PCID := MEM[descriptor+11:descriptor] + ADDR := MEM[descriptor+127:descriptor+64] + BREAK +1: // single PCID invalidation retaining globals + OP_PCID := MEM[descriptor+11:descriptor] + // invalidate all mappings tagged with OP_PCID except global translations + BREAK +2: // all PCID invalidation + // invalidate all mappings tagged with any PCID + BREAK +3: // all PCID invalidation retaining global translations + // invalidate all mappings tagged with any PCID except global translations + BREAK +ESAC + + + INVPCID +
immintrin.h
+ OS-Targeted +
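The descriptor layout is the easiest part of INVPCID to get wrong, so a small Rust sketch of packing it follows (illustrative only; the instruction itself is privileged and is not modeled here).

// Build the 16-byte INVPCID descriptor described above: PCID in bits
// [11:0], reserved zeros in bits [63:12], linear address in bits [127:64].
fn invpcid_descriptor(pcid: u16, linear_address: u64) -> [u8; 16] {
    assert!(pcid < (1 << 12), "PCID occupies bits [11:0]");
    let mut desc = [0u8; 16];
    desc[..8].copy_from_slice(&u64::from(pcid).to_le_bytes()); // bits [63:12] stay zero
    desc[8..].copy_from_slice(&linear_address.to_le_bytes()); // bits [127:64]
    desc
}

fn main() {
    let d = invpcid_descriptor(0x123, 0xFFFF_8000_0000_1000);
    assert_eq!(d[0], 0x23);
    assert_eq!(d[1], 0x01);
    assert_eq!(&d[8..], &0xFFFF_8000_0000_1000u64.to_le_bytes());
}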
+ + + Flag - Decrypt 10 rounds of unsigned 8-bit integers in "__idata" using 128-bit AES key - specified in "__h", store the resulting unsigned 8-bit integers into the corresponding - elements of "__odata", and set "dst" to the ZF flag status. If exception happens, set ZF - flag to 1 and zero initialize "__odata". + Decrypt 10 rounds of unsigned 8-bit integers in "__idata" using 128-bit AES key specified in "__h", store the resulting unsigned 8-bit integers into the corresponding elements of "__odata", and set "dst" to the ZF flag status. If exception happens, set ZF flag to 1 and zero initialize "__odata". MEM[__odata+127:__odata] := AES128Decrypt (__idata[127:0], __h[383:0]) - dst := ZF +dst := ZF - KEYLOCKER -
immintrin.h
- Cryptography + KEYLOCKER +
immintrin.h
+ Cryptography
Flag
 @@ -161304,17 +142227,14 @@
 
 
 
 - Decrypt 10 rounds of unsigned 8-bit integers in "__idata" using 256-bit AES key
- specified in "__h", store the resulting unsigned 8-bit integers into the corresponding
- elements of "__odata", and set "dst" to the ZF flag status. If exception happens, set ZF
- flag to 1 and zero initialize "__odata".
+ Decrypt 14 rounds of unsigned 8-bit integers in "__idata" using 256-bit AES key specified in "__h", store the resulting unsigned 8-bit integers into the corresponding elements of "__odata", and set "dst" to the ZF flag status. If exception happens, set ZF flag to 1 and zero initialize "__odata".
 MEM[__odata+127:__odata] := AES256Decrypt (__idata[127:0], __h[511:0])
- dst := ZF
+dst := ZF
 
 
- KEYLOCKER
-
immintrin.h
- Cryptography + KEYLOCKER +
immintrin.h
+ Cryptography
Flag @@ -161322,16 +142242,14 @@ - Encrypt 10 rounds of unsigned 8-bit integers in "__idata" using 128-bit AES key - specified in "__h", store the resulting unsigned 8-bit integers into the corresponding - elements of "__odata", and set "dst" to the ZF flag status. + Encrypt 10 rounds of unsigned 8-bit integers in "__idata" using 128-bit AES key specified in "__h", store the resulting unsigned 8-bit integers into the corresponding elements of "__odata", and set "dst" to the ZF flag status. MEM[__odata+127:__odata] := AES128Encrypt (__idata[127:0], __h[383:0]) - dst := ZF +dst := ZF - KEYLOCKER -
immintrin.h
- Cryptography + KEYLOCKER +
immintrin.h
+ Cryptography
Flag
 @@ -161339,17 +142257,14 @@
 
 
 
 - Encrypt 10 rounds of unsigned 8-bit integers in "__idata" using 256-bit AES key
- specified in "__h", store the resulting unsigned 8-bit integers into the corresponding
- elements of "__odata", and set "dst" to the ZF flag status. If exception happens, set ZF
- flag to 1 and zero initialize "__odata".
+ Encrypt 14 rounds of unsigned 8-bit integers in "__idata" using 256-bit AES key specified in "__h", store the resulting unsigned 8-bit integers into the corresponding elements of "__odata", and set "dst" to the ZF flag status. If exception happens, set ZF flag to 1 and zero initialize "__odata".
 MEM[__odata+127:__odata] := AES256Encrypt (__idata[127:0], __h[511:0])
- dst := ZF
+dst := ZF
 
 
- KEYLOCKER
-
immintrin.h
- Cryptography + KEYLOCKER +
immintrin.h
+ Cryptography
Flag
 @@ -161357,17 +142272,15 @@
 
 
 
 - Wrap a 128-bit AES key from "__key" into a 384-bit key __h stored in "__h" and
- set IWKey's NoBackup and KeySource bits in "dst". The explicit source operand "__htype"
- specifies __h restrictions.
+ Wrap a 128-bit AES key from "__key" into a 384-bit key handle stored in "__h" and set IWKey's NoBackup and KeySource bits in "dst". The explicit source operand "__htype" specifies __h restrictions.
 __h[383:0] := WrapKey128(__key[127:0], __htype)
- dst[0] := IWKey.NoBackup
- dst[4:1] := IWKey.KeySource[3:0]
+dst[0] := IWKey.NoBackup
+dst[4:1] := IWKey.KeySource[3:0]
 
 
- KEYLOCKER
-
immintrin.h
- Cryptography + KEYLOCKER +
immintrin.h
+ Cryptography
Flag @@ -161376,17 +142289,15 @@ - Wrap a 256-bit AES key from "__key_hi" and "__key_lo" into a 512-bit key stored - in "__h" and set IWKey's NoBackup and KeySource bits in "dst". The 32-bit "__htype" - specifies __h restrictions. + Wrap a 256-bit AES key from "__key_hi" and "__key_lo" into a 512-bit key stored in "__h" and set IWKey's NoBackup and KeySource bits in "dst". The 32-bit "__htype" specifies __h restrictions. __h[511:0] := WrapKey256(__key_lo[127:0], __key_hi[127:0], __htype) - dst[0] := IWKey.NoBackup - dst[4:1] := IWKey.KeySource[3:0] +dst[0] := IWKey.NoBackup +dst[4:1] := IWKey.KeySource[3:0] - KEYLOCKER -
immintrin.h
- Cryptography + KEYLOCKER +
immintrin.h
+ Cryptography
Flag @@ -161395,34 +142306,28 @@ - Load internal wrapping key (IWKey). The 32-bit unsigned integer "__ctl" - specifies IWKey's KeySource and whether backing up the key is permitted. IWKey's 256-bit - encryption key is loaded from "__enkey_lo" and "__enkey_hi". IWKey's 128-bit integrity - key is loaded from "__intkey". + Load internal wrapping key (IWKey). The 32-bit unsigned integer "__ctl" specifies IWKey's KeySource and whether backing up the key is permitted. IWKey's 256-bit encryption key is loaded from "__enkey_lo" and "__enkey_hi". IWKey's 128-bit integrity key is loaded from "__intkey". - KEYLOCKER -
immintrin.h
- Cryptography + KEYLOCKER +
immintrin.h
+ Cryptography
- + Flag - Decrypt 10 rounds of 8 groups of unsigned 8-bit integers in "__idata" using - 128-bit AES key specified in "__h", store the resulting unsigned 8-bit integers into the - corresponding elements of "__odata", and set "dst" to the ZF flag status. If exception - happens, set ZF flag to 1 and zero initialize "__odata". + Decrypt 10 rounds of 8 groups of unsigned 8-bit integers in "__idata" using 128-bit AES key specified in "__h", store the resulting unsigned 8-bit integers into the corresponding elements of "__odata", and set "dst" to the ZF flag status. If exception happens, set ZF flag to 1 and zero initialize "__odata". FOR i := 0 to 7 - __odata[i] := AES128Decrypt (__idata[i], __h[383:0]) - ENDFOR - dst := ZF + __odata[i] := AES128Decrypt (__idata[i], __h[383:0]) +ENDFOR +dst := ZF - KEYLOCKER_WIDE -
immintrin.h
- Cryptography + KEYLOCKER_WIDE +
immintrin.h
+ Cryptography
Flag
 @@ -161430,19 +142335,16 @@
 
 
 
 - Decrypt 10 rounds of 8 groups of unsigned 8-bit integers in "__idata" using
- 256-bit AES key specified in "__h", store the resulting unsigned 8-bit integers into the
- corresponding elements of "__odata", and set "dst" to the ZF flag status. If exception
- happens, set ZF flag to 1 and zero initialize "__odata".
+ Decrypt 14 rounds of 8 groups of unsigned 8-bit integers in "__idata" using 256-bit AES key specified in "__h", store the resulting unsigned 8-bit integers into the corresponding elements of "__odata", and set "dst" to the ZF flag status. If exception happens, set ZF flag to 1 and zero initialize "__odata".
 FOR i := 0 to 7
- __odata[i] := AES256Decrypt (__idata[i], __h[511:0])
- ENDFOR
- dst := ZF
+ __odata[i] := AES256Decrypt (__idata[i], __h[511:0])
+ENDFOR
+dst := ZF
 
 
- KEYLOCKER_WIDE
-
immintrin.h
- Cryptography + KEYLOCKER_WIDE +
immintrin.h
+ Cryptography
Flag @@ -161450,19 +142352,16 @@ - Encrypt 10 rounds of 8 groups of unsigned 8-bit integers in "__idata" using - 128-bit AES key specified in "__h", store the resulting unsigned 8-bit integers into the - corresponding elements of "__odata", and set "dst" to the ZF flag status. If exception - happens, set ZF flag to 1 and zero initialize "__odata". + Encrypt 10 rounds of 8 groups of unsigned 8-bit integers in "__idata" using 128-bit AES key specified in "__h", store the resulting unsigned 8-bit integers into the corresponding elements of "__odata", and set "dst" to the ZF flag status. If exception happens, set ZF flag to 1 and zero initialize "__odata". FOR i := 0 to 7 - __odata[i] := AES128Encrypt (__idata[i], __h[383:0]) - ENDFOR - dst := ZF + __odata[i] := AES128Encrypt (__idata[i], __h[383:0]) +ENDFOR +dst := ZF - KEYLOCKER_WIDE -
immintrin.h
- Cryptography + KEYLOCKER_WIDE +
immintrin.h
+ Cryptography
Flag
 @@ -161470,17319 +142369,16054 @@
 
 
 
 - Encrypt 10 rounds of 8 groups of unsigned 8-bit integers in "__idata" using
- 256-bit AES key specified in "__h", store the resulting unsigned 8-bit integers into the
- corresponding elements of "__odata", and set "dst" to the ZF flag status. If exception
- happens, set ZF flag to 1 and zero initialize "__odata".
+ Encrypt 14 rounds of 8 groups of unsigned 8-bit integers in "__idata" using 256-bit AES key specified in "__h", store the resulting unsigned 8-bit integers into the corresponding elements of "__odata", and set "dst" to the ZF flag status. If exception happens, set ZF flag to 1 and zero initialize "__odata".
 FOR i := 0 to 7
- __odata[i] := AES256Encrypt (__idata[i], __h[512:0])
- ENDFOR
- dst := ZF
+ __odata[i] := AES256Encrypt (__idata[i], __h[511:0])
+ENDFOR
+dst := ZF
 
 
- KEYLOCKER_WIDE
-
immintrin.h
- Cryptography -
- - - - - Count the number of leading zero bits in unsigned 32-bit integer "a", and - return that count in "dst". - - tmp := 31 - dst := 0 - DO WHILE (tmp >= 0 AND a[tmp] == 0) - tmp := tmp - 1 - dst := dst + 1 - OD - - - LZCNT -
immintrin.h
- Bit Manipulation + KEYLOCKER_WIDE +
immintrin.h
+ Cryptography +
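All of the Key Locker entries above share one convention: the returned value mirrors ZF, and on failure the output is zero-initialized. A tiny Rust model of just that convention follows (hypothetical names and a stand-in cipher; real hardware validates the 384- or 512-bit handle instead of taking a boolean).

// Model of the shared ZF convention (a sketch, not real Key Locker):
// callers must check the returned flag before trusting odata.
fn keylocker_op(handle_valid: bool, cipher: impl Fn([u8; 16]) -> [u8; 16], idata: [u8; 16]) -> (u8, [u8; 16]) {
    if handle_valid {
        (0, cipher(idata)) // ZF = 0: __odata holds the processed block
    } else {
        (1, [0u8; 16]) // ZF = 1: __odata is zero-initialized
    }
}

fn main() {
    let dummy = |block: [u8; 16]| block.map(|b| b.wrapping_add(1)); // stand-in for AES128Encrypt
    let (zf, out) = keylocker_op(true, dummy, [0u8; 16]);
    assert_eq!((zf, out[0]), (0, 1));
    let (zf, out) = keylocker_op(false, dummy, [7u8; 16]);
    assert_eq!((zf, out), (1, [0u8; 16]));
}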
+ + + + + Count the number of leading zero bits in unsigned 32-bit integer "a", and return that count in "dst". + +tmp := 31 +dst := 0 +DO WHILE (tmp >= 0 AND a[tmp] == 0) + tmp := tmp - 1 + dst := dst + 1 +OD + + + LZCNT +
immintrin.h
+ Bit Manipulation
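A direct Rust transcription of the LZCNT loop above (a sketch), cross-checked against u32::leading_zeros; note that lzcnt of 0 is the full operand width, unlike BSR.

// Scan from bit 31 downward, counting zeros until the first set bit.
fn lzcnt_u32(a: u32) -> u32 {
    let mut tmp: i32 = 31;
    let mut dst = 0;
    while tmp >= 0 && (a >> tmp) & 1 == 0 {
        tmp -= 1;
        dst += 1;
    }
    dst
}

fn main() {
    for a in [0u32, 1, 0x8000_0000, 0x0000_F000] {
        // `u32::leading_zeros` computes the same count, including lzcnt(0) == 32.
        assert_eq!(lzcnt_u32(a), a.leading_zeros());
    }
}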
- - - Count the number of leading zero bits in unsigned 64-bit integer "a", and - return that count in "dst". - - tmp := 63 - dst := 0 - DO WHILE (tmp >= 0 AND a[tmp] == 0) - tmp := tmp - 1 - dst := dst + 1 - OD - - - LZCNT -
immintrin.h
- Bit Manipulation -
- - - - - - Copy 64-bit integer "a" to "dst". - - dst[63:0] := a[63:0] - - - MMX -
mmintrin.h
- Convert + + + Count the number of leading zero bits in unsigned 64-bit integer "a", and return that count in "dst". + +tmp := 63 +dst := 0 +DO WHILE (tmp >= 0 AND a[tmp] == 0) + tmp := tmp - 1 + dst := dst + 1 +OD + + + LZCNT +
immintrin.h
+ Bit Manipulation +
+ + + + + + Copy 64-bit integer "a" to "dst". + +dst[63:0] := a[63:0] + + + MMX +
mmintrin.h
+ Convert
- - - Copy 64-bit integer "a" to "dst". - - dst[63:0] := a[63:0] - - - MMX -
mmintrin.h
- Convert + + + Copy 64-bit integer "a" to "dst". + +dst[63:0] := a[63:0] + + + MMX +
mmintrin.h
+ Convert
- - - Copy 32-bit integer "a" to the lower elements of "dst", and zero the upper - element of "dst". - - dst[31:0] := a[31:0] - dst[63:32] := 0 - - - MMX -
mmintrin.h
- Convert + + + Copy 32-bit integer "a" to the lower elements of "dst", and zero the upper element of "dst". + +dst[31:0] := a[31:0] +dst[63:32] := 0 + + + MMX +
mmintrin.h
+ Convert
- - - Copy the lower 32-bit integer in "a" to "dst". - - dst[31:0] := a[31:0] - - - MMX -
mmintrin.h
- Convert + + + Copy the lower 32-bit integer in "a" to "dst". + +dst[31:0] := a[31:0] + + + MMX +
mmintrin.h
+ Convert
- - - Copy 32-bit integer "a" to the lower elements of "dst", and zero the upper - element of "dst". - - dst[31:0] := a[31:0] - dst[63:32] := 0 - - - MMX -
mmintrin.h
- Convert + + + Copy 32-bit integer "a" to the lower elements of "dst", and zero the upper element of "dst". + +dst[31:0] := a[31:0] +dst[63:32] := 0 + + + MMX +
mmintrin.h
+ Convert
- - - Copy the lower 32-bit integer in "a" to "dst". - - dst[31:0] := a[31:0] - - - MMX -
mmintrin.h
- Convert + + + Copy the lower 32-bit integer in "a" to "dst". + +dst[31:0] := a[31:0] + + + MMX +
mmintrin.h
+ Convert
- - - Copy 64-bit integer "a" to "dst". - - dst[63:0] := a[63:0] - - - MMX -
mmintrin.h
- Convert + + + Copy 64-bit integer "a" to "dst". + +dst[63:0] := a[63:0] + + + MMX +
mmintrin.h
+ Convert
- - - Copy 64-bit integer "a" to "dst". - - dst[63:0] := a[63:0] - - - MMX -
mmintrin.h
- Convert + + + Copy 64-bit integer "a" to "dst". + +dst[63:0] := a[63:0] + + + MMX +
mmintrin.h
+ Convert
- - - Empty the MMX state, which marks the x87 FPU registers as available for use by - x87 instructions. This instruction must be used at the end of all MMX technology - procedures. - - MMX -
mmintrin.h
- General Support + + + Empty the MMX state, which marks the x87 FPU registers as available for use by x87 instructions. This instruction must be used at the end of all MMX technology procedures. + + MMX +
mmintrin.h
+ General Support
- - - Empty the MMX state, which marks the x87 FPU registers as available for use by - x87 instructions. This instruction must be used at the end of all MMX technology - procedures. - - MMX -
mmintrin.h
- General Support + + + Empty the MMX state, which marks the x87 FPU registers as available for use by x87 instructions. This instruction must be used at the end of all MMX technology procedures. + + MMX +
mmintrin.h
+ General Support
- - - - Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers - using signed saturation, and store the results in "dst". - - dst[7:0] := Saturate8(a[15:0]) - dst[15:8] := Saturate8(a[31:16]) - dst[23:16] := Saturate8(a[47:32]) - dst[31:24] := Saturate8(a[63:48]) - dst[39:32] := Saturate8(b[15:0]) - dst[47:40] := Saturate8(b[31:16]) - dst[55:48] := Saturate8(b[47:32]) - dst[63:56] := Saturate8(b[63:48]) - - - MMX -
mmintrin.h
- Miscellaneous + + + + Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using signed saturation, and store the results in "dst". + +dst[7:0] := Saturate8(a[15:0]) +dst[15:8] := Saturate8(a[31:16]) +dst[23:16] := Saturate8(a[47:32]) +dst[31:24] := Saturate8(a[63:48]) +dst[39:32] := Saturate8(b[15:0]) +dst[47:40] := Saturate8(b[31:16]) +dst[55:48] := Saturate8(b[47:32]) +dst[63:56] := Saturate8(b[63:48]) + + + MMX +
mmintrin.h
+ Miscellaneous
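A Rust model of the Saturate8 pack above (a sketch): each signed 16-bit lane is clamped to [-128, 127], with "a" filling the low four result bytes and "b" the high four.

// Pack two 4-lane i16 vectors into one 8-lane i8 vector with signed saturation.
fn packs_pi16(a: [i16; 4], b: [i16; 4]) -> [i8; 8] {
    let sat8 = |v: i16| v.clamp(-128, 127) as i8; // Saturate8
    let mut dst = [0i8; 8];
    for i in 0..4 {
        dst[i] = sat8(a[i]);
        dst[i + 4] = sat8(b[i]);
    }
    dst
}

fn main() {
    assert_eq!(
        packs_pi16([300, -300, 5, 0], [i16::MIN, i16::MAX, -1, 1]),
        [127, -128, 5, 0, -128, 127, -1, 1]
    );
}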
- - - - Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit - integers using signed saturation, and store the results in "dst". - - dst[15:0] := Saturate16(a[31:0]) - dst[31:16] := Saturate16(a[63:32]) - dst[47:32] := Saturate16(b[31:0]) - dst[63:48] := Saturate16(b[63:32]) - - - MMX -
mmintrin.h
- Miscellaneous + + + + Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit integers using signed saturation, and store the results in "dst". + +dst[15:0] := Saturate16(a[31:0]) +dst[31:16] := Saturate16(a[63:32]) +dst[47:32] := Saturate16(b[31:0]) +dst[63:48] := Saturate16(b[63:32]) + + + MMX +
mmintrin.h
+ Miscellaneous
- - - - Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers - using unsigned saturation, and store the results in "dst". - - dst[7:0] := SaturateU8(a[15:0]) - dst[15:8] := SaturateU8(a[31:16]) - dst[23:16] := SaturateU8(a[47:32]) - dst[31:24] := SaturateU8(a[63:48]) - dst[39:32] := SaturateU8(b[15:0]) - dst[47:40] := SaturateU8(b[31:16]) - dst[55:48] := SaturateU8(b[47:32]) - dst[63:56] := SaturateU8(b[63:48]) - - - MMX -
mmintrin.h
- Miscellaneous + + + + Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using unsigned saturation, and store the results in "dst". + +dst[7:0] := SaturateU8(a[15:0]) +dst[15:8] := SaturateU8(a[31:16]) +dst[23:16] := SaturateU8(a[47:32]) +dst[31:24] := SaturateU8(a[63:48]) +dst[39:32] := SaturateU8(b[15:0]) +dst[47:40] := SaturateU8(b[31:16]) +dst[55:48] := SaturateU8(b[47:32]) +dst[63:56] := SaturateU8(b[63:48]) + + + MMX +
mmintrin.h
+ Miscellaneous
- - - - Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers - using signed saturation, and store the results in "dst". - - dst[7:0] := Saturate8(a[15:0]) - dst[15:8] := Saturate8(a[31:16]) - dst[23:16] := Saturate8(a[47:32]) - dst[31:24] := Saturate8(a[63:48]) - dst[39:32] := Saturate8(b[15:0]) - dst[47:40] := Saturate8(b[31:16]) - dst[55:48] := Saturate8(b[47:32]) - dst[63:56] := Saturate8(b[63:48]) - - - MMX -
mmintrin.h
- Miscellaneous + + + + Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using signed saturation, and store the results in "dst". + +dst[7:0] := Saturate8(a[15:0]) +dst[15:8] := Saturate8(a[31:16]) +dst[23:16] := Saturate8(a[47:32]) +dst[31:24] := Saturate8(a[63:48]) +dst[39:32] := Saturate8(b[15:0]) +dst[47:40] := Saturate8(b[31:16]) +dst[55:48] := Saturate8(b[47:32]) +dst[63:56] := Saturate8(b[63:48]) + + + MMX +
mmintrin.h
+ Miscellaneous
- - - - Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit - integers using signed saturation, and store the results in "dst". - - dst[15:0] := Saturate16(a[31:0]) - dst[31:16] := Saturate16(a[63:32]) - dst[47:32] := Saturate16(b[31:0]) - dst[63:48] := Saturate16(b[63:32]) - - - MMX -
mmintrin.h
- Miscellaneous + + + + Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit integers using signed saturation, and store the results in "dst". + +dst[15:0] := Saturate16(a[31:0]) +dst[31:16] := Saturate16(a[63:32]) +dst[47:32] := Saturate16(b[31:0]) +dst[63:48] := Saturate16(b[63:32]) + + + MMX +
mmintrin.h
+ Miscellaneous
- - - - Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers - using unsigned saturation, and store the results in "dst". - - dst[7:0] := SaturateU8(a[15:0]) - dst[15:8] := SaturateU8(a[31:16]) - dst[23:16] := SaturateU8(a[47:32]) - dst[31:24] := SaturateU8(a[63:48]) - dst[39:32] := SaturateU8(b[15:0]) - dst[47:40] := SaturateU8(b[31:16]) - dst[55:48] := SaturateU8(b[47:32]) - dst[63:56] := SaturateU8(b[63:48]) - - - MMX -
mmintrin.h
- Miscellaneous + + + + Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using unsigned saturation, and store the results in "dst". + +dst[7:0] := SaturateU8(a[15:0]) +dst[15:8] := SaturateU8(a[31:16]) +dst[23:16] := SaturateU8(a[47:32]) +dst[31:24] := SaturateU8(a[63:48]) +dst[39:32] := SaturateU8(b[15:0]) +dst[47:40] := SaturateU8(b[31:16]) +dst[55:48] := SaturateU8(b[47:32]) +dst[63:56] := SaturateU8(b[63:48]) + + + MMX +
mmintrin.h
+ Miscellaneous
- - - - Unpack and interleave 8-bit integers from the high half of "a" and "b", and - store the results in "dst". - - DEFINE INTERLEAVE_HIGH_BYTES(src1[63:0], src2[63:0]) { - dst[7:0] := src1[39:32] - dst[15:8] := src2[39:32] - dst[23:16] := src1[47:40] - dst[31:24] := src2[47:40] - dst[39:32] := src1[55:48] - dst[47:40] := src2[55:48] - dst[55:48] := src1[63:56] - dst[63:56] := src2[63:56] - RETURN dst[63:0] - } - dst[63:0] := INTERLEAVE_HIGH_BYTES(a[63:0], b[63:0]) - - - MMX -
mmintrin.h
- Swizzle + + + + Unpack and interleave 8-bit integers from the high half of "a" and "b", and store the results in "dst". + +DEFINE INTERLEAVE_HIGH_BYTES(src1[63:0], src2[63:0]) { + dst[7:0] := src1[39:32] + dst[15:8] := src2[39:32] + dst[23:16] := src1[47:40] + dst[31:24] := src2[47:40] + dst[39:32] := src1[55:48] + dst[47:40] := src2[55:48] + dst[55:48] := src1[63:56] + dst[63:56] := src2[63:56] + RETURN dst[63:0] +} +dst[63:0] := INTERLEAVE_HIGH_BYTES(a[63:0], b[63:0]) + + + MMX +
mmintrin.h
+ Swizzle
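A Rust model of INTERLEAVE_HIGH_BYTES (a sketch; byte indices in the comments are least-significant first, matching the pseudocode's byte[] notation).

// Interleave the upper four bytes of each 64-bit source: "a" supplies the
// even destination bytes and "b" the odd ones.
fn unpackhi_pi8(a: u64, b: u64) -> u64 {
    let mut dst = 0u64;
    for i in 0..4 {
        let abyte = (a >> (8 * (4 + i))) & 0xFF; // src1.byte[4+i]
        let bbyte = (b >> (8 * (4 + i))) & 0xFF; // src2.byte[4+i]
        dst |= abyte << (8 * (2 * i));
        dst |= bbyte << (8 * (2 * i + 1));
    }
    dst
}

fn main() {
    // bytes listed least-significant first: a = [a0..a7], b = [b0..b7]
    let a = u64::from_le_bytes([0, 1, 2, 3, 4, 5, 6, 7]);
    let b = u64::from_le_bytes([8, 9, 10, 11, 12, 13, 14, 15]);
    assert_eq!(unpackhi_pi8(a, b).to_le_bytes(), [4, 12, 5, 13, 6, 14, 7, 15]);
}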
- - - - Unpack and interleave 16-bit integers from the high half of "a" and "b", and - store the results in "dst". - - DEFINE INTERLEAVE_HIGH_WORDS(src1[63:0], src2[63:0]) { - dst[15:0] := src1[47:32] - dst[31:16] := src2[47:32] - dst[47:32] := src1[63:48] - dst[63:48] := src2[63:48] - RETURN dst[63:0] - } - dst[63:0] := INTERLEAVE_HIGH_WORDS(a[63:0], b[63:0]) - - - MMX -
mmintrin.h
- Swizzle + + + + Unpack and interleave 16-bit integers from the high half of "a" and "b", and store the results in "dst". + +DEFINE INTERLEAVE_HIGH_WORDS(src1[63:0], src2[63:0]) { + dst[15:0] := src1[47:32] + dst[31:16] := src2[47:32] + dst[47:32] := src1[63:48] + dst[63:48] := src2[63:48] + RETURN dst[63:0] +} +dst[63:0] := INTERLEAVE_HIGH_WORDS(a[63:0], b[63:0]) + + + MMX +
mmintrin.h
+ Swizzle
- - - - Unpack and interleave 32-bit integers from the high half of "a" and "b", and - store the results in "dst". - - dst[31:0] := a[63:32] - dst[63:32] := b[63:32] - - - MMX -
mmintrin.h
- Swizzle + + + + Unpack and interleave 32-bit integers from the high half of "a" and "b", and store the results in "dst". + +dst[31:0] := a[63:32] +dst[63:32] := b[63:32] + + + MMX +
mmintrin.h
+ Swizzle
- - - - Unpack and interleave 8-bit integers from the low half of "a" and "b", and - store the results in "dst". - - DEFINE INTERLEAVE_BYTES(src1[63:0], src2[63:0]) { - dst[7:0] := src1[7:0] - dst[15:8] := src2[7:0] - dst[23:16] := src1[15:8] - dst[31:24] := src2[15:8] - dst[39:32] := src1[23:16] - dst[47:40] := src2[23:16] - dst[55:48] := src1[31:24] - dst[63:56] := src2[31:24] - RETURN dst[63:0] - } - dst[63:0] := INTERLEAVE_BYTES(a[63:0], b[63:0]) - - - MMX -
mmintrin.h
- Swizzle + + + + Unpack and interleave 8-bit integers from the low half of "a" and "b", and store the results in "dst". + +DEFINE INTERLEAVE_BYTES(src1[63:0], src2[63:0]) { + dst[7:0] := src1[7:0] + dst[15:8] := src2[7:0] + dst[23:16] := src1[15:8] + dst[31:24] := src2[15:8] + dst[39:32] := src1[23:16] + dst[47:40] := src2[23:16] + dst[55:48] := src1[31:24] + dst[63:56] := src2[31:24] + RETURN dst[63:0] +} +dst[63:0] := INTERLEAVE_BYTES(a[63:0], b[63:0]) + + + MMX +
mmintrin.h
+ Swizzle
- - - - Unpack and interleave 16-bit integers from the low half of "a" and "b", and - store the results in "dst". - - DEFINE INTERLEAVE_WORDS(src1[63:0], src2[63:0]) { - dst[15:0] := src1[15:0] - dst[31:16] := src2[15:0] - dst[47:32] := src1[31:16] - dst[63:48] := src2[31:16] - RETURN dst[63:0] - } - dst[63:0] := INTERLEAVE_WORDS(a[63:0], b[63:0]) - - - MMX -
mmintrin.h
- Swizzle + + + + Unpack and interleave 16-bit integers from the low half of "a" and "b", and store the results in "dst". + +DEFINE INTERLEAVE_WORDS(src1[63:0], src2[63:0]) { + dst[15:0] := src1[15:0] + dst[31:16] := src2[15:0] + dst[47:32] := src1[31:16] + dst[63:48] := src2[31:16] + RETURN dst[63:0] +} +dst[63:0] := INTERLEAVE_WORDS(a[63:0], b[63:0]) + + + MMX +
mmintrin.h
+ Swizzle
- - - - Unpack and interleave 32-bit integers from the low half of "a" and "b", and - store the results in "dst". - - dst[31:0] := a[31:0] - dst[63:32] := b[31:0] - - - MMX -
mmintrin.h
- Swizzle + + + + Unpack and interleave 32-bit integers from the low half of "a" and "b", and store the results in "dst". + +dst[31:0] := a[31:0] +dst[63:32] := b[31:0] + + + MMX +
mmintrin.h
+ Swizzle
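(Illustrative C sketch, not part of this diff: the unpack entries above are the _mm_unpackhi_* / _mm_unpacklo_* family. Interleaving with a zero vector is the classic way to widen bytes to words, assuming mmintrin.h:)

    #include <mmintrin.h>
    #include <stdio.h>
    #include <string.h>

    static void dump(__m64 v) {
        unsigned char b[8];
        memcpy(b, &v, 8);
        for (int i = 0; i < 8; i++) printf("%02x ", b[i]);
        printf("\n");
    }

    int main(void) {
        __m64 a = _mm_set_pi8(7, 6, 5, 4, 3, 2, 1, 0);  /* bytes 0..7, low element first */
        __m64 zero = _mm_setzero_si64();
        dump(_mm_unpacklo_pi8(a, zero));  /* 00 00 01 00 02 00 03 00: low bytes widened */
        dump(_mm_unpackhi_pi8(a, zero));  /* 04 00 05 00 06 00 07 00: high bytes widened */
        _mm_empty();
        return 0;
    }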
- - - - Unpack and interleave 8-bit integers from the high half of "a" and "b", and - store the results in "dst". - - DEFINE INTERLEAVE_HIGH_BYTES(src1[63:0], src2[63:0]) { - dst[7:0] := src1[39:32] - dst[15:8] := src2[39:32] - dst[23:16] := src1[47:40] - dst[31:24] := src2[47:40] - dst[39:32] := src1[55:48] - dst[47:40] := src2[55:48] - dst[55:48] := src1[63:56] - dst[63:56] := src2[63:56] - RETURN dst[63:0] - } - dst[63:0] := INTERLEAVE_HIGH_BYTES(a[63:0], b[63:0]) - - - MMX -
mmintrin.h
- Swizzle + + + + Unpack and interleave 8-bit integers from the high half of "a" and "b", and store the results in "dst". + +DEFINE INTERLEAVE_HIGH_BYTES(src1[63:0], src2[63:0]) { + dst[7:0] := src1[39:32] + dst[15:8] := src2[39:32] + dst[23:16] := src1[47:40] + dst[31:24] := src2[47:40] + dst[39:32] := src1[55:48] + dst[47:40] := src2[55:48] + dst[55:48] := src1[63:56] + dst[63:56] := src2[63:56] + RETURN dst[63:0] +} +dst[63:0] := INTERLEAVE_HIGH_BYTES(a[63:0], b[63:0]) + + + MMX +
mmintrin.h
+ Swizzle
- - - - Unpack and interleave 16-bit integers from the high half of "a" and "b", and - store the results in "dst". - - DEFINE INTERLEAVE_HIGH_WORDS(src1[63:0], src2[63:0]) { - dst[15:0] := src1[47:32] - dst[31:16] := src2[47:32] - dst[47:32] := src1[63:48] - dst[63:48] := src2[63:48] - RETURN dst[63:0] - } - dst[63:0] := INTERLEAVE_HIGH_WORDS(a[63:0], b[63:0]) - - - MMX -
mmintrin.h
- Swizzle + + + + Unpack and interleave 16-bit integers from the high half of "a" and "b", and store the results in "dst". + +DEFINE INTERLEAVE_HIGH_WORDS(src1[63:0], src2[63:0]) { + dst[15:0] := src1[47:32] + dst[31:16] := src2[47:32] + dst[47:32] := src1[63:48] + dst[63:48] := src2[63:48] + RETURN dst[63:0] +} +dst[63:0] := INTERLEAVE_HIGH_WORDS(a[63:0], b[63:0]) + + + MMX +
mmintrin.h
+ Swizzle
- - - - Unpack and interleave 32-bit integers from the high half of "a" and "b", and - store the results in "dst". - - dst[31:0] := a[63:32] - dst[63:32] := b[63:32] - - - MMX -
mmintrin.h
- Swizzle + + + + Unpack and interleave 32-bit integers from the high half of "a" and "b", and store the results in "dst". + +dst[31:0] := a[63:32] +dst[63:32] := b[63:32] + + + MMX +
mmintrin.h
+ Swizzle
- - - - Unpack and interleave 8-bit integers from the low half of "a" and "b", and - store the results in "dst". - - DEFINE INTERLEAVE_BYTES(src1[63:0], src2[63:0]) { - dst[7:0] := src1[7:0] - dst[15:8] := src2[7:0] - dst[23:16] := src1[15:8] - dst[31:24] := src2[15:8] - dst[39:32] := src1[23:16] - dst[47:40] := src2[23:16] - dst[55:48] := src1[31:24] - dst[63:56] := src2[31:24] - RETURN dst[63:0] - } - dst[63:0] := INTERLEAVE_BYTES(a[63:0], b[63:0]) - - - MMX -
mmintrin.h
- Swizzle + + + + Unpack and interleave 8-bit integers from the low half of "a" and "b", and store the results in "dst". + +DEFINE INTERLEAVE_BYTES(src1[63:0], src2[63:0]) { + dst[7:0] := src1[7:0] + dst[15:8] := src2[7:0] + dst[23:16] := src1[15:8] + dst[31:24] := src2[15:8] + dst[39:32] := src1[23:16] + dst[47:40] := src2[23:16] + dst[55:48] := src1[31:24] + dst[63:56] := src2[31:24] + RETURN dst[63:0] +} +dst[63:0] := INTERLEAVE_BYTES(a[63:0], b[63:0]) + + + MMX +
mmintrin.h
+ Swizzle
- - - - Unpack and interleave 16-bit integers from the low half of "a" and "b", and - store the results in "dst". - - DEFINE INTERLEAVE_WORDS(src1[63:0], src2[63:0]) { - dst[15:0] := src1[15:0] - dst[31:16] := src2[15:0] - dst[47:32] := src1[31:16] - dst[63:48] := src2[31:16] - RETURN dst[63:0] - } - dst[63:0] := INTERLEAVE_WORDS(a[63:0], b[63:0]) - - - MMX -
mmintrin.h
- Swizzle + + + + Unpack and interleave 16-bit integers from the low half of "a" and "b", and store the results in "dst". + +DEFINE INTERLEAVE_WORDS(src1[63:0], src2[63:0]) { + dst[15:0] := src1[15:0] + dst[31:16] := src2[15:0] + dst[47:32] := src1[31:16] + dst[63:48] := src2[31:16] + RETURN dst[63:0] +} +dst[63:0] := INTERLEAVE_WORDS(a[63:0], b[63:0]) + + + MMX +
mmintrin.h
+ Swizzle
- - - - Unpack and interleave 32-bit integers from the low half of "a" and "b", and - store the results in "dst". - - dst[31:0] := a[31:0] - dst[63:32] := b[31:0] - - - MMX -
mmintrin.h
- Swizzle + + + + Unpack and interleave 32-bit integers from the low half of "a" and "b", and store the results in "dst". + +dst[31:0] := a[31:0] +dst[63:32] := b[31:0] + + + MMX +
mmintrin.h
+ Swizzle
- - - - Add packed 8-bit integers in "a" and "b", and store the results in "dst". - - FOR j := 0 to 7 - i := j*8 - dst[i+7:i] := a[i+7:i] + b[i+7:i] - ENDFOR - - - MMX -
mmintrin.h
- Arithmetic + + + + Add packed 8-bit integers in "a" and "b", and store the results in "dst". + +FOR j := 0 to 7 + i := j*8 + dst[i+7:i] := a[i+7:i] + b[i+7:i] +ENDFOR + + + MMX +
mmintrin.h
+ Arithmetic
- - - - Add packed 16-bit integers in "a" and "b", and store the results in "dst". - - FOR j := 0 to 3 - i := j*16 - dst[i+15:i] := a[i+15:i] + b[i+15:i] - ENDFOR - - - MMX -
mmintrin.h
- Arithmetic + + + + Add packed 16-bit integers in "a" and "b", and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + dst[i+15:i] := a[i+15:i] + b[i+15:i] +ENDFOR + + + MMX +
mmintrin.h
+ Arithmetic
- - - - Add packed 32-bit integers in "a" and "b", and store the results in "dst". - - FOR j := 0 to 1 - i := j*32 - dst[i+31:i] := a[i+31:i] + b[i+31:i] - ENDFOR - - - MMX -
mmintrin.h
- Arithmetic + + + + Add packed 32-bit integers in "a" and "b", and store the results in "dst". + +FOR j := 0 to 1 + i := j*32 + dst[i+31:i] := a[i+31:i] + b[i+31:i] +ENDFOR + + + MMX +
mmintrin.h
+ Arithmetic
- - - - Add packed signed 8-bit integers in "a" and "b" using saturation, and store the - results in "dst". - - FOR j := 0 to 7 - i := j*8 - dst[i+7:i] := Saturate8( a[i+7:i] + b[i+7:i] ) - ENDFOR - - - MMX -
mmintrin.h
- Arithmetic + + + + Add packed signed 8-bit integers in "a" and "b" using saturation, and store the results in "dst". + +FOR j := 0 to 7 + i := j*8 + dst[i+7:i] := Saturate8( a[i+7:i] + b[i+7:i] ) +ENDFOR + + + MMX +
mmintrin.h
+ Arithmetic
- - - - Add packed signed 16-bit integers in "a" and "b" using saturation, and store - the results in "dst". - - FOR j := 0 to 3 - i := j*16 - dst[i+15:i] := Saturate16( a[i+15:i] + b[i+15:i] ) - ENDFOR - - - MMX -
mmintrin.h
- Arithmetic + + + + Add packed signed 16-bit integers in "a" and "b" using saturation, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + dst[i+15:i] := Saturate16( a[i+15:i] + b[i+15:i] ) +ENDFOR + + + MMX +
mmintrin.h
+ Arithmetic
- - - - Add packed unsigned 8-bit integers in "a" and "b" using saturation, and store - the results in "dst". - - FOR j := 0 to 7 - i := j*8 - dst[i+7:i] := SaturateU8( a[i+7:i] + b[i+7:i] ) - ENDFOR - - - MMX -
mmintrin.h
- Arithmetic + + + + Add packed unsigned 8-bit integers in "a" and "b" using saturation, and store the results in "dst". + +FOR j := 0 to 7 + i := j*8 + dst[i+7:i] := SaturateU8( a[i+7:i] + b[i+7:i] ) +ENDFOR + + + MMX +
mmintrin.h
+ Arithmetic
- - - - Add packed unsigned 16-bit integers in "a" and "b" using saturation, and store - the results in "dst". - - FOR j := 0 to 3 - i := j*16 - dst[i+15:i] := SaturateU16( a[i+15:i] + b[i+15:i] ) - ENDFOR - - - MMX -
mmintrin.h
- Arithmetic + + + + Add packed unsigned 16-bit integers in "a" and "b" using saturation, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + dst[i+15:i] := SaturateU16( a[i+15:i] + b[i+15:i] ) +ENDFOR + + + MMX +
mmintrin.h
+ Arithmetic
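(Illustrative C sketch, not part of this diff: the difference between the wrapping add and the saturating adds documented above, using _mm_add_pi8, _mm_adds_pi8 and _mm_adds_pu8, assuming mmintrin.h:)

    #include <mmintrin.h>
    #include <stdio.h>
    #include <string.h>

    int main(void) {
        __m64 a = _mm_set1_pi8(100), b = _mm_set1_pi8(100);
        __m64 r1 = _mm_add_pi8(a, b);   /* 200 wraps to -56 as a signed byte */
        __m64 r2 = _mm_adds_pi8(a, b);  /* signed saturation clamps at 127 */
        __m64 r3 = _mm_adds_pu8(a, b);  /* unsigned saturation: 200 still fits */
        signed char w, s; unsigned char u;
        memcpy(&w, &r1, 1); memcpy(&s, &r2, 1); memcpy(&u, &r3, 1);
        _mm_empty();
        printf("%d %d %u\n", w, s, u);  /* -56 127 200 */
        return 0;
    }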
- - - - Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a", and - store the results in "dst". - - FOR j := 0 to 7 - i := j*8 - dst[i+7:i] := a[i+7:i] - b[i+7:i] - ENDFOR - - - MMX -
mmintrin.h
- Arithmetic + + + + Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*8 + dst[i+7:i] := a[i+7:i] - b[i+7:i] +ENDFOR + + + MMX +
mmintrin.h
+ Arithmetic
- - - - Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a", and - store the results in "dst". - - FOR j := 0 to 3 - i := j*16 - dst[i+15:i] := a[i+15:i] - b[i+15:i] - ENDFOR - - - MMX -
mmintrin.h
- Arithmetic + + + + Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + dst[i+15:i] := a[i+15:i] - b[i+15:i] +ENDFOR + + + MMX +
mmintrin.h
+ Arithmetic
- - - - Subtract packed 32-bit integers in "b" from packed 32-bit integers in "a", and - store the results in "dst". - - FOR j := 0 to 1 - i := j*32 - dst[i+31:i] := a[i+31:i] - b[i+31:i] - ENDFOR - - - MMX -
mmintrin.h
- Arithmetic + + + + Subtract packed 32-bit integers in "b" from packed 32-bit integers in "a", and store the results in "dst". + +FOR j := 0 to 1 + i := j*32 + dst[i+31:i] := a[i+31:i] - b[i+31:i] +ENDFOR + + + MMX +
mmintrin.h
+ Arithmetic
- - - - Subtract packed signed 8-bit integers in "b" from packed 8-bit integers in "a" - using saturation, and store the results in "dst". - - FOR j := 0 to 7 - i := j*8 - dst[i+7:i] := Saturate8(a[i+7:i] - b[i+7:i]) - ENDFOR - - - MMX -
mmintrin.h
- Arithmetic + + + + Subtract packed signed 8-bit integers in "b" from packed 8-bit integers in "a" using saturation, and store the results in "dst". + +FOR j := 0 to 7 + i := j*8 + dst[i+7:i] := Saturate8(a[i+7:i] - b[i+7:i]) +ENDFOR + + + MMX +
mmintrin.h
+ Arithmetic
- - - - Subtract packed signed 16-bit integers in "b" from packed 16-bit integers in - "a" using saturation, and store the results in "dst". - - FOR j := 0 to 3 - i := j*16 - dst[i+15:i] := Saturate16(a[i+15:i] - b[i+15:i]) - ENDFOR - - - MMX -
mmintrin.h
- Arithmetic + + + + Subtract packed signed 16-bit integers in "b" from packed 16-bit integers in "a" using saturation, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + dst[i+15:i] := Saturate16(a[i+15:i] - b[i+15:i]) +ENDFOR + + + MMX +
mmintrin.h
+ Arithmetic
- - - - Subtract packed unsigned 8-bit integers in "b" from packed unsigned 8-bit - integers in "a" using saturation, and store the results in "dst". - - FOR j := 0 to 7 - i := j*8 - dst[i+7:i] := SaturateU8(a[i+7:i] - b[i+7:i]) - ENDFOR - - - MMX -
mmintrin.h
- Arithmetic + + + + Subtract packed unsigned 8-bit integers in "b" from packed unsigned 8-bit integers in "a" using saturation, and store the results in "dst". + +FOR j := 0 to 7 + i := j*8 + dst[i+7:i] := SaturateU8(a[i+7:i] - b[i+7:i]) +ENDFOR + + + MMX +
mmintrin.h
+ Arithmetic
- - - - Subtract packed unsigned 16-bit integers in "b" from packed unsigned 16-bit - integers in "a" using saturation, and store the results in "dst". - - FOR j := 0 to 3 - i := j*16 - dst[i+15:i] := SaturateU16(a[i+15:i] - b[i+15:i]) - ENDFOR - - - MMX -
mmintrin.h
- Arithmetic + + + + Subtract packed unsigned 16-bit integers in "b" from packed unsigned 16-bit integers in "a" using saturation, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + dst[i+15:i] := SaturateU16(a[i+15:i] - b[i+15:i]) +ENDFOR + + + MMX +
mmintrin.h
+ Arithmetic
- - - - Multiply packed signed 16-bit integers in "a" and "b", producing intermediate - signed 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, - and pack the results in "dst". - - FOR j := 0 to 1 - i := j*32 - dst[i+31:i] := SignExtend32(a[i+31:i+16]*b[i+31:i+16]) + - SignExtend32(a[i+15:i]*b[i+15:i]) - ENDFOR - - - MMX -
mmintrin.h
- Arithmetic + + + + Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, and pack the results in "dst". + +FOR j := 0 to 1 + i := j*32 + dst[i+31:i] := SignExtend32(a[i+31:i+16]*b[i+31:i+16]) + SignExtend32(a[i+15:i]*b[i+15:i]) +ENDFOR + + + MMX +
mmintrin.h
+ Arithmetic
- - - - Multiply the packed signed 16-bit integers in "a" and "b", producing - intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in - "dst". - - FOR j := 0 to 3 - i := j*16 - tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i]) - dst[i+15:i] := tmp[31:16] - ENDFOR - - - MMX -
mmintrin.h
- Arithmetic + + + + Multiply the packed signed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst". + +FOR j := 0 to 3 + i := j*16 + tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i]) + dst[i+15:i] := tmp[31:16] +ENDFOR + + + MMX +
mmintrin.h
+ Arithmetic
- - - - Multiply the packed 16-bit integers in "a" and "b", producing intermediate - 32-bit integers, and store the low 16 bits of the intermediate integers in "dst". - - FOR j := 0 to 3 - i := j*16 - tmp[31:0] := a[i+15:i] * b[i+15:i] - dst[i+15:i] := tmp[15:0] - ENDFOR - - - MMX -
mmintrin.h
- Arithmetic + + + + Multiply the packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in "dst". + +FOR j := 0 to 3 + i := j*16 + tmp[31:0] := a[i+15:i] * b[i+15:i] + dst[i+15:i] := tmp[15:0] +ENDFOR + + + MMX +
mmintrin.h
+ Arithmetic
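(Illustrative C sketch, not part of this diff: the multiply-add entry above is _mm_madd_pi16, the usual core of small fixed-point dot products; _mm_mulhi_pi16 and _mm_mullo_pi16 recover the full 32-bit products when needed. Assuming mmintrin.h:)

    #include <mmintrin.h>
    #include <stdio.h>
    #include <string.h>

    int main(void) {
        __m64 a = _mm_set_pi16(4, 3, 2, 1);  /* elements 1,2,3,4, low first */
        __m64 b = _mm_set_pi16(8, 7, 6, 5);  /* elements 5,6,7,8 */
        __m64 s = _mm_madd_pi16(a, b);       /* {1*5 + 2*6, 3*7 + 4*8} = {17, 53} */
        int pair[2];
        memcpy(pair, &s, 8);
        _mm_empty();
        printf("%d\n", pair[0] + pair[1]);   /* full dot product: 70 */
        return 0;
    }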
- - - - Add packed 8-bit integers in "a" and "b", and store the results in "dst". - - FOR j := 0 to 7 - i := j*8 - dst[i+7:i] := a[i+7:i] + b[i+7:i] - ENDFOR - - - MMX -
mmintrin.h
- Arithmetic + + + + Add packed 8-bit integers in "a" and "b", and store the results in "dst". + +FOR j := 0 to 7 + i := j*8 + dst[i+7:i] := a[i+7:i] + b[i+7:i] +ENDFOR + + + MMX +
mmintrin.h
+ Arithmetic
- - - - Add packed 16-bit integers in "a" and "b", and store the results in "dst". - - FOR j := 0 to 3 - i := j*16 - dst[i+15:i] := a[i+15:i] + b[i+15:i] - ENDFOR - - - MMX -
mmintrin.h
- Arithmetic + + + + Add packed 16-bit integers in "a" and "b", and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + dst[i+15:i] := a[i+15:i] + b[i+15:i] +ENDFOR + + + MMX +
mmintrin.h
+ Arithmetic
- - - - Add packed 32-bit integers in "a" and "b", and store the results in "dst". - - FOR j := 0 to 1 - i := j*32 - dst[i+31:i] := a[i+31:i] + b[i+31:i] - ENDFOR - - - MMX -
mmintrin.h
- Arithmetic + + + + Add packed 32-bit integers in "a" and "b", and store the results in "dst". + +FOR j := 0 to 1 + i := j*32 + dst[i+31:i] := a[i+31:i] + b[i+31:i] +ENDFOR + + + MMX +
mmintrin.h
+ Arithmetic
- - - - Add packed signed 8-bit integers in "a" and "b" using saturation, and store the - results in "dst". - - FOR j := 0 to 7 - i := j*8 - dst[i+7:i] := Saturate8( a[i+7:i] + b[i+7:i] ) - ENDFOR - - - MMX -
mmintrin.h
- Arithmetic + + + + Add packed signed 8-bit integers in "a" and "b" using saturation, and store the results in "dst". + +FOR j := 0 to 7 + i := j*8 + dst[i+7:i] := Saturate8( a[i+7:i] + b[i+7:i] ) +ENDFOR + + + MMX +
mmintrin.h
+ Arithmetic
- - - - Add packed signed 16-bit integers in "a" and "b" using saturation, and store - the results in "dst". - - FOR j := 0 to 3 - i := j*16 - dst[i+15:i] := Saturate16( a[i+15:i] + b[i+15:i] ) - ENDFOR - - - MMX -
mmintrin.h
- Arithmetic + + + + Add packed signed 16-bit integers in "a" and "b" using saturation, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + dst[i+15:i] := Saturate16( a[i+15:i] + b[i+15:i] ) +ENDFOR + + + MMX +
mmintrin.h
+ Arithmetic
- - - - Add packed unsigned 8-bit integers in "a" and "b" using saturation, and store - the results in "dst". - - FOR j := 0 to 7 - i := j*8 - dst[i+7:i] := SaturateU8( a[i+7:i] + b[i+7:i] ) - ENDFOR - - - MMX -
mmintrin.h
- Arithmetic + + + + Add packed unsigned 8-bit integers in "a" and "b" using saturation, and store the results in "dst". + +FOR j := 0 to 7 + i := j*8 + dst[i+7:i] := SaturateU8( a[i+7:i] + b[i+7:i] ) +ENDFOR + + + MMX +
mmintrin.h
+ Arithmetic
- - - - Add packed unsigned 16-bit integers in "a" and "b" using saturation, and store - the results in "dst". - - FOR j := 0 to 3 - i := j*16 - dst[i+15:i] := SaturateU16( a[i+15:i] + b[i+15:i] ) - ENDFOR - - - MMX -
mmintrin.h
- Arithmetic + + + + Add packed unsigned 16-bit integers in "a" and "b" using saturation, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + dst[i+15:i] := SaturateU16( a[i+15:i] + b[i+15:i] ) +ENDFOR + + + MMX +
mmintrin.h
+ Arithmetic
- - - - Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a", and - store the results in "dst". - - FOR j := 0 to 7 - i := j*8 - dst[i+7:i] := a[i+7:i] - b[i+7:i] - ENDFOR - - - MMX -
mmintrin.h
- Arithmetic + + + + Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a", and store the results in "dst". + +FOR j := 0 to 7 + i := j*8 + dst[i+7:i] := a[i+7:i] - b[i+7:i] +ENDFOR + + + MMX +
mmintrin.h
+ Arithmetic
- - - - Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a", and - store the results in "dst". - - FOR j := 0 to 3 - i := j*16 - dst[i+15:i] := a[i+15:i] - b[i+15:i] - ENDFOR - - - MMX -
mmintrin.h
- Arithmetic + + + + Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + dst[i+15:i] := a[i+15:i] - b[i+15:i] +ENDFOR + + + MMX +
mmintrin.h
+ Arithmetic
- - - - Subtract packed 32-bit integers in "b" from packed 32-bit integers in "a", and - store the results in "dst". - - FOR j := 0 to 1 - i := j*32 - dst[i+31:i] := a[i+31:i] - b[i+31:i] - ENDFOR - - - MMX -
mmintrin.h
- Arithmetic + + + + Subtract packed 32-bit integers in "b" from packed 32-bit integers in "a", and store the results in "dst". + +FOR j := 0 to 1 + i := j*32 + dst[i+31:i] := a[i+31:i] - b[i+31:i] +ENDFOR + + + MMX +
mmintrin.h
+ Arithmetic
- - - - Subtract packed signed 8-bit integers in "b" from packed 8-bit integers in "a" - using saturation, and store the results in "dst". - - FOR j := 0 to 7 - i := j*8 - dst[i+7:i] := Saturate8(a[i+7:i] - b[i+7:i]) - ENDFOR - - - MMX -
mmintrin.h
- Arithmetic + + + + Subtract packed signed 8-bit integers in "b" from packed 8-bit integers in "a" using saturation, and store the results in "dst". + +FOR j := 0 to 7 + i := j*8 + dst[i+7:i] := Saturate8(a[i+7:i] - b[i+7:i]) +ENDFOR + + + MMX +
mmintrin.h
+ Arithmetic
- - - - Subtract packed signed 16-bit integers in "b" from packed 16-bit integers in - "a" using saturation, and store the results in "dst". - - FOR j := 0 to 3 - i := j*16 - dst[i+15:i] := Saturate16(a[i+15:i] - b[i+15:i]) - ENDFOR - - - MMX -
mmintrin.h
- Arithmetic + + + + Subtract packed signed 16-bit integers in "b" from packed 16-bit integers in "a" using saturation, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + dst[i+15:i] := Saturate16(a[i+15:i] - b[i+15:i]) +ENDFOR + + + MMX +
mmintrin.h
+ Arithmetic
- - - - Subtract packed unsigned 8-bit integers in "b" from packed unsigned 8-bit - integers in "a" using saturation, and store the results in "dst". - - FOR j := 0 to 7 - i := j*8 - dst[i+7:i] := SaturateU8(a[i+7:i] - b[i+7:i]) - ENDFOR - - - MMX -
mmintrin.h
- Arithmetic + + + + Subtract packed unsigned 8-bit integers in "b" from packed unsigned 8-bit integers in "a" using saturation, and store the results in "dst". + +FOR j := 0 to 7 + i := j*8 + dst[i+7:i] := SaturateU8(a[i+7:i] - b[i+7:i]) +ENDFOR + + + MMX +
mmintrin.h
+ Arithmetic
- - - - Subtract packed unsigned 16-bit integers in "b" from packed unsigned 16-bit - integers in "a" using saturation, and store the results in "dst". - - FOR j := 0 to 3 - i := j*16 - dst[i+15:i] := SaturateU16(a[i+15:i] - b[i+15:i]) - ENDFOR - - - MMX -
mmintrin.h
- Arithmetic + + + + Subtract packed unsigned 16-bit integers in "b" from packed unsigned 16-bit integers in "a" using saturation, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + dst[i+15:i] := SaturateU16(a[i+15:i] - b[i+15:i]) +ENDFOR + + + MMX +
mmintrin.h
+ Arithmetic
- - - - Multiply packed signed 16-bit integers in "a" and "b", producing intermediate - signed 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, - and pack the results in "dst". - - FOR j := 0 to 1 - i := j*32 - dst[i+31:i] := SignExtend32(a[i+31:i+16]*b[i+31:i+16]) + - SignExtend32(a[i+15:i]*b[i+15:i]) - ENDFOR - - - MMX -
mmintrin.h
- Arithmetic + + + + Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, and pack the results in "dst". + +FOR j := 0 to 1 + i := j*32 + dst[i+31:i] := SignExtend32(a[i+31:i+16]*b[i+31:i+16]) + SignExtend32(a[i+15:i]*b[i+15:i]) +ENDFOR + + + MMX +
mmintrin.h
+ Arithmetic
- - - - Multiply the packed signed 16-bit integers in "a" and "b", producing - intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in - "dst". - - FOR j := 0 to 3 - i := j*16 - tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i]) - dst[i+15:i] := tmp[31:16] - ENDFOR - - - MMX -
mmintrin.h
- Arithmetic + + + + Multiply the packed signed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst". + +FOR j := 0 to 3 + i := j*16 + tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i]) + dst[i+15:i] := tmp[31:16] +ENDFOR + + + MMX +
mmintrin.h
+ Arithmetic
- - - - Multiply the packed 16-bit integers in "a" and "b", producing intermediate - 32-bit integers, and store the low 16 bits of the intermediate integers in "dst". - - FOR j := 0 to 3 - i := j*16 - tmp[31:0] := a[i+15:i] * b[i+15:i] - dst[i+15:i] := tmp[15:0] - ENDFOR - - - MMX -
mmintrin.h
- Arithmetic + + + + Multiply the packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in "dst". + +FOR j := 0 to 3 + i := j*16 + tmp[31:0] := a[i+15:i] * b[i+15:i] + dst[i+15:i] := tmp[15:0] +ENDFOR + + + MMX +
mmintrin.h
+ Arithmetic
- - - - Shift packed 16-bit integers in "a" left by "count" while shifting in zeros, - and store the results in "dst". - - FOR j := 0 to 3 - i := j*16 - IF count[63:0] > 15 - dst[i+15:i] := 0 - ELSE - dst[i+15:i] := ZeroExtend16(a[i+15:i] << count[63:0]) - FI - ENDFOR - - - MMX -
mmintrin.h
- Shift + + + + Shift packed 16-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + IF count[63:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend16(a[i+15:i] << count[63:0]) + FI +ENDFOR + + + MMX +
mmintrin.h
+ Shift
- - - - Shift packed 16-bit integers in "a" left by "imm8" while shifting in zeros, and - store the results in "dst". - - FOR j := 0 to 3 - i := j*16 - IF imm8[7:0] > 15 - dst[i+15:i] := 0 - ELSE - dst[i+15:i] := ZeroExtend16(a[i+15:i] << imm8[7:0]) - FI - ENDFOR - - - MMX -
mmintrin.h
- Shift + + + + Shift packed 16-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + IF imm8[7:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend16(a[i+15:i] << imm8[7:0]) + FI +ENDFOR + + + MMX +
mmintrin.h
+ Shift
- - - - Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, - and store the results in "dst". - - FOR j := 0 to 1 - i := j*32 - IF count[63:0] > 31 - dst[i+31:i] := 0 - ELSE - dst[i+31:i] := ZeroExtend32(a[i+31:i] << count[63:0]) - FI - ENDFOR - - - MMX -
mmintrin.h
- Shift + + + + Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 1 + i := j*32 + IF count[63:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend32(a[i+31:i] << count[63:0]) + FI +ENDFOR + + + MMX +
mmintrin.h
+ Shift
- - - - Shift packed 32-bit integers in "a" left by "imm8" while shifting in zeros, and - store the results in "dst". - - FOR j := 0 to 1 - i := j*32 - IF imm8[7:0] > 31 - dst[i+31:i] := 0 - ELSE - dst[i+31:i] := ZeroExtend32(a[i+31:i] << imm8[7:0]) - FI - ENDFOR - - - MMX -
mmintrin.h
- Shift + + + + Shift packed 32-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 1 + i := j*32 + IF imm8[7:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend32(a[i+31:i] << imm8[7:0]) + FI +ENDFOR + + + MMX +
mmintrin.h
+ Shift
- - - - Shift 64-bit integer "a" left by "count" while shifting in zeros, and store the - result in "dst". - - IF count[63:0] > 63 - dst[63:0] := 0 - ELSE - dst[63:0] := ZeroExtend64(a[63:0] << count[63:0]) - FI - - - MMX -
mmintrin.h
- Shift + + + + Shift 64-bit integer "a" left by "count" while shifting in zeros, and store the result in "dst". + +IF count[63:0] > 63 + dst[63:0] := 0 +ELSE + dst[63:0] := ZeroExtend64(a[63:0] << count[63:0]) +FI + + + MMX +
mmintrin.h
+ Shift
- - - - Shift 64-bit integer "a" left by "imm8" while shifting in zeros, and store the - result in "dst". - - IF imm8[7:0] > 63 - dst[63:0] := 0 - ELSE - dst[63:0] := ZeroExtend64(a[63:0] << imm8[7:0]) - FI - - - MMX -
mmintrin.h
- Shift + + + + Shift 64-bit integer "a" left by "imm8" while shifting in zeros, and store the result in "dst". + +IF imm8[7:0] > 63 + dst[63:0] := 0 +ELSE + dst[63:0] := ZeroExtend64(a[63:0] << imm8[7:0]) +FI + + + MMX +
mmintrin.h
+ Shift
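(Illustrative C sketch, not part of this diff: per the pseudocode above, shift counts larger than the element width zero the result instead of being reduced modulo the width, which differs from plain C shifts. Assuming mmintrin.h:)

    #include <mmintrin.h>
    #include <stdio.h>
    #include <string.h>

    int main(void) {
        __m64 a = _mm_set1_pi16(0x1234);
        __m64 r1 = _mm_slli_pi16(a, 4);   /* each word becomes 0x2340 */
        __m64 r2 = _mm_slli_pi16(a, 16);  /* count > 15: every word becomes 0 */
        unsigned short w1, w2;
        memcpy(&w1, &r1, 2); memcpy(&w2, &r2, 2);
        _mm_empty();
        printf("%04x %04x\n", w1, w2);    /* 2340 0000 */
        return 0;
    }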
- - - - Shift packed 16-bit integers in "a" right by "count" while shifting in sign - bits, and store the results in "dst". - - FOR j := 0 to 3 - i := j*16 - IF count[63:0] > 15 - dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0) - ELSE - dst[i+15:i] := SignExtend16(a[i+15:i] >> count[63:0]) - FI - ENDFOR - - - MMX -
mmintrin.h
- Shift + + + + Shift packed 16-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + IF count[63:0] > 15 + dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0) + ELSE + dst[i+15:i] := SignExtend16(a[i+15:i] >> count[63:0]) + FI +ENDFOR + + + MMX +
mmintrin.h
+ Shift
- - - - Shift packed 16-bit integers in "a" right by "imm8" while shifting in sign - bits, and store the results in "dst". - - FOR j := 0 to 3 - i := j*16 - IF imm8[7:0] > 15 - dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0) - ELSE - dst[i+15:i] := SignExtend16(a[i+15:i] >> imm8[7:0]) - FI - ENDFOR - - - MMX -
mmintrin.h
- Shift + + + + Shift packed 16-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + IF imm8[7:0] > 15 + dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0) + ELSE + dst[i+15:i] := SignExtend16(a[i+15:i] >> imm8[7:0]) + FI +ENDFOR + + + MMX +
mmintrin.h
+ Shift
- - - - Shift packed 32-bit integers in "a" right by "count" while shifting in sign - bits, and store the results in "dst". - - FOR j := 0 to 1 - i := j*32 - IF count[63:0] > 31 - dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0) - ELSE - dst[i+31:i] := SignExtend32(a[i+31:i] >> count[63:0]) - FI - ENDFOR - - - MMX -
mmintrin.h
- Shift + + + + Shift packed 32-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 1 + i := j*32 + IF count[63:0] > 31 + dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0) + ELSE + dst[i+31:i] := SignExtend32(a[i+31:i] >> count[63:0]) + FI +ENDFOR + + + MMX +
mmintrin.h
+ Shift
- - - - Shift packed 32-bit integers in "a" right by "imm8" while shifting in sign - bits, and store the results in "dst". - - FOR j := 0 to 1 - i := j*32 - IF imm8[7:0] > 31 - dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0) - ELSE - dst[i+31:i] := SignExtend32(a[i+31:i] >> imm8[7:0]) - FI - ENDFOR - - - MMX -
mmintrin.h
- Shift + + + + Shift packed 32-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 1 + i := j*32 + IF imm8[7:0] > 31 + dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0) + ELSE + dst[i+31:i] := SignExtend32(a[i+31:i] >> imm8[7:0]) + FI +ENDFOR + + + MMX +
mmintrin.h
+ Shift
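(Illustrative C sketch, not part of this diff: the arithmetic right shifts above replicate the sign bit, while the logical right shifts that follow shift in zeros; the distinction only shows on negative elements. Assuming mmintrin.h:)

    #include <mmintrin.h>
    #include <stdio.h>
    #include <string.h>

    int main(void) {
        __m64 a = _mm_set1_pi16(-16);    /* 0xFFF0 */
        __m64 r1 = _mm_srai_pi16(a, 2);  /* sign bits shift in: -4 (0xFFFC) */
        __m64 r2 = _mm_srli_pi16(a, 2);  /* zeros shift in: 0x3FFC */
        short s; unsigned short u;
        memcpy(&s, &r1, 2); memcpy(&u, &r2, 2);
        _mm_empty();
        printf("%d %04x\n", s, u);       /* -4 3ffc */
        return 0;
    }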
- - - - Shift packed 16-bit integers in "a" right by "count" while shifting in zeros, - and store the results in "dst". - - FOR j := 0 to 3 - i := j*16 - IF count[63:0] > 15 - dst[i+15:i] := 0 - ELSE - dst[i+15:i] := ZeroExtend16(a[i+15:i] >> count[63:0]) - FI - ENDFOR - - - MMX -
mmintrin.h
- Shift + + + + Shift packed 16-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + IF count[63:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend16(a[i+15:i] >> count[63:0]) + FI +ENDFOR + + + MMX +
mmintrin.h
+ Shift
- - - - Shift packed 16-bit integers in "a" right by "imm8" while shifting in zeros, - and store the results in "dst". - - FOR j := 0 to 3 - i := j*16 - IF imm8[7:0] > 15 - dst[i+15:i] := 0 - ELSE - dst[i+15:i] := ZeroExtend16(a[i+15:i] >> imm8[7:0]) - FI - ENDFOR - - - MMX -
mmintrin.h
- Shift + + + + Shift packed 16-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + IF imm8[7:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend16(a[i+15:i] >> imm8[7:0]) + FI +ENDFOR + + + MMX +
mmintrin.h
+ Shift
- - - - Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, - and store the results in "dst". - - FOR j := 0 to 1 - i := j*32 - IF count[63:0] > 31 - dst[i+31:i] := 0 - ELSE - dst[i+31:i] := ZeroExtend32(a[i+31:i] >> count[63:0]) - FI - ENDFOR - - - MMX -
mmintrin.h
- Shift + + + + Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 1 + i := j*32 + IF count[63:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend32(a[i+31:i] >> count[63:0]) + FI +ENDFOR + + + MMX +
mmintrin.h
+ Shift
- - - - Shift packed 32-bit integers in "a" right by "imm8" while shifting in zeros, - and store the results in "dst". - - FOR j := 0 to 1 - i := j*32 - IF imm8[7:0] > 31 - dst[i+31:i] := 0 - ELSE - dst[i+31:i] := ZeroExtend32(a[i+31:i] >> imm8[7:0]) - FI - ENDFOR - - - MMX -
mmintrin.h
- Shift + + + + Shift packed 32-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 1 + i := j*32 + IF imm8[7:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend32(a[i+31:i] >> imm8[7:0]) + FI +ENDFOR + + + MMX +
mmintrin.h
+ Shift
- - - - Shift 64-bit integer "a" right by "count" while shifting in zeros, and store - the result in "dst". - - IF count[63:0] > 63 - dst[63:0] := 0 - ELSE - dst[63:0] := ZeroExtend64(a[63:0] >> count[63:0]) - FI - - - MMX -
mmintrin.h
- Shift + + + + Shift 64-bit integer "a" right by "count" while shifting in zeros, and store the result in "dst". + +IF count[63:0] > 63 + dst[63:0] := 0 +ELSE + dst[63:0] := ZeroExtend64(a[63:0] >> count[63:0]) +FI + + + MMX +
mmintrin.h
+ Shift
- - - - Shift 64-bit integer "a" right by "imm8" while shifting in zeros, and store the - result in "dst". - - IF imm8[7:0] > 63 - dst[63:0] := 0 - ELSE - dst[63:0] := ZeroExtend64(a[63:0] >> imm8[7:0]) - FI - - - MMX -
mmintrin.h
- Shift + + + + Shift 64-bit integer "a" right by "imm8" while shifting in zeros, and store the result in "dst". + +IF imm8[7:0] > 63 + dst[63:0] := 0 +ELSE + dst[63:0] := ZeroExtend64(a[63:0] >> imm8[7:0]) +FI + + + MMX +
mmintrin.h
+ Shift
- - - - Shift packed 16-bit integers in "a" left by "count" while shifting in zeros, - and store the results in "dst". - - FOR j := 0 to 3 - i := j*16 - IF count[63:0] > 15 - dst[i+15:i] := 0 - ELSE - dst[i+15:i] := ZeroExtend16(a[i+15:i] << count[63:0]) - FI - ENDFOR - - - MMX -
mmintrin.h
- Shift + + + + Shift packed 16-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + IF count[63:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend16(a[i+15:i] << count[63:0]) + FI +ENDFOR + + + MMX +
mmintrin.h
+ Shift
- - - - Shift packed 16-bit integers in "a" left by "imm8" while shifting in zeros, and - store the results in "dst". - - FOR j := 0 to 3 - i := j*16 - IF imm8[7:0] > 15 - dst[i+15:i] := 0 - ELSE - dst[i+15:i] := ZeroExtend16(a[i+15:i] << imm8[7:0]) - FI - ENDFOR - - - MMX -
mmintrin.h
- Shift + + + + Shift packed 16-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + IF imm8[7:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend16(a[i+15:i] << imm8[7:0]) + FI +ENDFOR + + + MMX +
mmintrin.h
+ Shift
- - - - Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, - and store the results in "dst". - - FOR j := 0 to 1 - i := j*32 - IF count[63:0] > 31 - dst[i+31:i] := 0 - ELSE - dst[i+31:i] := ZeroExtend32(a[i+31:i] << count[63:0]) - FI - ENDFOR - - - MMX -
mmintrin.h
- Shift + + + + Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 1 + i := j*32 + IF count[63:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend32(a[i+31:i] << count[63:0]) + FI +ENDFOR + + + MMX +
mmintrin.h
+ Shift
- - - - Shift packed 32-bit integers in "a" left by "imm8" while shifting in zeros, and - store the results in "dst". - - FOR j := 0 to 1 - i := j*32 - IF imm8[7:0] > 31 - dst[i+31:i] := 0 - ELSE - dst[i+31:i] := ZeroExtend32(a[i+31:i] << imm8[7:0]) - FI - ENDFOR - - - MMX -
mmintrin.h
- Shift + + + + Shift packed 32-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 1 + i := j*32 + IF imm8[7:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend32(a[i+31:i] << imm8[7:0]) + FI +ENDFOR + + + MMX +
mmintrin.h
+ Shift
- - - - Shift 64-bit integer "a" left by "count" while shifting in zeros, and store the - result in "dst". - - IF count[63:0] > 63 - dst[63:0] := 0 - ELSE - dst[63:0] := ZeroExtend64(a[63:0] << count[63:0]) - FI - - - MMX -
mmintrin.h
- Shift + + + + Shift 64-bit integer "a" left by "count" while shifting in zeros, and store the result in "dst". + +IF count[63:0] > 63 + dst[63:0] := 0 +ELSE + dst[63:0] := ZeroExtend64(a[63:0] << count[63:0]) +FI + + + MMX +
mmintrin.h
+ Shift
- - - - Shift 64-bit integer "a" left by "imm8" while shifting in zeros, and store the - result in "dst". - - IF imm8[7:0] > 63 - dst[63:0] := 0 - ELSE - dst[63:0] := ZeroExtend64(a[63:0] << imm8[7:0]) - FI - - - MMX -
mmintrin.h
- Shift + + + + Shift 64-bit integer "a" left by "imm8" while shifting in zeros, and store the result in "dst". + +IF imm8[7:0] > 63 + dst[63:0] := 0 +ELSE + dst[63:0] := ZeroExtend64(a[63:0] << imm8[7:0]) +FI + + + MMX +
mmintrin.h
+ Shift
- - - - Shift packed 16-bit integers in "a" right by "count" while shifting in sign - bits, and store the results in "dst". - - FOR j := 0 to 3 - i := j*16 - IF count[63:0] > 15 - dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0) - ELSE - dst[i+15:i] := SignExtend16(a[i+15:i] >> count[63:0]) - FI - ENDFOR - - - MMX -
mmintrin.h
- Shift + + + + Shift packed 16-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + IF count[63:0] > 15 + dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0) + ELSE + dst[i+15:i] := SignExtend16(a[i+15:i] >> count[63:0]) + FI +ENDFOR + + + MMX +
mmintrin.h
+ Shift
- - - - Shift packed 16-bit integers in "a" right by "imm8" while shifting in sign - bits, and store the results in "dst". - - FOR j := 0 to 3 - i := j*16 - IF imm8[7:0] > 15 - dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0) - ELSE - dst[i+15:i] := SignExtend16(a[i+15:i] >> imm8[7:0]) - FI - ENDFOR - - - MMX -
mmintrin.h
- Shift + + + + Shift packed 16-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + IF imm8[7:0] > 15 + dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0) + ELSE + dst[i+15:i] := SignExtend16(a[i+15:i] >> imm8[7:0]) + FI +ENDFOR + + + MMX +
mmintrin.h
+ Shift
- - - - Shift packed 32-bit integers in "a" right by "count" while shifting in sign - bits, and store the results in "dst". - - FOR j := 0 to 1 - i := j*32 - IF count[63:0] > 31 - dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0) - ELSE - dst[i+31:i] := SignExtend32(a[i+31:i] >> count[63:0]) - FI - ENDFOR - - - MMX -
mmintrin.h
- Shift + + + + Shift packed 32-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 1 + i := j*32 + IF count[63:0] > 31 + dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0) + ELSE + dst[i+31:i] := SignExtend32(a[i+31:i] >> count[63:0]) + FI +ENDFOR + + + MMX +
mmintrin.h
+ Shift
- - - - Shift packed 32-bit integers in "a" right by "imm8" while shifting in sign - bits, and store the results in "dst". - - FOR j := 0 to 1 - i := j*32 - IF imm8[7:0] > 31 - dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0) - ELSE - dst[i+31:i] := SignExtend32(a[i+31:i] >> imm8[7:0]) - FI - ENDFOR - - - MMX -
mmintrin.h
- Shift + + + + Shift packed 32-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 1 + i := j*32 + IF imm8[7:0] > 31 + dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0) + ELSE + dst[i+31:i] := SignExtend32(a[i+31:i] >> imm8[7:0]) + FI +ENDFOR + + + MMX +
mmintrin.h
+ Shift
- - - - Shift packed 16-bit integers in "a" right by "count" while shifting in zeros, - and store the results in "dst". - - FOR j := 0 to 3 - i := j*16 - IF count[63:0] > 15 - dst[i+15:i] := 0 - ELSE - dst[i+15:i] := ZeroExtend16(a[i+15:i] >> count[63:0]) - FI - ENDFOR - - - MMX -
mmintrin.h
- Shift + + + + Shift packed 16-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + IF count[63:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend16(a[i+15:i] >> count[63:0]) + FI +ENDFOR + + + MMX +
mmintrin.h
+ Shift
- - - - Shift packed 16-bit integers in "a" right by "imm8" while shifting in zeros, - and store the results in "dst". - - FOR j := 0 to 3 - i := j*16 - IF imm8[7:0] > 15 - dst[i+15:i] := 0 - ELSE - dst[i+15:i] := ZeroExtend16(a[i+15:i] >> imm8[7:0]) - FI - ENDFOR - - - MMX -
mmintrin.h
- Shift + + + + Shift packed 16-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + IF imm8[7:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend16(a[i+15:i] >> imm8[7:0]) + FI +ENDFOR + + + MMX +
mmintrin.h
+ Shift
- - - - Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, - and store the results in "dst". - - FOR j := 0 to 1 - i := j*32 - IF count[63:0] > 31 - dst[i+31:i] := 0 - ELSE - dst[i+31:i] := ZeroExtend32(a[i+31:i] >> count[63:0]) - FI - ENDFOR - - - MMX -
mmintrin.h
- Shift + + + + Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 1 + i := j*32 + IF count[63:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend32(a[i+31:i] >> count[63:0]) + FI +ENDFOR + + + MMX +
mmintrin.h
+ Shift
- - - - Shift packed 32-bit integers in "a" right by "imm8" while shifting in zeros, - and store the results in "dst". - - FOR j := 0 to 1 - i := j*32 - IF imm8[7:0] > 31 - dst[i+31:i] := 0 - ELSE - dst[i+31:i] := ZeroExtend32(a[i+31:i] >> imm8[7:0]) - FI - ENDFOR - - - MMX -
mmintrin.h
- Shift + + + + Shift packed 32-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 1 + i := j*32 + IF imm8[7:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend32(a[i+31:i] >> imm8[7:0]) + FI +ENDFOR + + + MMX +
mmintrin.h
+ Shift
- - - - Shift 64-bit integer "a" right by "count" while shifting in zeros, and store - the result in "dst". - - IF count[63:0] > 63 - dst[63:0] := 0 - ELSE - dst[63:0] := ZeroExtend64(a[63:0] >> count[63:0]) - FI - - - MMX -
mmintrin.h
- Shift + + + + Shift 64-bit integer "a" right by "count" while shifting in zeros, and store the result in "dst". + +IF count[63:0] > 63 + dst[63:0] := 0 +ELSE + dst[63:0] := ZeroExtend64(a[63:0] >> count[63:0]) +FI + + + MMX +
mmintrin.h
+ Shift
- - - - Shift 64-bit integer "a" right by "imm8" while shifting in zeros, and store the - result in "dst". - - IF imm8[7:0] > 63 - dst[63:0] := 0 - ELSE - dst[63:0] := ZeroExtend64(a[63:0] >> imm8[7:0]) - FI - - - MMX -
mmintrin.h
- Shift + + + + Shift 64-bit integer "a" right by "imm8" while shifting in zeros, and store the result in "dst". + +IF imm8[7:0] > 63 + dst[63:0] := 0 +ELSE + dst[63:0] := ZeroExtend64(a[63:0] >> imm8[7:0]) +FI + + + MMX +
mmintrin.h
+ Shift
- - - - Compute the bitwise AND of 64 bits (representing integer data) in "a" and "b", - and store the result in "dst". - - dst[63:0] := (a[63:0] AND b[63:0]) - - - MMX -
mmintrin.h
- Logical + + + + Compute the bitwise AND of 64 bits (representing integer data) in "a" and "b", and store the result in "dst". + +dst[63:0] := (a[63:0] AND b[63:0]) + + + MMX +
mmintrin.h
+ Logical
- - - - Compute the bitwise NOT of 64 bits (representing integer data) in "a" and then - AND with "b", and store the result in "dst". - - dst[63:0] := ((NOT a[63:0]) AND b[63:0]) - - - MMX -
mmintrin.h
- Logical + + + + Compute the bitwise NOT of 64 bits (representing integer data) in "a" and then AND with "b", and store the result in "dst". + +dst[63:0] := ((NOT a[63:0]) AND b[63:0]) + + + MMX +
mmintrin.h
+ Logical
- - - - Compute the bitwise OR of 64 bits (representing integer data) in "a" and "b", - and store the result in "dst". - - dst[63:0] := (a[63:0] OR b[63:0]) - - - MMX -
mmintrin.h
- Logical + + + + Compute the bitwise OR of 64 bits (representing integer data) in "a" and "b", and store the result in "dst". + +dst[63:0] := (a[63:0] OR b[63:0]) + + + MMX +
mmintrin.h
+ Logical
- - - - Compute the bitwise XOR of 64 bits (representing integer data) in "a" and "b", - and store the result in "dst". - - dst[63:0] := (a[63:0] XOR b[63:0]) - - - MMX -
mmintrin.h
- Logical + + + + Compute the bitwise XOR of 64 bits (representing integer data) in "a" and "b", and store the result in "dst". + +dst[63:0] := (a[63:0] XOR b[63:0]) + + + MMX +
mmintrin.h
+ Logical
- - - - Compute the bitwise AND of 64 bits (representing integer data) in "a" and "b", - and store the result in "dst". - - dst[63:0] := (a[63:0] AND b[63:0]) - - - MMX -
mmintrin.h
- Logical + + + + Compute the bitwise AND of 64 bits (representing integer data) in "a" and "b", and store the result in "dst". + +dst[63:0] := (a[63:0] AND b[63:0]) + + + MMX +
mmintrin.h
+ Logical
- - - - Compute the bitwise NOT of 64 bits (representing integer data) in "a" and then - AND with "b", and store the result in "dst". - - dst[63:0] := ((NOT a[63:0]) AND b[63:0]) - - - MMX -
mmintrin.h
- Logical + + + + Compute the bitwise NOT of 64 bits (representing integer data) in "a" and then AND with "b", and store the result in "dst". + +dst[63:0] := ((NOT a[63:0]) AND b[63:0]) + + + MMX +
mmintrin.h
+ Logical
- - - - Compute the bitwise OR of 64 bits (representing integer data) in "a" and "b", - and store the result in "dst". - - dst[63:0] := (a[63:0] OR b[63:0]) - - - MMX -
mmintrin.h
- Logical + + + + Compute the bitwise OR of 64 bits (representing integer data) in "a" and "b", and store the result in "dst". + +dst[63:0] := (a[63:0] OR b[63:0]) + + + MMX +
mmintrin.h
+ Logical
- - - - Compute the bitwise XOR of 64 bits (representing integer data) in "a" and "b", - and store the result in "dst". - - dst[63:0] := (a[63:0] XOR b[63:0]) - - - MMX -
mmintrin.h
- Logical + + + + Compute the bitwise XOR of 64 bits (representing integer data) in "a" and "b", and store the result in "dst". + +dst[63:0] := (a[63:0] XOR b[63:0]) + + + MMX +
mmintrin.h
+ Logical
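(Illustrative C sketch, not part of this diff: AND/ANDNOT/OR compose into the classic branchless select, r = (mask AND a) OR (NOT mask AND b), which is the main consumer of the all-ones masks produced by the compare entries below. Assuming mmintrin.h:)

    #include <mmintrin.h>
    #include <stdio.h>
    #include <string.h>

    /* Select a where mask bits are 1, b where they are 0. */
    static __m64 select64(__m64 mask, __m64 a, __m64 b) {
        return _mm_or_si64(_mm_and_si64(mask, a), _mm_andnot_si64(mask, b));
    }

    int main(void) {
        __m64 m = _mm_set_pi16(0, -1, 0, -1);  /* all-ones words pick from a */
        __m64 r = select64(m, _mm_set1_pi16(7), _mm_set1_pi16(9));
        short s[4];
        memcpy(s, &r, 8);
        _mm_empty();
        printf("%d %d %d %d\n", s[0], s[1], s[2], s[3]);  /* 7 9 7 9 */
        return 0;
    }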
- - - - Compare packed 8-bit integers in "a" and "b" for equality, and store the - results in "dst". - - FOR j := 0 to 7 - i := j*8 - dst[i+7:i] := ( a[i+7:i] == b[i+7:i] ) ? 0xFF : 0 - ENDFOR - - - MMX -
mmintrin.h
- Compare + + + + Compare packed 8-bit integers in "a" and "b" for equality, and store the results in "dst". + +FOR j := 0 to 7 + i := j*8 + dst[i+7:i] := ( a[i+7:i] == b[i+7:i] ) ? 0xFF : 0 +ENDFOR + + + MMX +
mmintrin.h
+ Compare
- - - - Compare packed 16-bit integers in "a" and "b" for equality, and store the - results in "dst". - - FOR j := 0 to 3 - i := j*16 - dst[i+15:i] := ( a[i+15:i] == b[i+15:i] ) ? 0xFFFF : 0 - ENDFOR - - - MMX -
mmintrin.h
- Compare + + + + Compare packed 16-bit integers in "a" and "b" for equality, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + dst[i+15:i] := ( a[i+15:i] == b[i+15:i] ) ? 0xFFFF : 0 +ENDFOR + + + MMX +
mmintrin.h
+ Compare
- - - - Compare packed 32-bit integers in "a" and "b" for equality, and store the - results in "dst". - - FOR j := 0 to 1 - i := j*32 - dst[i+31:i] := ( a[i+31:i] == b[i+31:i] ) ? 0xFFFFFFFF : 0 - ENDFOR - - - MMX -
mmintrin.h
- Compare + + + + Compare packed 32-bit integers in "a" and "b" for equality, and store the results in "dst". + +FOR j := 0 to 1 + i := j*32 + dst[i+31:i] := ( a[i+31:i] == b[i+31:i] ) ? 0xFFFFFFFF : 0 +ENDFOR + + + MMX +
mmintrin.h
+ Compare
- - - - Compare packed 8-bit integers in "a" and "b" for greater-than, and store the - results in "dst". - - FOR j := 0 to 7 - i := j*8 - dst[i+7:i] := ( a[i+7:i] > b[i+7:i] ) ? 0xFF : 0 - ENDFOR - - - MMX -
mmintrin.h
- Compare + + + + Compare packed 8-bit integers in "a" and "b" for greater-than, and store the results in "dst". + +FOR j := 0 to 7 + i := j*8 + dst[i+7:i] := ( a[i+7:i] > b[i+7:i] ) ? 0xFF : 0 +ENDFOR + + + MMX +
mmintrin.h
+ Compare
- - - - Compare packed 16-bit integers in "a" and "b" for greater-than, and store the - results in "dst". - - FOR j := 0 to 3 - i := j*16 - dst[i+15:i] := ( a[i+15:i] > b[i+15:i] ) ? 0xFFFF : 0 - ENDFOR - - - MMX -
mmintrin.h
- Compare + + + + Compare packed 16-bit integers in "a" and "b" for greater-than, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + dst[i+15:i] := ( a[i+15:i] > b[i+15:i] ) ? 0xFFFF : 0 +ENDFOR + + + MMX +
mmintrin.h
+ Compare
- - - - Compare packed 32-bit integers in "a" and "b" for greater-than, and store the - results in "dst". - - FOR j := 0 to 1 - i := j*32 - dst[i+31:i] := ( a[i+31:i] > b[i+31:i] ) ? 0xFFFFFFFF : 0 - ENDFOR - - - MMX -
mmintrin.h
- Compare + + + + Compare packed 32-bit integers in "a" and "b" for greater-than, and store the results in "dst". + +FOR j := 0 to 1 + i := j*32 + dst[i+31:i] := ( a[i+31:i] > b[i+31:i] ) ? 0xFFFFFFFF : 0 +ENDFOR + + + MMX +
mmintrin.h
+ Compare
- - - - Compare packed 8-bit integers in "a" and "b" for equality, and store the - results in "dst". - - FOR j := 0 to 7 - i := j*8 - dst[i+7:i] := ( a[i+7:i] == b[i+7:i] ) ? 0xFF : 0 - ENDFOR - - - MMX -
mmintrin.h
- Compare + + + + Compare packed 8-bit integers in "a" and "b" for equality, and store the results in "dst". + +FOR j := 0 to 7 + i := j*8 + dst[i+7:i] := ( a[i+7:i] == b[i+7:i] ) ? 0xFF : 0 +ENDFOR + + + MMX +
mmintrin.h
+ Compare
- - - - Compare packed 16-bit integers in "a" and "b" for equality, and store the - results in "dst". - - FOR j := 0 to 3 - i := j*16 - dst[i+15:i] := ( a[i+15:i] == b[i+15:i] ) ? 0xFFFF : 0 - ENDFOR - - - MMX -
mmintrin.h
- Compare + + + + Compare packed 16-bit integers in "a" and "b" for equality, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + dst[i+15:i] := ( a[i+15:i] == b[i+15:i] ) ? 0xFFFF : 0 +ENDFOR + + + MMX +
mmintrin.h
+ Compare
- - - - Compare packed 32-bit integers in "a" and "b" for equality, and store the - results in "dst". - - FOR j := 0 to 1 - i := j*32 - dst[i+31:i] := ( a[i+31:i] == b[i+31:i] ) ? 0xFFFFFFFF : 0 - ENDFOR - - - MMX -
mmintrin.h
- Compare + + + + Compare packed 32-bit integers in "a" and "b" for equality, and store the results in "dst". + +FOR j := 0 to 1 + i := j*32 + dst[i+31:i] := ( a[i+31:i] == b[i+31:i] ) ? 0xFFFFFFFF : 0 +ENDFOR + + + MMX +
mmintrin.h
+ Compare
- - - - Compare packed signed 8-bit integers in "a" and "b" for greater-than, and store - the results in "dst". - - FOR j := 0 to 7 - i := j*8 - dst[i+7:i] := ( a[i+7:i] > b[i+7:i] ) ? 0xFF : 0 - ENDFOR - - - MMX -
mmintrin.h
- Compare + + + + Compare packed signed 8-bit integers in "a" and "b" for greater-than, and store the results in "dst". + +FOR j := 0 to 7 + i := j*8 + dst[i+7:i] := ( a[i+7:i] > b[i+7:i] ) ? 0xFF : 0 +ENDFOR + + + MMX +
mmintrin.h
+ Compare
- - - - Compare packed signed 16-bit integers in "a" and "b" for greater-than, and - store the results in "dst". - - FOR j := 0 to 3 - i := j*16 - dst[i+15:i] := ( a[i+15:i] > b[i+15:i] ) ? 0xFFFF : 0 - ENDFOR - - - MMX -
mmintrin.h
- Compare + + + + Compare packed signed 16-bit integers in "a" and "b" for greater-than, and store the results in "dst". + +FOR j := 0 to 3 + i := j*16 + dst[i+15:i] := ( a[i+15:i] > b[i+15:i] ) ? 0xFFFF : 0 +ENDFOR + + + MMX +
mmintrin.h
+ Compare
- - - - Compare packed signed 32-bit integers in "a" and "b" for greater-than, and - store the results in "dst". - - FOR j := 0 to 1 - i := j*32 - dst[i+31:i] := ( a[i+31:i] > b[i+31:i] ) ? 0xFFFFFFFF : 0 - ENDFOR - - - MMX -
mmintrin.h
- Compare + + + + Compare packed signed 32-bit integers in "a" and "b" for greater-than, and store the results in "dst". + +FOR j := 0 to 1 + i := j*32 + dst[i+31:i] := ( a[i+31:i] > b[i+31:i] ) ? 0xFFFFFFFF : 0 +ENDFOR + + + MMX +
mmintrin.h
+ Compare
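(Illustrative C sketch, not part of this diff: the comparisons return all-ones or all-zero elements rather than booleans, so combining _mm_cmpgt_pi16 with the logical ops yields a branchless per-element maximum. Assuming mmintrin.h:)

    #include <mmintrin.h>
    #include <stdio.h>
    #include <string.h>

    int main(void) {
        __m64 a = _mm_set_pi16(1, 50, -3, 40);
        __m64 b = _mm_set_pi16(2, 8, 0, 60);
        __m64 gt = _mm_cmpgt_pi16(a, b);  /* 0xFFFF where a > b, else 0 */
        __m64 max = _mm_or_si64(_mm_and_si64(gt, a), _mm_andnot_si64(gt, b));
        short m[4];
        memcpy(m, &max, 8);
        _mm_empty();
        printf("%d %d %d %d\n", m[0], m[1], m[2], m[3]);  /* 60 0 50 2 */
        return 0;
    }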
- - - Return vector of type __m64 with all elements set to zero. - - dst[MAX:0] := 0 - - - MMX -
mmintrin.h
- Set + + + Return vector of type __m64 with all elements set to zero. + +dst[MAX:0] := 0 + + + MMX +
mmintrin.h
+ Set
- - - - Set packed 32-bit integers in "dst" with the supplied values. - - dst[31:0] := e0 - dst[63:32] := e1 - - MMX -
mmintrin.h
- Set + + + + Set packed 32-bit integers in "dst" with the supplied values. + +dst[31:0] := e0 +dst[63:32] := e1 + + MMX +
mmintrin.h
+ Set
- - - - - - Set packed 16-bit integers in "dst" with the supplied values. - - dst[15:0] := e0 - dst[31:16] := e1 - dst[47:32] := e2 - dst[63:48] := e3 - - MMX -
mmintrin.h
- Set + + + + + + Set packed 16-bit integers in "dst" with the supplied values. + +dst[15:0] := e0 +dst[31:16] := e1 +dst[47:32] := e2 +dst[63:48] := e3 + + MMX +
mmintrin.h
+ Set
- - - - - - - - - - Set packed 8-bit integers in "dst" with the supplied values. - - dst[7:0] := e0 - dst[15:8] := e1 - dst[23:16] := e2 - dst[31:24] := e3 - dst[39:32] := e4 - dst[47:40] := e5 - dst[55:48] := e6 - dst[63:56] := e7 - - MMX -
mmintrin.h
- Set + + + + + + + + + + Set packed 8-bit integers in "dst" with the supplied values. + +dst[7:0] := e0 +dst[15:8] := e1 +dst[23:16] := e2 +dst[31:24] := e3 +dst[39:32] := e4 +dst[47:40] := e5 +dst[55:48] := e6 +dst[63:56] := e7 + + MMX +
mmintrin.h
+ Set
- - - Broadcast 32-bit integer "a" to all elements of "dst". - - FOR j := 0 to 1 - i := j*32 - dst[i+31:i] := a[31:0] - ENDFOR - - MMX -
mmintrin.h
- Set + + + Broadcast 32-bit integer "a" to all elements of "dst". + +FOR j := 0 to 1 + i := j*32 + dst[i+31:i] := a[31:0] +ENDFOR + + MMX +
mmintrin.h
+ Set
- - - Broadcast 16-bit integer "a" to all all elements of "dst". - - FOR j := 0 to 3 - i := j*16 - dst[i+15:i] := a[15:0] - ENDFOR - - MMX -
mmintrin.h
- Set + + + Broadcast 16-bit integer "a" to all elements of "dst". + +FOR j := 0 to 3 + i := j*16 + dst[i+15:i] := a[15:0] +ENDFOR + + MMX
mmintrin.h
+ Set
- - - Broadcast 8-bit integer "a" to all elements of "dst". - - FOR j := 0 to 7 - i := j*8 - dst[i+7:i] := a[7:0] - ENDFOR - - MMX -
mmintrin.h
- Set + + + Broadcast 8-bit integer "a" to all elements of "dst". + +FOR j := 0 to 7 + i := j*8 + dst[i+7:i] := a[7:0] +ENDFOR + + MMX +
mmintrin.h
+ Set
- - - - Set packed 32-bit integers in "dst" with the supplied values in reverse order. - - dst[31:0] := e1 - dst[63:32] := e0 - - MMX -
mmintrin.h
- Set + + + + Set packed 32-bit integers in "dst" with the supplied values in reverse order. + +dst[31:0] := e1 +dst[63:32] := e0 + + MMX +
mmintrin.h
+ Set
- - - - - - Set packed 16-bit integers in "dst" with the supplied values in reverse order. - - dst[15:0] := e3 - dst[31:16] := e2 - dst[47:32] := e1 - dst[63:48] := e0 - - MMX -
mmintrin.h
- Set + + + + + + Set packed 16-bit integers in "dst" with the supplied values in reverse order. + +dst[15:0] := e3 +dst[31:16] := e2 +dst[47:32] := e1 +dst[63:48] := e0 + + MMX +
mmintrin.h
+ Set
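(Illustrative C sketch, not part of this diff: _mm_set_* takes arguments from the highest element down to element 0, while the _mm_setr_* forms above take them in memory order, so these two calls build identical vectors. Assuming mmintrin.h:)

    #include <mmintrin.h>
    #include <string.h>
    #include <assert.h>

    int main(void) {
        __m64 a = _mm_set_pi16(3, 2, 1, 0);   /* e3=3 ... e0=0 */
        __m64 b = _mm_setr_pi16(0, 1, 2, 3);  /* element 0 first */
        assert(memcmp(&a, &b, sizeof a) == 0);
        _mm_empty();
        return 0;
    }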
- - - - - - - - - - Set packed 8-bit integers in "dst" with the supplied values in reverse order. - - dst[7:0] := e7 - dst[15:8] := e6 - dst[23:16] := e5 - dst[31:24] := e4 - dst[39:32] := e3 - dst[47:40] := e2 - dst[55:48] := e1 - dst[63:56] := e0 - - MMX -
mmintrin.h
- Set -
- - - - - - - - Arm address monitoring hardware using the address specified in "p". A store to - an address within the specified address range triggers the monitoring hardware. Specify - optional extensions in "extensions", and optional hints in "hints". - - MONITOR -
pmmintrin.h
- General Support + + + + + + + + + + Set packed 8-bit integers in "dst" with the supplied values in reverse order. + +dst[7:0] := e7 +dst[15:8] := e6 +dst[23:16] := e5 +dst[31:24] := e4 +dst[39:32] := e3 +dst[47:40] := e2 +dst[55:48] := e1 +dst[63:56] := e0 + + MMX +
mmintrin.h
+ Set +
+ + + + + + + + Arm address monitoring hardware using the address specified in "p". A store to an address within the specified address range triggers the monitoring hardware. Specify optional extensions in "extensions", and optional hints in "hints". + + MONITOR +
pmmintrin.h
+ General Support
- - - - Hint to the processor that it can enter an implementation-dependent-optimized - state while waiting for an event or store operation to the address range specified by - MONITOR. - - MONITOR -
pmmintrin.h
- General Support -
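(Illustrative C sketch, not part of this diff: MONITOR arms an address range and MWAIT parks the core until that range is written or another wake event fires. Both require SSE3 monitor support and are privileged on most operating systems, so this only sketches the calling pattern of _mm_monitor/_mm_mwait from pmmintrin.h:)

    #include <pmmintrin.h>

    volatile int flag;

    static void wait_for_flag(void) {
        while (!flag) {
            _mm_monitor((const void *)&flag, 0, 0);  /* arm the monitoring hardware */
            if (!flag)
                _mm_mwait(0, 0);                     /* sleep until the line is stored to */
        }
    }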
- - - - - - Load 16 bits from memory, perform a byte swap operation, and store the result - in "dst". - - FOR j := 0 to 1 - i := j*8 - dst[i+7:i] := MEM[ptr+15-i:ptr+8-i] - ENDFOR - - - MOVBE -
immintrin.h
- Load + + + + Hint to the processor that it can enter an implementation-dependent-optimized state while waiting for an event or store operation to the address range specified by MONITOR. + + MONITOR +
pmmintrin.h
+ General Support +
+ + + + + + Load 16 bits from memory, perform a byte swap operation, and store the result in "dst". + +FOR j := 0 to 1 + i := j*8 + dst[i+7:i] := MEM[ptr+15-i:ptr+8-i] +ENDFOR + + + MOVBE +
immintrin.h
+ Load
- - - Load 32 bits from memory, perform a byte swap operation, and store the result - in "dst". - - FOR j := 0 to 3 - i := j*8 - dst[i+7:i] := MEM[ptr+31-i:ptr+24-i] - ENDFOR - - - MOVBE -
immintrin.h
- Load + + + Load 32 bits from memory, perform a byte swap operation, and store the result in "dst". + +FOR j := 0 to 3 + i := j*8 + dst[i+7:i] := MEM[ptr+31-i:ptr+24-i] +ENDFOR + + + MOVBE +
immintrin.h
+ Load
- - - Load 64 bits from memory, perform a byte swap operation, and store the result - in "dst". - - FOR j := 0 to 7 - i := j*8 - dst[i+7:i] := MEM[ptr+63-i:ptr+56-i] - ENDFOR - - - MOVBE -
immintrin.h
- Load + + + Load 64 bits from memory, perform a byte swap operation, and store the result in "dst". + +FOR j := 0 to 7 + i := j*8 + dst[i+7:i] := MEM[ptr+63-i:ptr+56-i] +ENDFOR + + + MOVBE +
immintrin.h
+ Load
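(Illustrative C sketch, not part of this diff: the MOVBE load entries perform a load plus byte swap in one instruction, which is convenient for big-endian wire formats. Assumes a compiler whose immintrin.h exposes _loadbe_i32 and friends, e.g. gcc/clang with -mmovbe:)

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
        unsigned char wire[4] = {0x12, 0x34, 0x56, 0x78};  /* big-endian 0x12345678 */
        int v = _loadbe_i32(wire);
        printf("%08x\n", (unsigned)v);                     /* 12345678 on x86 */
        return 0;
    }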
- - - - Perform a bit swap operation of the 16 bits in "data", and store the results to - memory. - - FOR j := 0 to 1 - i := j*8 - MEM[ptr+i+7:ptr+i] := data[15-i:8-i] - ENDFOR - - - MOVBE -
immintrin.h
- Store + + + + Perform a byte swap operation of the 16 bits in "data", and store the results to memory. + +FOR j := 0 to 1 + i := j*8 + MEM[ptr+i+7:ptr+i] := data[15-i:8-i] +ENDFOR + + + MOVBE
immintrin.h
+ Store
- - - - Perform a bit swap operation of the 32 bits in "data", and store the results to - memory. - - addr := MEM[ptr] - FOR j := 0 to 3 - i := j*8 - MEM[ptr+i+7:ptr+i] := data[31-i:24-i] - ENDFOR - - - MOVBE -
immintrin.h
- Store + + + + Perform a byte swap operation of the 32 bits in "data", and store the results to memory. + +addr := MEM[ptr] +FOR j := 0 to 3 + i := j*8 + MEM[ptr+i+7:ptr+i] := data[31-i:24-i] +ENDFOR + + + MOVBE
immintrin.h
+ Store
- - - - Perform a bit swap operation of the 64 bits in "data", and store the results to - memory. - - addr := MEM[ptr] - FOR j := 0 to 7 - i := j*8 - MEM[ptr+i+7:ptr+i] := data[63-i:56-i] - ENDFOR - - - MOVBE -
immintrin.h
- Store -
- - - - - - - Move 64-byte (512-bit) value using direct store from source memory address - "src" to destination memory address "dst". - - MEM[dst+511:dst] := MEM[src+511:src] - - - MOVDIR64B -
immintrin.h
- Store -
- - - - - - - Store 64-bit integer from "val" into memory using direct store. - - MEM[dst+63:dst] := val[63:0] - - - MOVDIRI -
immintrin.h
- Store + + + + Perform a byte swap operation of the 64 bits in "data", and store the results to memory. + +addr := MEM[ptr]
+FOR j := 0 to 7
+ i := j*8
+ MEM[ptr+i+7:ptr+i] := data[63-i:56-i]
+ENDFOR
+
+
+ MOVBE +
immintrin.h
+ Store +
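In Rust terms, the MOVBE loads and stores above are byte-swapped memory accesses. A minimal plain-Rust sketch of the same semantics; the helper names are illustrative, not real intrinsics (core::arch does not expose MOVBE, and compilers typically emit it for patterns like these):

```rust
/// Illustrative stand-in for the `_loadbe_i32` pseudocode: load 32 bits,
/// then byte-swap. Not a real intrinsic.
unsafe fn loadbe_u32(ptr: *const u8) -> u32 {
    u32::from_be_bytes(ptr.cast::<[u8; 4]>().read())
}

/// Illustrative stand-in for the `_storebe_i32` pseudocode: byte-swap,
/// then store. Not a real intrinsic.
unsafe fn storebe_u32(ptr: *mut u8, data: u32) {
    ptr.cast::<[u8; 4]>().write(data.to_be_bytes())
}

fn main() {
    let mut buf = [0u8; 4];
    unsafe { storebe_u32(buf.as_mut_ptr(), 0x1122_3344) };
    assert_eq!(buf, [0x11, 0x22, 0x33, 0x44]); // most-significant byte first
    assert_eq!(unsafe { loadbe_u32(buf.as_ptr()) }, 0x1122_3344);
}
```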
+ + + + + + + Move 64-byte (512-bit) value using direct store from source memory address "src" to destination memory address "dst". + +MEM[dst+511:dst] := MEM[src+511:src] + + + MOVDIR64B +
immintrin.h
+ Store +
+ + + + + + + Store 64-bit integer from "val" into memory using direct store. + +MEM[dst+63:dst] := val[63:0] + + + MOVDIRI +
immintrin.h
+ Store
- - - - Store 32-bit integer from "val" into memory using direct store. - - MEM[dst+31:dst] := val[31:0] - - - MOVDIRI -
immintrin.h
- Store -
- - - - - - - Make a pointer with the value of "srcmem" and bounds set to ["srcmem", "srcmem" - + "size" - 1], and store the result in "dst". - dst := srcmem - dst.LB := srcmem.LB - dst.UB := srcmem + size - 1 - - - MPX -
immintrin.h
- Miscellaneous - + + + + Store 32-bit integer from "val" into memory using direct store. + +MEM[dst+31:dst] := val[31:0] + + + MOVDIRI +
immintrin.h
+ Store +
+ + + + + + + Make a pointer with the value of "srcmem" and bounds set to ["srcmem", "srcmem" + "size" - 1], and store the result in "dst". + dst := srcmem +dst.LB := srcmem.LB +dst.UB := srcmem + size - 1 + + + MPX +
immintrin.h
+ Miscellaneous +
- - - - - Narrow the bounds for pointer "q" to the intersection of the bounds of "r" and - the bounds ["q", "q" + "size" - 1], and store the result in "dst". - dst := q - IF r.LB > (q + size - 1) OR r.UB < q - dst.LB := 1 - dst.UB := 0 - ELSE - dst.LB := MAX(r.LB, q) - dst.UB := MIN(r.UB, (q + size - 1)) - FI - - MPX -
immintrin.h
- Miscellaneous - + + + + + Narrow the bounds for pointer "q" to the intersection of the bounds of "r" and the bounds ["q", "q" + "size" - 1], and store the result in "dst". + dst := q +IF r.LB > (q + size - 1) OR r.UB < q + dst.LB := 1 + dst.UB := 0 +ELSE + dst.LB := MAX(r.LB, q) + dst.UB := MIN(r.UB, (q + size - 1)) +FI + + MPX +
immintrin.h
+ Miscellaneous +
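The bound-narrowing rule above is plain interval intersection, with an inverted pair encoding "empty". A sketch in ordinary Rust; MPX itself is deprecated and not exposed by core::arch, and the `Bounds` type here is made up for illustration:

```rust
/// Made-up stand-in for an MPX bounds pair; not a real core::arch type.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Bounds {
    lb: usize,
    ub: usize,
}

/// Mirrors the bound-narrowing pseudocode above.
fn narrow(r: Bounds, q: usize, size: usize) -> Bounds {
    let end = q + size - 1;
    if r.lb > end || r.ub < q {
        Bounds { lb: 1, ub: 0 } // empty: lower bound above upper bound
    } else {
        Bounds { lb: r.lb.max(q), ub: r.ub.min(end) }
    }
}

fn main() {
    let r = Bounds { lb: 0x1000, ub: 0x1fff };
    assert_eq!(narrow(r, 0x1800, 0x100), Bounds { lb: 0x1800, ub: 0x18ff });
}
```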
- - - - Make a pointer with the value of "q" and bounds set to the bounds of "r" (e.g. - copy the bounds of "r" to pointer "q"), and store the result in "dst". - dst := q - dst.LB := r.LB - dst.UB := r.UB - - MPX -
immintrin.h
- Miscellaneous - + + + + Make a pointer with the value of "q" and bounds set to the bounds of "r" (e.g. copy the bounds of "r" to pointer "q"), and store the result in "dst". + dst := q +dst.LB := r.LB +dst.UB := r.UB + + MPX +
immintrin.h
+ Miscellaneous +
- - - Make a pointer with the value of "q" and open bounds, which allow the pointer - to access the entire virtual address space, and store the result in "dst". - dst := q - dst.LB := 0 - dst.UB := 0 - - MPX -
immintrin.h
- Miscellaneous - + + + Make a pointer with the value of "q" and open bounds, which allow the pointer to access the entire virtual address space, and store the result in "dst". + dst := q +dst.LB := 0 +dst.UB := 0 + + MPX +
immintrin.h
+ Miscellaneous +
- - - - Stores the bounds of "ptr_val" pointer in memory at address "ptr_addr". - MEM[ptr_addr].LB := ptr_val.LB - MEM[ptr_addr].UB := ptr_val.UB - - - MPX -
immintrin.h
- Miscellaneous - + + + + Stores the bounds of "ptr_val" pointer in memory at address "ptr_addr". + MEM[ptr_addr].LB := ptr_val.LB +MEM[ptr_addr].UB := ptr_val.UB + + + MPX +
immintrin.h
+ Miscellaneous +
- - - Checks if "q" is within its lower bound, and throws a #BR if not. - IF q < q.LB - #BR - FI - - - MPX -
immintrin.h
- Miscellaneous - + + + Checks if "q" is within its lower bound, and throws a #BR if not. + IF q < q.LB + #BR +FI + + + MPX +
immintrin.h
+ Miscellaneous +
- - - Checks if "q" is within its upper bound, and throws a #BR if not. - IF q > q.UB - #BR - FI - - - - MPX -
immintrin.h
- Miscellaneous - + + + Checks if "q" is within its upper bound, and throws a #BR if not. + IF q > q.UB + #BR +FI + + + + MPX +
immintrin.h
+ Miscellaneous +
- - - - Checks if ["q", "q" + "size" - 1] is within the lower and upper bounds of "q" - and throws a #BR if not. - IF (q + size - 1) < q.LB OR (q + size - 1) > q.UB - #BR - FI - - - - MPX -
immintrin.h
- Miscellaneous - + + + + Checks if ["q", "q" + "size" - 1] is within the lower and upper bounds of "q" and throws a #BR if not. + IF (q + size - 1) < q.LB OR (q + size - 1) > q.UB + #BR +FI + + + + MPX +
immintrin.h
+ Miscellaneous +
- - - Return the lower bound of "q". - dst := q.LB - - MPX -
immintrin.h
- Miscellaneous - + + + Return the lower bound of "q". + dst := q.LB + + MPX +
immintrin.h
+ Miscellaneous +
- - - Return the upper bound of "q". - dst := q.UB - - MPX -
immintrin.h
- Miscellaneous - -
- - - - - Set "dst" to the index of the lowest set bit in 32-bit integer "a". If no bits - are set in "a" then "dst" is undefined. - - tmp := 0 - IF a == 0 - // dst is undefined - ELSE - DO WHILE ((tmp < 32) AND a[tmp] == 0) - tmp := tmp + 1 - OD - FI - dst := tmp - - -
immintrin.h
- Bit Manipulation + + + Return the upper bound of "q". + dst := q.UB + + MPX +
immintrin.h
+ Miscellaneous + +
+ + + + + Set "dst" to the index of the lowest set bit in 32-bit integer "a". If no bits are set in "a" then "dst" is undefined. + +tmp := 0 +IF a == 0 + // dst is undefined +ELSE + DO WHILE ((tmp < 32) AND a[tmp] == 0) + tmp := tmp + 1 + OD +FI +dst := tmp + + +
immintrin.h
+ Bit Manipulation
- - - Set "dst" to the index of the highest set bit in 32-bit integer "a". If no bits - are set in "a" then "dst" is undefined. - - tmp := 31 - IF a == 0 - // dst is undefined - ELSE - DO WHILE ((tmp > 0) AND a[tmp] == 0) - tmp := tmp - 1 - OD - FI - dst := tmp - - -
immintrin.h
- Bit Manipulation + + + Set "dst" to the index of the highest set bit in 32-bit integer "a". If no bits are set in "a" then "dst" is undefined. + +tmp := 31 +IF a == 0 + // dst is undefined +ELSE + DO WHILE ((tmp > 0) AND a[tmp] == 0) + tmp := tmp - 1 + OD +FI +dst := tmp + + +
immintrin.h
+ Bit Manipulation
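Both scans above have direct equivalents in Rust's integer methods; a quick sketch (both intrinsics are undefined for a == 0, hence the assertions):

```rust
fn bit_scan_forward(a: u32) -> u32 {
    debug_assert!(a != 0); // result is undefined for a == 0
    a.trailing_zeros() // index of the lowest set bit, as in the loop above
}

fn bit_scan_reverse(a: u32) -> u32 {
    debug_assert!(a != 0);
    31 - a.leading_zeros() // index of the highest set bit
}

fn main() {
    assert_eq!(bit_scan_forward(0b0110_0000), 5);
    assert_eq!(bit_scan_reverse(0b0110_0000), 6);
}
```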
- - - - Set "index" to the index of the lowest set bit in 32-bit integer "mask". If no - bits are set in "a", then "index" is undefined and "dst" is set to 0, otherwise "dst" is - set to 1. - - tmp := 0 - IF a == 0 - // MEM[index+31:index] is undefined - dst := 0 - ELSE - DO WHILE ((tmp < 32) AND a[tmp] == 0) - tmp := tmp + 1 - OD - MEM[index+31:index] := tmp - dst := (tmp == 31) ? 0 : 1 - FI - - -
immintrin.h
- Bit Manipulation + + + + Set "index" to the index of the lowest set bit in 32-bit integer "mask". If no bits are set in "mask", then "index" is undefined and "dst" is set to 0, otherwise "dst" is set to 1. + +tmp := 0
+IF mask == 0
+ // MEM[index+31:index] is undefined
+ dst := 0
+ELSE
+ DO WHILE ((tmp < 32) AND mask[tmp] == 0)
+ tmp := tmp + 1
+ OD
+ MEM[index+31:index] := tmp
+ dst := 1
+FI
+
+
immintrin.h
+ Bit Manipulation
- - - - Set "index" to the index of the highest set bit in 32-bit integer "mask". If no - bits are set in "a", then "index" is undefined and "dst" is set to 0, otherwise "dst" is - set to 1. - - tmp := 31 - IF a == 0 - // MEM[index+31:index] is undefined - dst := 0 - ELSE - DO WHILE ((tmp > 0) AND a[tmp] == 0) - tmp := tmp - 1 - OD - MEM[index+31:index] := tmp - dst := (tmp == 0) ? 0 : 1 - FI - - -
immintrin.h
- Bit Manipulation + + + + Set "index" to the index of the highest set bit in 32-bit integer "mask". If no bits are set in "mask", then "index" is undefined and "dst" is set to 0, otherwise "dst" is set to 1. + +tmp := 31
+IF mask == 0
+ // MEM[index+31:index] is undefined
+ dst := 0
+ELSE
+ DO WHILE ((tmp > 0) AND mask[tmp] == 0)
+ tmp := tmp - 1
+ OD
+ MEM[index+31:index] := tmp
+ dst := 1
+FI
+
+
immintrin.h
+ Bit Manipulation
- - - - Set "index" to the index of the lowest set bit in 32-bit integer "mask". If no - bits are set in "a", then "index" is undefined and "dst" is set to 0, otherwise "dst" is - set to 1. - - tmp := 0 - IF a == 0 - // MEM[index+31:index] is undefined - dst := 0 - ELSE - DO WHILE ((tmp < 64) AND a[tmp] == 0) - tmp := tmp + 1 - OD - MEM[index+31:index] := tmp - dst := (tmp == 63) ? 0 : 1 - FI - - -
immintrin.h
- Bit Manipulation + + + + Set "index" to the index of the lowest set bit in 64-bit integer "mask". If no bits are set in "mask", then "index" is undefined and "dst" is set to 0, otherwise "dst" is set to 1. + +tmp := 0
+IF mask == 0
+ // MEM[index+31:index] is undefined
+ dst := 0
+ELSE
+ DO WHILE ((tmp < 64) AND mask[tmp] == 0)
+ tmp := tmp + 1
+ OD
+ MEM[index+31:index] := tmp
+ dst := 1
+FI
+
+
immintrin.h
+ Bit Manipulation
- - - - Set "index" to the index of the highest set bit in 32-bit integer "mask". If no - bits are set in "a", then "index" is undefined and "dst" is set to 0, otherwise "dst" is - set to 1. - - tmp := 63 - IF a == 0 - // MEM[index+31:index] is undefined - dst := 0 - ELSE - DO WHILE ((tmp > 0) AND a[tmp] == 0) - tmp := tmp - 1 - OD - MEM[index+31:index] := tmp - dst := (tmp == 0) ? 0 : 1 - FI - - -
immintrin.h
- Bit Manipulation + + + + Set "index" to the index of the highest set bit in 64-bit integer "mask". If no bits are set in "mask", then "index" is undefined and "dst" is set to 0, otherwise "dst" is set to 1. + +tmp := 63
+IF mask == 0
+ // MEM[index+31:index] is undefined
+ dst := 0
+ELSE
+ DO WHILE ((tmp > 0) AND mask[tmp] == 0)
+ tmp := tmp - 1
+ OD
+ MEM[index+31:index] := tmp
+ dst := 1
+FI
+
+
immintrin.h
+ Bit Manipulation
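The index-plus-found-flag convention of the `_BitScan*` family maps naturally onto `Option` in Rust; a sketch of the 64-bit variants described above:

```rust
fn bit_scan_forward64(mask: u64) -> Option<u32> {
    // None plays the role of "dst = 0, index undefined"
    (mask != 0).then(|| mask.trailing_zeros())
}

fn bit_scan_reverse64(mask: u64) -> Option<u32> {
    (mask != 0).then(|| 63 - mask.leading_zeros())
}

fn main() {
    assert_eq!(bit_scan_forward64(0), None);
    assert_eq!(bit_scan_reverse64(1 << 40), Some(40));
}
```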
- - - - Return the bit at index "b" of 32-bit integer "a". - - addr := a + ZeroExtend64(b) - dst[0] := MEM[addr] - - -
immintrin.h
- Bit Manipulation + + + + Return the bit at index "b" of 32-bit integer "a". + +addr := a + ZeroExtend64(b) +dst[0] := MEM[addr] + + +
immintrin.h
+ Bit Manipulation
- - - - Return the bit at index "b" of 32-bit integer "a", and set that bit to its - complement. - - addr := a + ZeroExtend64(b) - dst[0] := MEM[addr] - MEM[addr] := ~dst[0] - - -
immintrin.h
- Bit Manipulation + + + + Return the bit at index "b" of 32-bit integer "a", and set that bit to its complement. + +addr := a + ZeroExtend64(b) +dst[0] := MEM[addr] +MEM[addr] := ~dst[0] + + +
immintrin.h
+ Bit Manipulation
- - - - Return the bit at index "b" of 32-bit integer "a", and set that bit to zero. - - addr := a + ZeroExtend64(b) - dst[0] := MEM[addr] - MEM[addr] := 0 - - -
immintrin.h
- Bit Manipulation + + + + Return the bit at index "b" of 32-bit integer "a", and set that bit to zero. + +addr := a + ZeroExtend64(b) +dst[0] := MEM[addr] +MEM[addr] := 0 + + +
immintrin.h
+ Bit Manipulation
- - - - Return the bit at index "b" of 32-bit integer "a", and set that bit to one. - - addr := a + ZeroExtend64(b) - dst[0] := MEM[addr] - MEM[addr] := 1 - - -
immintrin.h
- Bit Manipulation + + + + Return the bit at index "b" of 32-bit integer "a", and set that bit to one. + +addr := a + ZeroExtend64(b) +dst[0] := MEM[addr] +MEM[addr] := 1 + + +
immintrin.h
+ Bit Manipulation
- - - - Return the bit at index "b" of 64-bit integer "a". - - addr := a + b - dst[0] := MEM[addr] - - -
immintrin.h
- Bit Manipulation + + + + Return the bit at index "b" of 64-bit integer "a". + +addr := a + b +dst[0] := MEM[addr] + + +
immintrin.h
+ Bit Manipulation
- - - - Return the bit at index "b" of 64-bit integer "a", and set that bit to its - complement. - - addr := a + b - dst[0] := MEM[addr] - MEM[addr] := ~dst[0] - - -
immintrin.h
- Bit Manipulation + + + + Return the bit at index "b" of 64-bit integer "a", and set that bit to its complement. + +addr := a + b +dst[0] := MEM[addr] +MEM[addr] := ~dst[0] + + +
immintrin.h
+ Bit Manipulation
- - - - Return the bit at index "b" of 64-bit integer "a", and set that bit to zero. - - addr := a + b - dst[0] := MEM[addr] - MEM[addr] := 0 - - -
immintrin.h
- Bit Manipulation + + + + Return the bit at index "b" of 64-bit integer "a", and set that bit to zero. + +addr := a + b +dst[0] := MEM[addr] +MEM[addr] := 0 + + +
immintrin.h
+ Bit Manipulation
- - - - Return the bit at index "b" of 64-bit integer "a", and set that bit to one. - - addr := a + b - dst[0] := MEM[addr] - MEM[addr] := 1 - - -
immintrin.h
- Bit Manipulation + + + + Return the bit at index "b" of 64-bit integer "a", and set that bit to one. + +addr := a + b +dst[0] := MEM[addr] +MEM[addr] := 1 + + +
immintrin.h
+ Bit Manipulation
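The `_bittest*` descriptions above address a bit in memory (`addr := a + b`); the sketch below simplifies that to value operations to show the test/set/reset pattern. The helper names are illustrative, not the real intrinsics:

```rust
fn bittest64(a: u64, b: u64) -> u8 {
    ((a >> (b & 63)) & 1) as u8 // the returned dst[0]
}

fn bittest_and_set64(a: &mut u64, b: u64) -> u8 {
    let old = bittest64(*a, b);
    *a |= 1 << (b & 63); // MEM[addr] := 1
    old
}

fn bittest_and_reset64(a: &mut u64, b: u64) -> u8 {
    let old = bittest64(*a, b);
    *a &= !(1 << (b & 63)); // MEM[addr] := 0
    old
}

fn main() {
    let mut x = 0b0100u64;
    assert_eq!(bittest64(x, 2), 1);
    assert_eq!(bittest_and_set64(&mut x, 0), 0);
    assert_eq!(bittest_and_reset64(&mut x, 2), 1);
    assert_eq!(x, 0b0001);
}
```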
- - - Reverse the byte order of 32-bit integer "a", and store the result in "dst". - This intrinsic is provided for conversion between little and big endian values. - - dst[7:0] := a[31:24] - dst[15:8] := a[23:16] - dst[23:16] := a[15:8] - dst[31:24] := a[7:0] - - -
immintrin.h
- Bit Manipulation + + + Reverse the byte order of 32-bit integer "a", and store the result in "dst". This intrinsic is provided for conversion between little and big endian values. + +dst[7:0] := a[31:24] +dst[15:8] := a[23:16] +dst[23:16] := a[15:8] +dst[31:24] := a[7:0] + + +
immintrin.h
+ Bit Manipulation
- - - Reverse the byte order of 64-bit integer "a", and store the result in "dst". - This intrinsic is provided for conversion between little and big endian values. - - dst[7:0] := a[63:56] - dst[15:8] := a[55:48] - dst[23:16] := a[47:40] - dst[31:24] := a[39:32] - dst[39:32] := a[31:24] - dst[47:40] := a[23:16] - dst[55:48] := a[15:8] - dst[63:56] := a[7:0] - - -
immintrin.h
- Bit Manipulation + + + Reverse the byte order of 64-bit integer "a", and store the result in "dst". This intrinsic is provided for conversion between little and big endian values. + +dst[7:0] := a[63:56] +dst[15:8] := a[55:48] +dst[23:16] := a[47:40] +dst[31:24] := a[39:32] +dst[39:32] := a[31:24] +dst[47:40] := a[23:16] +dst[55:48] := a[15:8] +dst[63:56] := a[7:0] + + +
immintrin.h
+ Bit Manipulation
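core::arch exposes the two byte reversals above as `_bswap`/`_bswap64`, and `swap_bytes` lowers to the same BSWAP instruction; a quick consistency check:

```rust
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::{_bswap, _bswap64};

#[cfg(target_arch = "x86_64")]
fn main() {
    let a: i32 = 0x1122_3344;
    assert_eq!(unsafe { _bswap(a) }, a.swap_bytes()); // 0x44332211
    let b: i64 = 0x0102_0304_0506_0708;
    assert_eq!(unsafe { _bswap64(b) }, b.swap_bytes());
}

#[cfg(not(target_arch = "x86_64"))]
fn main() {}
```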
- - - Cast from type float to type unsigned __int32 without conversion. - This intrinsic is only used for compilation and does not generate any instructions, thus - it has zero latency. -
immintrin.h
- Cast + + + Cast from type float to type unsigned __int32 without conversion. + This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. +
immintrin.h
+ Cast
- - - Cast from type double to type unsigned __int64 without conversion. - This intrinsic is only used for compilation and does not generate any instructions, thus - it has zero latency. -
immintrin.h
- Cast + + + Cast from type double to type unsigned __int64 without conversion. + This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. +
immintrin.h
+ Cast
- - - Cast from type unsigned __int32 to type float without conversion. - This intrinsic is only used for compilation and does not generate any instructions, thus - it has zero latency. -
immintrin.h
- Cast + + + Cast from type unsigned __int32 to type float without conversion. + This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. +
immintrin.h
+ Cast
- - - Cast from type unsigned __int64 to type double without conversion. - This intrinsic is only used for compilation and does not generate any instructions, thus - it has zero latency. -
immintrin.h
- Cast + + + Cast from type unsigned __int64 to type double without conversion. + This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. +
immintrin.h
+ Cast
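The zero-latency casts above are pure bit reinterpretations; Rust spells the same operation `to_bits`/`from_bits`:

```rust
fn main() {
    let f = 1.0f32;
    let bits: u32 = f.to_bits(); // like _castf32_u32
    assert_eq!(bits, 0x3f80_0000);
    assert_eq!(f32::from_bits(bits), f); // like _castu32_f32

    let d = -2.0f64; // 64-bit round trip: _castf64_u64 / _castu64_f64
    assert_eq!(f64::from_bits(d.to_bits()), d);
}
```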
- - - - Shift the bits of unsigned long integer "a" left by the number of bits - specified in "shift", rotating the most-significant bit to the least-significant bit - location, and store the unsigned result in "dst". - // size := 32 or 64 - dst := a - count := shift AND (size - 1) - DO WHILE (count > 0) - tmp[0] := dst[size - 1] - dst := (dst << 1) OR tmp[0] - count := count - 1 - OD - - - -
immintrin.h
- Shift + + + + Shift the bits of unsigned long integer "a" left by the number of bits specified in "shift", rotating the most-significant bit to the least-significant bit location, and store the unsigned result in "dst". + // size := 32 or 64 +dst := a +count := shift AND (size - 1) +DO WHILE (count > 0) + tmp[0] := dst[size - 1] + dst := (dst << 1) OR tmp[0] + count := count - 1 +OD + + + +
immintrin.h
+ Shift
- - - - Shift the bits of unsigned long integer "a" right by the number of bits - specified in "shift", rotating the least-significant bit to the most-significant bit - location, and store the unsigned result in "dst". - // size := 32 or 64 - dst := a - count := shift AND (size - 1) - DO WHILE (count > 0) - tmp[size - 1] := dst[0] - dst := (dst >> 1) OR tmp[size - 1] - count := count - 1 - OD - - -
immintrin.h
- Shift + + + + Shift the bits of unsigned long integer "a" right by the number of bits specified in "shift", rotating the least-significant bit to the most-significant bit location, and store the unsigned result in "dst". + // size := 32 or 64 +dst := a +count := shift AND (size - 1) +DO WHILE (count > 0) + tmp[size - 1] := dst[0] + dst := (dst >> 1) OR tmp[size - 1] + count := count - 1 +OD + + +
immintrin.h
+ Shift
- - - - Shift the bits of unsigned 32-bit integer "a" left by the number of bits - specified in "shift", rotating the most-significant bit to the least-significant bit - location, and store the unsigned result in "dst". - - dst := a - count := shift AND 31 - DO WHILE (count > 0) - tmp[0] := dst[31] - dst := (dst << 1) OR tmp[0] - count := count - 1 - OD - - -
immintrin.h
- Shift + + + + Shift the bits of unsigned 32-bit integer "a" left by the number of bits specified in "shift", rotating the most-significant bit to the least-significant bit location, and store the unsigned result in "dst". + +dst := a +count := shift AND 31 +DO WHILE (count > 0) + tmp[0] := dst[31] + dst := (dst << 1) OR tmp[0] + count := count - 1 +OD + + +
immintrin.h
+ Shift
- - - - Shift the bits of unsigned 32-bit integer "a" right by the number of bits - specified in "shift", rotating the least-significant bit to the most-significant bit - location, and store the unsigned result in "dst". - - dst := a - count := shift AND 31 - DO WHILE (count > 0) - tmp[31] := dst[0] - dst := (dst >> 1) OR tmp - count := count - 1 - OD - - -
immintrin.h
- Shift + + + + Shift the bits of unsigned 32-bit integer "a" right by the number of bits specified in "shift", rotating the least-significant bit to the most-significant bit location, and store the unsigned result in "dst". + +dst := a +count := shift AND 31 +DO WHILE (count > 0) + tmp[31] := dst[0] + dst := (dst >> 1) OR tmp + count := count - 1 +OD + + +
immintrin.h
+ Shift
- - - - Shift the bits of unsigned 16-bit integer "a" left by the number of bits - specified in "shift", rotating the most-significant bit to the least-significant bit - location, and store the unsigned result in "dst". - - dst := a - count := shift AND 15 - DO WHILE (count > 0) - tmp[0] := dst[15] - dst := (dst << 1) OR tmp[0] - count := count - 1 - OD - - -
immintrin.h
- Shift + + + + Shift the bits of unsigned 16-bit integer "a" left by the number of bits specified in "shift", rotating the most-significant bit to the least-significant bit location, and store the unsigned result in "dst". + +dst := a +count := shift AND 15 +DO WHILE (count > 0) + tmp[0] := dst[15] + dst := (dst << 1) OR tmp[0] + count := count - 1 +OD + + +
immintrin.h
+ Shift
- - - - Shift the bits of unsigned 16-bit integer "a" right by the number of bits - specified in "shift", rotating the least-significant bit to the most-significant bit - location, and store the unsigned result in "dst". - - dst := a - count := shift AND 15 - DO WHILE (count > 0) - tmp[15] := dst[0] - dst := (dst >> 1) OR tmp - count := count - 1 - OD - - -
immintrin.h
- Shift + + + + Shift the bits of unsigned 16-bit integer "a" right by the number of bits specified in "shift", rotating the least-significant bit to the most-significant bit location, and store the unsigned result in "dst". + +dst := a +count := shift AND 15 +DO WHILE (count > 0) + tmp[15] := dst[0] + dst := (dst >> 1) OR tmp + count := count - 1 +OD + + +
immintrin.h
+ Shift
- - - - Shift the bits of unsigned 64-bit integer "a" left by the number of bits - specified in "shift", rotating the most-significant bit to the least-significant bit - location, and store the unsigned result in "dst". - - dst := a - count := shift AND 63 - DO WHILE (count > 0) - tmp[0] := dst[63] - dst := (dst << 1) OR tmp[0] - count := count - 1 - OD - - -
immintrin.h
- Shift + + + + Shift the bits of unsigned 64-bit integer "a" left by the number of bits specified in "shift", rotating the most-significant bit to the least-significant bit location, and store the unsigned result in "dst". + +dst := a +count := shift AND 63 +DO WHILE (count > 0) + tmp[0] := dst[63] + dst := (dst << 1) OR tmp[0] + count := count - 1 +OD + + +
immintrin.h
+ Shift
- - - - Shift the bits of unsigned 64-bit integer "a" right by the number of bits - specified in "shift", rotating the least-significant bit to the most-significant bit - location, and store the unsigned result in "dst". - - dst := a - count := shift AND 63 - DO WHILE (count > 0) - tmp[63] := dst[0] - dst := (dst >> 1) OR tmp[63] - count := count - 1 - OD - - -
immintrin.h
- Shift + + + + Shift the bits of unsigned 64-bit integer "a" right by the number of bits specified in "shift", rotating the least-significant bit to the most-significant bit location, and store the unsigned result in "dst". + +dst := a +count := shift AND 63 +DO WHILE (count > 0) + tmp[63] := dst[0] + dst := (dst >> 1) OR tmp[63] + count := count - 1 +OD + + +
immintrin.h
+ Shift
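All of the rotate intrinsics above reduce to Rust's `rotate_left`/`rotate_right` on the appropriately sized integer, which compile to single ROL/ROR instructions:

```rust
fn main() {
    assert_eq!(0x8000_0001u32.rotate_left(1), 0x0000_0003); // like _rotl
    assert_eq!(0x0000_0003u32.rotate_right(1), 0x8000_0001); // like _rotr
    assert_eq!(0x8001u16.rotate_left(1), 0x0003); // like _rotwl
    assert_eq!(1u64.rotate_right(1), 0x8000_0000_0000_0000); // like _rotr64
}
```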
- - - Treat the processor-specific feature(s) specified in "a" as available. Multiple - features may be OR'd together. See the valid feature flags below: - - _FEATURE_GENERIC_IA32 - _FEATURE_FPU - _FEATURE_CMOV - _FEATURE_MMX - _FEATURE_FXSAVE - _FEATURE_SSE - _FEATURE_SSE2 - _FEATURE_SSE3 - _FEATURE_SSSE3 - _FEATURE_SSE4_1 - _FEATURE_SSE4_2 - _FEATURE_MOVBE - _FEATURE_POPCNT - _FEATURE_PCLMULQDQ - _FEATURE_AES - _FEATURE_F16C - _FEATURE_AVX - _FEATURE_RDRND - _FEATURE_FMA - _FEATURE_BMI - _FEATURE_LZCNT - _FEATURE_HLE - _FEATURE_RTM - _FEATURE_AVX2 - _FEATURE_KNCNI - _FEATURE_AVX512F - _FEATURE_ADX - _FEATURE_RDSEED - _FEATURE_AVX512ER - _FEATURE_AVX512PF - _FEATURE_AVX512CD - _FEATURE_SHA - _FEATURE_MPX - _FEATURE_AVX512BW - _FEATURE_AVX512VL - _FEATURE_AVX512VBMI - _FEATURE_AVX512_4FMAPS - _FEATURE_AVX512_4VNNIW - _FEATURE_AVX512_VPOPCNTDQ - _FEATURE_AVX512_BITALG - _FEATURE_AVX512_VBMI2 - _FEATURE_GFNI - _FEATURE_VAES - _FEATURE_VPCLMULQDQ - _FEATURE_AVX512_VNNI - _FEATURE_CLWB - _FEATURE_RDPID - _FEATURE_IBT - _FEATURE_SHSTK - _FEATURE_SGX - _FEATURE_WBNOINVD - _FEATURE_PCONFIG - _FEATURE_AXV512_4VNNIB - _FEATURE_AXV512_4FMAPH - _FEATURE_AXV512_BITALG2 - _FEATURE_AXV512_VP2INTERSECT - -
immintrin.h
- General Support + + + Treat the processor-specific feature(s) specified in "a" as available. Multiple features may be OR'd together. See the valid feature flags below: + +_FEATURE_GENERIC_IA32 +_FEATURE_FPU +_FEATURE_CMOV +_FEATURE_MMX +_FEATURE_FXSAVE +_FEATURE_SSE +_FEATURE_SSE2 +_FEATURE_SSE3 +_FEATURE_SSSE3 +_FEATURE_SSE4_1 +_FEATURE_SSE4_2 +_FEATURE_MOVBE +_FEATURE_POPCNT +_FEATURE_PCLMULQDQ +_FEATURE_AES +_FEATURE_F16C +_FEATURE_AVX +_FEATURE_RDRND +_FEATURE_FMA +_FEATURE_BMI +_FEATURE_LZCNT +_FEATURE_HLE +_FEATURE_RTM +_FEATURE_AVX2 +_FEATURE_KNCNI +_FEATURE_AVX512F +_FEATURE_ADX +_FEATURE_RDSEED +_FEATURE_AVX512ER +_FEATURE_AVX512PF +_FEATURE_AVX512CD +_FEATURE_SHA +_FEATURE_MPX +_FEATURE_AVX512BW +_FEATURE_AVX512VL +_FEATURE_AVX512VBMI +_FEATURE_AVX512_4FMAPS +_FEATURE_AVX512_4VNNIW +_FEATURE_AVX512_VPOPCNTDQ +_FEATURE_AVX512_BITALG +_FEATURE_AVX512_VBMI2 +_FEATURE_GFNI +_FEATURE_VAES +_FEATURE_VPCLMULQDQ +_FEATURE_AVX512_VNNI +_FEATURE_CLWB +_FEATURE_RDPID +_FEATURE_IBT +_FEATURE_SHSTK +_FEATURE_SGX +_FEATURE_WBNOINVD +_FEATURE_PCONFIG +_FEATURE_AXV512_4VNNIB +_FEATURE_AXV512_4FMAPH +_FEATURE_AXV512_BITALG2 +_FEATURE_AXV512_VP2INTERSECT + +
immintrin.h
+ General Support
- - - Dynamically query the processor to determine if the processor-specific - feature(s) specified in "a" are available, and return true or false (1 or 0) if the set - of features is available. Multiple features may be OR'd together. This function is - limited to bitmask values in the first 'page' of the libirc cpu-id information. This - intrinsic does not check the processor vendor. See the valid feature flags below: - - _FEATURE_GENERIC_IA32 - _FEATURE_FPU - _FEATURE_CMOV - _FEATURE_MMX - _FEATURE_FXSAVE - _FEATURE_SSE - _FEATURE_SSE2 - _FEATURE_SSE3 - _FEATURE_SSSE3 - _FEATURE_SSE4_1 - _FEATURE_SSE4_2 - _FEATURE_MOVBE - _FEATURE_POPCNT - _FEATURE_PCLMULQDQ - _FEATURE_AES - _FEATURE_F16C - _FEATURE_AVX - _FEATURE_RDRND - _FEATURE_FMA - _FEATURE_BMI - _FEATURE_LZCNT - _FEATURE_HLE - _FEATURE_RTM - _FEATURE_AVX2 - _FEATURE_KNCNI - _FEATURE_AVX512F - _FEATURE_ADX - _FEATURE_RDSEED - _FEATURE_AVX512ER - _FEATURE_AVX512PF - _FEATURE_AVX512CD - _FEATURE_SHA - _FEATURE_MPX - _FEATURE_AVX512BW - _FEATURE_AVX512VL - _FEATURE_AVX512VBMI - _FEATURE_AVX512_4FMAPS - _FEATURE_AVX512_4VNNIW - _FEATURE_AVX512_VPOPCNTDQ - _FEATURE_AVX512_BITALG - _FEATURE_AVX512_VBMI2 - _FEATURE_GFNI - _FEATURE_VAES - _FEATURE_VPCLMULQDQ - _FEATURE_AVX512_VNNI - _FEATURE_CLWB - _FEATURE_RDPID - _FEATURE_IBT - _FEATURE_SHSTK - _FEATURE_SGX - _FEATURE_WBNOINVD - _FEATURE_PCONFIG - _FEATURE_AXV512_4VNNIB - _FEATURE_AXV512_4FMAPH - _FEATURE_AXV512_BITALG2 - _FEATURE_AXV512_VP2INTERSECT - _FEATURE_AXV512_FP16 - -
immintrin.h
- General Support + + + Dynamically query the processor to determine if the processor-specific feature(s) specified in "a" are available, and return true or false (1 or 0) if the set of features is available. Multiple features may be OR'd together. This function is limited to bitmask values in the first 'page' of the libirc cpu-id information. This intrinsic does not check the processor vendor. See the valid feature flags below: + +_FEATURE_GENERIC_IA32 +_FEATURE_FPU +_FEATURE_CMOV +_FEATURE_MMX +_FEATURE_FXSAVE +_FEATURE_SSE +_FEATURE_SSE2 +_FEATURE_SSE3 +_FEATURE_SSSE3 +_FEATURE_SSE4_1 +_FEATURE_SSE4_2 +_FEATURE_MOVBE +_FEATURE_POPCNT +_FEATURE_PCLMULQDQ +_FEATURE_AES +_FEATURE_F16C +_FEATURE_AVX +_FEATURE_RDRND +_FEATURE_FMA +_FEATURE_BMI +_FEATURE_LZCNT +_FEATURE_HLE +_FEATURE_RTM +_FEATURE_AVX2 +_FEATURE_KNCNI +_FEATURE_AVX512F +_FEATURE_ADX +_FEATURE_RDSEED +_FEATURE_AVX512ER +_FEATURE_AVX512PF +_FEATURE_AVX512CD +_FEATURE_SHA +_FEATURE_MPX +_FEATURE_AVX512BW +_FEATURE_AVX512VL +_FEATURE_AVX512VBMI +_FEATURE_AVX512_4FMAPS +_FEATURE_AVX512_4VNNIW +_FEATURE_AVX512_VPOPCNTDQ +_FEATURE_AVX512_BITALG +_FEATURE_AVX512_VBMI2 +_FEATURE_GFNI +_FEATURE_VAES +_FEATURE_VPCLMULQDQ +_FEATURE_AVX512_VNNI +_FEATURE_CLWB +_FEATURE_RDPID +_FEATURE_IBT +_FEATURE_SHSTK +_FEATURE_SGX +_FEATURE_WBNOINVD +_FEATURE_PCONFIG +_FEATURE_AXV512_4VNNIB +_FEATURE_AXV512_4FMAPH +_FEATURE_AXV512_BITALG2 +_FEATURE_AXV512_VP2INTERSECT +_FEATURE_AXV512_FP16 + +
immintrin.h
+ General Support
- - - - Dynamically query the processor to determine if the processor-specific - feature(s) specified in "a" are available, and return true or false (1 or 0) if the set - of features is available. Multiple features may be OR'd together. This works identically - to the previous variant, except it also accepts a 'page' index that permits checking - features on the 2nd page of the libirc information. When provided with a '0' in the - 'page' parameter, this works identically to _may_i_use_cpu_feature. This intrinsic does - not check the processor vendor. See the valid feature flags on the 2nd page below: - (provided with a '1' in the 'page' parameter) - - _FEATURE_CLDEMOTE - _FEATURE_MOVDIRI - _FEATURE_MOVDIR64B - _FEATURE_WAITPKG - _FEATURE_AVX512_Bf16 - _FEATURE_ENQCMD - _FEATURE_AVX_VNNI - _FEATURE_AMX_TILE - _FEATURE_AMX_INT8 - _FEATURE_AMX_BF16 - _FEATURE_KL - _FEATURE_WIDE_KL - _FEATURE_HRESET - _FEATURE_UINTR - _FEATURE_PREFETCHI - _FEATURE_AVXVNNIINT8 - _FEATURE_CMPCCXADD - _FEATURE_AVXIFMA - _FEATURE_AVXNECONVERT - _FEATURE_RAOINT - _FEATURE_AMX_FP16 - _FEATURE_AMX_COMPLEX - _FEATURE_SHA512 - _FEATURE_SM3 - _FEATURE_SM4 - _FEATURE_AVXVNNIINT16 - _FEATURE_USERMSR - _FEATURE_AVX10_1_256 - _FEATURE_AVX10_1_512 - _FEATURE_APXF - _FEATURE_MSRLIST - _FEATURE_WRMSRNS - _FEATURE_PBNDKB - -
immintrin.h
- General Support + + + + Dynamically query the processor to determine if the processor-specific feature(s) specified in "a" are available, and return true or false (1 or 0) if the set of features is available. Multiple features may be OR'd together. This works identically to the previous variant, except it also accepts a 'page' index that permits checking features on the 2nd page of the libirc information. When provided with a '0' in the 'page' parameter, this works identically to _may_i_use_cpu_feature. This intrinsic does not check the processor vendor. See the valid feature flags on the 2nd page below: (provided with a '1' in the 'page' parameter) + +_FEATURE_CLDEMOTE +_FEATURE_MOVDIRI +_FEATURE_MOVDIR64B +_FEATURE_WAITPKG +_FEATURE_AVX512_Bf16 +_FEATURE_ENQCMD +_FEATURE_AVX_VNNI +_FEATURE_AMX_TILE +_FEATURE_AMX_INT8 +_FEATURE_AMX_BF16 +_FEATURE_KL +_FEATURE_WIDE_KL +_FEATURE_HRESET +_FEATURE_UINTR +_FEATURE_PREFETCHI +_FEATURE_AVXVNNIINT8 +_FEATURE_CMPCCXADD +_FEATURE_AVXIFMA +_FEATURE_AVXNECONVERT +_FEATURE_RAOINT +_FEATURE_AMX_FP16 +_FEATURE_AMX_COMPLEX +_FEATURE_SHA512 +_FEATURE_SM3 +_FEATURE_SM4 +_FEATURE_AVXVNNIINT16 +_FEATURE_USERMSR +_FEATURE_AVX10_1_256 +_FEATURE_AVX10_1_512 +_FEATURE_APXF +_FEATURE_MSRLIST +_FEATURE_WRMSRNS +_FEATURE_PBNDKB + +
immintrin.h
+ General Support
- - - Dynamically query the processor to determine if the processor-specific - feature(s) specified a series of compile-time string literals in "feature, ..." are - available, and return true or false (1 or 0) if the set of features is available. These - feature names are converted to a bitmask and uses the same infrastructure as - _may_i_use_cpu_feature_ext to validate it. The behavior is the same as the previous - variants. This intrinsic does not check the processor vendor. Example string literals - are "avx2", "bmi", "avx512fp16", ... - - -
immintrin.h
- General Support + + + Dynamically query the processor to determine if the processor-specific feature(s) specified by a series of compile-time string literals in "feature, ..." are available, and return true or false (1 or 0) if the set of features is available. These feature names are converted to a bitmask and use the same infrastructure as _may_i_use_cpu_feature_ext to validate it. The behavior is the same as the previous variants. This intrinsic does not check the processor vendor. The supported string literals correspond one-to-one to the feature flags listed in the "Operation" sections of _may_i_use_cpu_feature and _may_i_use_cpu_feature_ext. Example string literals are "avx2", "bmi", "avx512fp16", "amx-int8"... + + +
immintrin.h
+ General Support
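On the Rust side, the closest analogue to these `_may_i_use_cpu_feature*` queries is std's runtime feature detection. A sketch; the macro is std-only, and accepting the "avx512fp16" string assumes a std_detect recent enough to know that feature:

```rust
fn main() {
    if is_x86_feature_detected!("avx2") {
        println!("AVX2 is available");
    }
    // assumes a toolchain whose std_detect recognizes this string
    if is_x86_feature_detected!("avx512fp16") {
        println!("AVX512-FP16 is available");
    }
}
```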
- - - Read the Performance Monitor Counter (PMC) specified by "a", and store up to - 64-bits in "dst". The width of performance counters is implementation specific. - dst[63:0] := ReadPMC(a) - - -
immintrin.h
- General Support + + + Read the Performance Monitor Counter (PMC) specified by "a", and store up to 64-bits in "dst". The width of performance counters is implementation specific. + dst[63:0] := ReadPMC(a) + + +
immintrin.h
+ General Support
- - - - - - Add unsigned 32-bit integers "a" and "b" with unsigned 8-bit carry-in "c_in" - (carry flag), and store the unsigned 32-bit result in "out", and the carry-out in "dst" - (carry or overflow flag). - - tmp[32:0] := a[31:0] + b[31:0] + (c_in > 0 ? 1 : 0) - MEM[out+31:out] := tmp[31:0] - dst[0] := tmp[32] - dst[7:1] := 0 - - -
immintrin.h
- Arithmetic + + + + + + Add unsigned 32-bit integers "a" and "b" with unsigned 8-bit carry-in "c_in" (carry flag), and store the unsigned 32-bit result in "out", and the carry-out in "dst" (carry or overflow flag). + +tmp[32:0] := a[31:0] + b[31:0] + (c_in > 0 ? 1 : 0) +MEM[out+31:out] := tmp[31:0] +dst[0] := tmp[32] +dst[7:1] := 0 + + +
immintrin.h
+ Arithmetic
- - - - - - Add unsigned 64-bit integers "a" and "b" with unsigned 8-bit carry-in "c_in" - (carry flag), and store the unsigned 64-bit result in "out", and the carry-out in "dst" - (carry or overflow flag). - - tmp[64:0] := a[63:0] + b[63:0] + (c_in > 0 ? 1 : 0) - MEM[out+63:out] := tmp[63:0] - dst[0] := tmp[64] - dst[7:1] := 0 - - -
immintrin.h
- Arithmetic + + + + + + Add unsigned 64-bit integers "a" and "b" with unsigned 8-bit carry-in "c_in" (carry flag), and store the unsigned 64-bit result in "out", and the carry-out in "dst" (carry or overflow flag). + +tmp[64:0] := a[63:0] + b[63:0] + (c_in > 0 ? 1 : 0) +MEM[out+63:out] := tmp[63:0] +dst[0] := tmp[64] +dst[7:1] := 0 + + +
immintrin.h
+ Arithmetic
- - - - - - Add unsigned 8-bit borrow "c_in" (carry flag) to unsigned 32-bit integer "b", - and subtract the result from unsigned 32-bit integer "a". Store the unsigned 32-bit - result in "out", and the carry-out in "dst" (carry or overflow flag). - - tmp[32:0] := a[31:0] - (b[31:0] + (c_in > 0 ? 1 : 0)) - MEM[out+31:out] := tmp[31:0] - dst[0] := tmp[32] - dst[7:1] := 0 - - -
immintrin.h
- Arithmetic + + + + + + Add unsigned 8-bit borrow "c_in" (carry flag) to unsigned 32-bit integer "b", and subtract the result from unsigned 32-bit integer "a". Store the unsigned 32-bit result in "out", and the carry-out in "dst" (carry or overflow flag). + +tmp[32:0] := a[31:0] - (b[31:0] + (c_in > 0 ? 1 : 0)) +MEM[out+31:out] := tmp[31:0] +dst[0] := tmp[32] +dst[7:1] := 0 + + +
immintrin.h
+ Arithmetic
- - - - - - Add unsigned 8-bit borrow "c_in" (carry flag) to unsigned 64-bit integer "b", - and subtract the result from unsigned 64-bit integer "a". Store the unsigned 64-bit - result in "out", and the carry-out in "dst" (carry or overflow flag). - - tmp[64:0] := a[63:0] - (b[63:0] + (c_in > 0 ? 1 : 0)) - MEM[out+63:out] := tmp[63:0] - dst[0] := tmp[64] - dst[7:1] := 0 - - -
immintrin.h
- Arithmetic + + + + + + Add unsigned 8-bit borrow "c_in" (carry flag) to unsigned 64-bit integer "b", and subtract the result from unsigned 64-bit integer "a". Store the unsigned 64-bit result in "out", and the carry-out in "dst" (carry or overflow flag). + +tmp[64:0] := a[63:0] - (b[63:0] + (c_in > 0 ? 1 : 0)) +MEM[out+63:out] := tmp[63:0] +dst[0] := tmp[64] +dst[7:1] := 0 + + +
immintrin.h
+ Arithmetic
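core::arch exposes `_addcarry_u64` with exactly the carry-chaining shape described above; a sketch adding two 128-bit values held as u64 limb pairs:

```rust
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::_addcarry_u64;

#[cfg(target_arch = "x86_64")]
fn main() {
    let (a_lo, a_hi) = (u64::MAX, 1u64); // a = 2^65 - 1
    let (b_lo, b_hi) = (1u64, 2u64); // b = 2^65 + 1
    let (mut lo, mut hi) = (0u64, 0u64);
    unsafe {
        let c = _addcarry_u64(0, a_lo, b_lo, &mut lo); // low limb, carry out
        _addcarry_u64(c, a_hi, b_hi, &mut hi); // high limb, carry in
    }
    assert_eq!((lo, hi), (0, 4)); // a + b = 2^66
}

#[cfg(not(target_arch = "x86_64"))]
fn main() {}
```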
- - - Insert the 32-bit data from "a" into a Processor Trace stream via a PTW packet. - The PTW packet will be inserted if tracing is currently enabled and ptwrite is currently - enabled. The current IP will also be inserted via a FUP packet if FUPonPTW is enabled. - -
immintrin.h
- Miscellaneous + + + Insert the 32-bit data from "a" into a Processor Trace stream via a PTW packet. The PTW packet will be inserted if tracing is currently enabled and ptwrite is currently enabled. The current IP will also be inserted via a FUP packet if FUPonPTW is enabled. + +
immintrin.h
+ Miscellaneous
- - - Insert the 64-bit data from "a" into a Processor Trace stream via a PTW packet. - The PTW packet will be inserted if tracing is currently enabled and ptwrite is currently - enabled. The current IP will also be inserted via a FUP packet if FUPonPTW is enabled. - -
immintrin.h
- Miscellaneous + + + Insert the 64-bit data from "a" into a Processor Trace stream via a PTW packet. The PTW packet will be inserted if tracing is currently enabled and ptwrite is currently enabled. The current IP will also be inserted via a FUP packet if FUPonPTW is enabled. + +
immintrin.h
+ Miscellaneous
- - - - Invoke the Intel SGX enclave user (non-privilege) leaf function specified by - "a", and return the error code. The "__data" array contains 3 32- or 64-bit elements - that may act as input, output, or be unused, depending on the semantics of the specified - leaf function; these correspond to ebx, ecx, and edx. - -
immintrin.h
- Miscellaneous + + + + Invoke the Intel SGX enclave user (non-privileged) leaf function specified by "a", and return the error code. The "__data" array contains 3 32- or 64-bit elements that may act as input, output, or be unused, depending on the semantics of the specified leaf function; these correspond to ebx, ecx, and edx. +
immintrin.h
+ Miscellaneous
- - - - Invoke the Intel SGX enclave system (privileged) leaf function specified by - "a", and return the error code. The "__data" array contains 3 32- or 64-bit elements - that may act as input, output, or be unused, depending on the semantics of the specified - leaf function; these correspond to ebx, ecx, and edx. - -
immintrin.h
- Miscellaneous + + + + Invoke the Intel SGX enclave system (privileged) leaf function specified by "a", and return the error code. The "__data" array contains 3 32- or 64-bit elements that may act as input, output, or be unused, depending on the semantics of the specified leaf function; these correspond to ebx, ecx, and edx. + +
immintrin.h
+ Miscellaneous
- - - - Invoke the Intel SGX enclave virtualized (VMM) leaf function specified by "a", - and return the error code. The "__data" array contains 3 32- or 64-bit elements that may - act as input, output, or be unused, depending on the semantics of the specified leaf - function; these correspond to ebx, ecx, and edx. - -
immintrin.h
- Miscellaneous + + + + Invoke the Intel SGX enclave virtualized (VMM) leaf function specified by "a", and return the error code. The "__data" array contains 3 32- or 64-bit elements that may act as input, output, or be unused, depending on the semantics of the specified leaf function; these correspond to ebx, ecx, and edx. + +
immintrin.h
+ Miscellaneous
- - - Write back and flush internal caches. - Initiate writing-back and flushing of external - caches. - -
immintrin.h
- Miscellaneous + + + Write back and flush internal caches. Initiate writing-back and flushing of external caches. + +
immintrin.h
+ Miscellaneous
- - - Convert the half-precision (16-bit) floating-point value "a" to a - single-precision (32-bit) floating-point value, and store the result in "dst". - - dst[31:0] := Convert_FP16_To_FP32(a[15:0]) - -
emmintrin.h
- Convert + + + Convert the half-precision (16-bit) floating-point value "a" to a single-precision (32-bit) floating-point value, and store the result in "dst". + +dst[31:0] := Convert_FP16_To_FP32(a[15:0]) + +
emmintrin.h
+ Convert
- - - - Convert the single-precision (32-bit) floating-point value "a" to a - half-precision (16-bit) floating-point value, and store the result in "dst". - [round_note] - - dst[15:0] := Convert_FP32_To_FP16(a[31:0]) - -
emmintrin.h
- Convert -
- - - - - - - Perform a carry-less multiplication of two 64-bit integers, selected from "a" - and "b" according to "imm8", and store the results in "dst". - - IF (imm8[0] == 0) - TEMP1 := a[63:0] - ELSE - TEMP1 := a[127:64] - FI - IF (imm8[4] == 0) - TEMP2 := b[63:0] - ELSE - TEMP2 := b[127:64] - FI - FOR i := 0 to 63 - TEMP[i] := (TEMP1[0] and TEMP2[i]) - FOR j := 1 to i - TEMP[i] := TEMP[i] XOR (TEMP1[j] AND TEMP2[i-j]) - ENDFOR - dst[i] := TEMP[i] - ENDFOR - FOR i := 64 to 127 - TEMP[i] := 0 - FOR j := (i - 63) to 63 - TEMP[i] := TEMP[i] XOR (TEMP1[j] AND TEMP2[i-j]) - ENDFOR - dst[i] := TEMP[i] - ENDFOR - dst[127] := 0 - - - PCLMULQDQ -
wmmintrin.h
- Application-Targeted -
- - - - - - - Invoke the PCONFIG leaf function specified by "a". The "__data" array contains - 3 32- or 64-bit elements that may act as input, output, or be unused, depending on the - semantics of the specified leaf function; these correspond to ebx, ecx, and edx. May - return the value in eax, depending on the semantics of the specified leaf function. - - PCONFIG -
immintrin.h
- Miscellaneous -
- - - - - - Count the number of bits set to 1 in unsigned 32-bit integer "a", and return - that count in "dst". - - dst := 0 - FOR i := 0 to 31 - IF a[i] - dst := dst + 1 - FI - ENDFOR - - - POPCNT -
immintrin.h
- Bit Manipulation + + + + Convert the single-precision (32-bit) floating-point value "a" to a half-precision (16-bit) floating-point value, and store the result in "dst". + [round_note] + +dst[15:0] := Convert_FP32_To_FP16(a[31:0]) + +
emmintrin.h
+ Convert +
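These two scalar conversions are the F16C counterparts of the `f16` support this patch series is building toward; with the nightly `f16` type, the same round trip is an `as` cast. A sketch, assuming a nightly toolchain with `feature(f16)`:

```rust
#![feature(f16)]

fn main() {
    let h: f16 = 1.5; // half precision
    let s = h as f32; // like _cvtsh_ss
    assert!(s == 1.5f32);
    let back = s as f16; // like _cvtss_sh with default rounding
    assert!(back == 1.5);
}
```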
+ + + + + + + Perform a carry-less multiplication of two 64-bit integers, selected from "a" and "b" according to "imm8", and store the results in "dst". + +IF (imm8[0] == 0) + TEMP1 := a[63:0] +ELSE + TEMP1 := a[127:64] +FI +IF (imm8[4] == 0) + TEMP2 := b[63:0] +ELSE + TEMP2 := b[127:64] +FI +FOR i := 0 to 63 + TEMP[i] := (TEMP1[0] and TEMP2[i]) + FOR j := 1 to i + TEMP[i] := TEMP[i] XOR (TEMP1[j] AND TEMP2[i-j]) + ENDFOR + dst[i] := TEMP[i] +ENDFOR +FOR i := 64 to 127 + TEMP[i] := 0 + FOR j := (i - 63) to 63 + TEMP[i] := TEMP[i] XOR (TEMP1[j] AND TEMP2[i-j]) + ENDFOR + dst[i] := TEMP[i] +ENDFOR +dst[127] := 0 + + + PCLMULQDQ +
wmmintrin.h
+ Application-Targeted +
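The pseudocode above is a 64x64 -> 128-bit carry-less (XOR) multiply; a plain-Rust model of the same loop (the real `_mm_clmulepi64_si128` performs it in one PCLMULQDQ instruction):

```rust
fn clmul_u64(a: u64, b: u64) -> u128 {
    let mut acc: u128 = 0;
    for i in 0..64 {
        if (b >> i) & 1 == 1 {
            acc ^= (a as u128) << i; // XOR instead of add: no carries
        }
    }
    acc
}

fn main() {
    // over GF(2): (x + 1) * (x + 1) = x^2 + 1, i.e. 0b11 clmul 0b11 = 0b101
    assert_eq!(clmul_u64(0b11, 0b11), 0b101);
}
```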
+ + + + + + + Invoke the PCONFIG leaf function specified by "a". The "__data" array contains 3 32- or 64-bit elements that may act as input, output, or be unused, depending on the semantics of the specified leaf function; these correspond to ebx, ecx, and edx. May return the value in eax, depending on the semantics of the specified leaf function. + + PCONFIG +
immintrin.h
+ Miscellaneous +
+ + + + + + Count the number of bits set to 1 in unsigned 32-bit integer "a", and return that count in "dst". + +dst := 0 +FOR i := 0 to 31 + IF a[i] + dst := dst + 1 + FI +ENDFOR + + + POPCNT +
immintrin.h
+ Bit Manipulation
- - - Count the number of bits set to 1 in unsigned 64-bit integer "a", and return - that count in "dst". - - dst := 0 - FOR i := 0 to 63 - IF a[i] - dst := dst + 1 - FI - ENDFOR - - - POPCNT -
immintrin.h
- Bit Manipulation + + + Count the number of bits set to 1 in unsigned 64-bit integer "a", and return that count in "dst". + +dst := 0 +FOR i := 0 to 63 + IF a[i] + dst := dst + 1 + FI +ENDFOR + + + POPCNT +
immintrin.h
+ Bit Manipulation
- - - Count the number of bits set to 1 in 32-bit integer "a", and return that count - in "dst". - - dst := 0 - FOR i := 0 to 31 - IF a[i] - dst := dst + 1 - FI - ENDFOR - - - POPCNT -
immintrin.h
- Bit Manipulation + + + Count the number of bits set to 1 in 32-bit integer "a", and return that count in "dst". + +dst := 0 +FOR i := 0 to 31 + IF a[i] + dst := dst + 1 + FI +ENDFOR + + + POPCNT +
immintrin.h
+ Bit Manipulation
- - - Count the number of bits set to 1 in 64-bit integer "a", and return that count - in "dst". - - dst := 0 - FOR i := 0 to 63 - IF a[i] - dst := dst + 1 - FI - ENDFOR - - - POPCNT -
immintrin.h
- Bit Manipulation -
- - + + + Count the number of bits set to 1 in 64-bit integer "a", and return that count in "dst". + +dst := 0 +FOR i := 0 to 63 + IF a[i] + dst := dst + 1 + FI +ENDFOR + + + POPCNT +
immintrin.h
+ Bit Manipulation +
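POPCNT is available in Rust both as `count_ones` (which lowers to POPCNT when the target has it) and as the explicit intrinsic:

```rust
fn main() {
    let a: i64 = 0b1011_0110;
    assert_eq!(a.count_ones(), 5);
    #[cfg(target_arch = "x86_64")]
    {
        if is_x86_feature_detected!("popcnt") {
            assert_eq!(unsafe { core::arch::x86_64::_popcnt64(a) }, 5);
        }
    }
}
```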
+ + - - - Loads an instruction sequence containing the specified memory address into all - level cache. - - PREFETCHI -
x86gprintrin.h
- General Support + + + Loads an instruction sequence containing the specified memory address into all level cache. + + PREFETCHI +
x86gprintrin.h
+ General Support
- - - Loads an instruction sequence containing the specified memory address into all - but the first-level cache. - - PREFETCHI -
x86gprintrin.h
- General Support -
- - - - - Fetch the line of data from memory that contains address "p" to a location in - the cache hierarchy specified by the locality hint "i", which can be one of:<ul> - <li>_MM_HINT_ET0 // 7, move data using the ET0 hint. The PREFETCHW instruction - will be generated.</li> - <li>_MM_HINT_T0 // 3, move data using the T0 hint. The PREFETCHT0 instruction will - be generated.</li> - <li>_MM_HINT_T1 // 2, move data using the T1 hint. The PREFETCHT1 instruction will - be generated.</li> - <li>_MM_HINT_T2 // 1, move data using the T2 hint. The PREFETCHT2 instruction will - be generated.</li> - <li>_MM_HINT_NTA // 0, move data using the non-temporal access (NTA) hint. The - PREFETCHNTA instruction will be generated.</li> + + + Loads an instruction sequence containing the specified memory address into all but the first-level cache. + + PREFETCHI +
x86gprintrin.h
+ General Support +
+ + + + + Fetch the line of data from memory that contains address "p" to a location in the cache hierarchy specified by the locality hint "i", which can be one of:<ul> + <li>_MM_HINT_ET0 // 7, move data using the ET0 hint. The PREFETCHW instruction will be generated.</li> + <li>_MM_HINT_T0 // 3, move data using the T0 hint. The PREFETCHT0 instruction will be generated.</li> + <li>_MM_HINT_T1 // 2, move data using the T1 hint. The PREFETCHT1 instruction will be generated.</li> + <li>_MM_HINT_T2 // 1, move data using the T2 hint. The PREFETCHT2 instruction will be generated.</li> + <li>_MM_HINT_NTA // 0, move data using the non-temporal access (NTA) hint. The PREFETCHNTA instruction will be generated.</li> - - - - - - PRFCHW -
immintrin.h
- General Support -
- - - + + + + + + PRFCHW +
immintrin.h
+ General Support +
+ + + - Atomically add a 32-bit value at memory operand "__A" and a 32-bit "__B", and - store the result to the same memory location. + Atomically add a 32-bit value at memory operand "__A" and a 32-bit "__B", and store the result to the same memory location. - MEM[__A+31:__A] := MEM[__A+31:__A] + __B[31:0] +MEM[__A+31:__A] := MEM[__A+31:__A] + __B[31:0] - RAO_INT -
x86gprintrin.h
- Arithmetic + RAO_INT +
x86gprintrin.h
+ Arithmetic
- Atomically add a 64-bit value at memory operand "__A" and a 64-bit "__B", and - store the result to the same memory location. + Atomically add a 64-bit value at memory operand "__A" and a 64-bit "__B", and store the result to the same memory location. - MEM[__A+63:__A] := MEM[__A+63:__A] + __B[63:0] +MEM[__A+63:__A] := MEM[__A+63:__A] + __B[63:0] - RAO_INT -
x86gprintrin.h
- Arithmetic + RAO_INT +
x86gprintrin.h
+ Arithmetic
- Atomically and a 32-bit value at memory operand "__A" and a 32-bit "__B", and - store the result to the same memory location. + Atomically and a 32-bit value at memory operand "__A" and a 32-bit "__B", and store the result to the same memory location. - MEM[__A+31:__A] := MEM[__A+31:__A] AND __B[31:0] +MEM[__A+31:__A] := MEM[__A+31:__A] AND __B[31:0] - RAO_INT -
x86gprintrin.h
- Arithmetic + RAO_INT +
x86gprintrin.h
+ Arithmetic
- Atomically and a 64-bit value at memory operand "__A" and a 64-bit "__B", and - store the result to the same memory location. + Atomically and a 64-bit value at memory operand "__A" and a 64-bit "__B", and store the result to the same memory location. - MEM[__A+63:__A] := MEM[__A+63:__A] AND __B[63:0] +MEM[__A+63:__A] := MEM[__A+63:__A] AND __B[63:0] - RAO_INT -
x86gprintrin.h
- Arithmetic + RAO_INT +
x86gprintrin.h
+ Arithmetic
- Atomically or a 32-bit value at memory operand "__A" and a 32-bit "__B", and - store the result to the same memory location. + Atomically or a 32-bit value at memory operand "__A" and a 32-bit "__B", and store the result to the same memory location. - MEM[__A+31:__A] := MEM[__A+31:__A] OR __B[31:0] +MEM[__A+31:__A] := MEM[__A+31:__A] OR __B[31:0] - RAO_INT -
x86gprintrin.h
- Arithmetic + RAO_INT +
x86gprintrin.h
+ Arithmetic
- Atomically or a 64-bit value at memory operand "__A" and a 64-bit "__B", and - store the result to the same memory location. + Atomically or a 64-bit value at memory operand "__A" and a 64-bit "__B", and store the result to the same memory location. - MEM[__A+63:__A] := MEM[__A+63:__A] OR __B[63:0] +MEM[__A+63:__A] := MEM[__A+63:__A] OR __B[63:0] - RAO_INT -
x86gprintrin.h
- Arithmetic + RAO_INT +
x86gprintrin.h
+ Arithmetic
- Atomically xor a 32-bit value at memory operand "__A" and a 32-bit "__B", and - store the result to the same memory location. + Atomically xor a 32-bit value at memory operand "__A" and a 32-bit "__B", and store the result to the same memory location. - MEM[__A+31:__A] := MEM[__A+31:__A] XOR __B[31:0] +MEM[__A+31:__A] := MEM[__A+31:__A] XOR __B[31:0] - RAO_INT -
x86gprintrin.h
- Arithmetic + RAO_INT +
x86gprintrin.h
+ Arithmetic
- Atomically xor a 64-bit value at memory operand "__A" and a 64-bit "__B", and - store the result to the same memory location. + Atomically xor a 64-bit value at memory operand "__A" and a 64-bit "__B", and store the result to the same memory location. - MEM[__A+63:__A] := MEM[__A+63:__A] XOR __B[63:0] +MEM[__A+63:__A] := MEM[__A+63:__A] XOR __B[63:0] - RAO_INT -
x86gprintrin.h
- Arithmetic -
- - - - Copy the IA32_TSC_AUX MSR (signature value) into "dst". - dst[31:0] := IA32_TSC_AUX[31:0] - - - RDPID -
immintrin.h
- General Support -
- - - - - - Read a hardware generated 16-bit random value and store the result in "val". - Return 1 if a random value was generated, and 0 otherwise. - IF HW_RND_GEN.ready == 1 - val[15:0] := HW_RND_GEN.data - dst := 1 - ELSE - val[15:0] := 0 - dst := 0 - FI - - - RDRAND -
immintrin.h
- Random + RAO_INT +
x86gprintrin.h
+ Arithmetic +
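The RAO-INT operations above are fire-and-forget atomics (no old value is returned). Stable Rust's nearest analogue is the `Atomic*` fetch family; a sketch of the same four updates:

```rust
use std::sync::atomic::{AtomicU32, Ordering};

fn main() {
    let x = AtomicU32::new(0b1100);
    x.fetch_add(1, Ordering::Relaxed); // like _aadd_i32
    x.fetch_and(0b1111, Ordering::Relaxed); // like _aand_i32
    x.fetch_or(0b0001, Ordering::Relaxed); // like _aor_i32
    x.fetch_xor(0b0010, Ordering::Relaxed); // like _axor_i32
    assert_eq!(x.load(Ordering::Relaxed), 0b1111);
}
```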
+ + + + Copy the IA32_TSC_AUX MSR (signature value) into "dst". + dst[31:0] := IA32_TSC_AUX[31:0] + + + RDPID +
immintrin.h
+ General Support +
+ + + + + + Read a hardware generated 16-bit random value and store the result in "val". Return 1 if a random value was generated, and 0 otherwise. + IF HW_RND_GEN.ready == 1 + val[15:0] := HW_RND_GEN.data + dst := 1 +ELSE + val[15:0] := 0 + dst := 0 +FI + + + RDRAND +
immintrin.h
+ Random
- - - Read a hardware generated 32-bit random value and store the result in "val". - Return 1 if a random value was generated, and 0 otherwise. - IF HW_RND_GEN.ready == 1 - val[31:0] := HW_RND_GEN.data - dst := 1 - ELSE - val[31:0] := 0 - dst := 0 - FI - - - RDRAND -
immintrin.h
- Random + + + Read a hardware generated 32-bit random value and store the result in "val". Return 1 if a random value was generated, and 0 otherwise. + IF HW_RND_GEN.ready == 1 + val[31:0] := HW_RND_GEN.data + dst := 1 +ELSE + val[31:0] := 0 + dst := 0 +FI + + + RDRAND +
immintrin.h
+ Random
- - - Read a hardware generated 64-bit random value and store the result in "val". - Return 1 if a random value was generated, and 0 otherwise. - IF HW_RND_GEN.ready == 1 - val[63:0] := HW_RND_GEN.data - dst := 1 - ELSE - val[63:0] := 0 - dst := 0 - FI - - - RDRAND -
immintrin.h
- Random -
- - - - - - Read a 16-bit NIST SP800-90B and SP800-90C compliant random value and store in - "val". Return 1 if a random value was generated, and 0 otherwise. - IF HW_NRND_GEN.ready == 1 - val[15:0] := HW_NRND_GEN.data - dst := 1 - ELSE - val[15:0] := 0 - dst := 0 - FI - - - RDSEED -
immintrin.h
- Random + + + Read a hardware generated 64-bit random value and store the result in "val". Return 1 if a random value was generated, and 0 otherwise. + IF HW_RND_GEN.ready == 1 + val[63:0] := HW_RND_GEN.data + dst := 1 +ELSE + val[63:0] := 0 + dst := 0 +FI + + + RDRAND +
immintrin.h
+ Random +
+ + + + + + Read a 16-bit NIST SP800-90B and SP800-90C compliant random value and store in "val". Return 1 if a random value was generated, and 0 otherwise. + IF HW_NRND_GEN.ready == 1 + val[15:0] := HW_NRND_GEN.data + dst := 1 +ELSE + val[15:0] := 0 + dst := 0 +FI + + + RDSEED +
immintrin.h
+ Random
- - - Read a 32-bit NIST SP800-90B and SP800-90C compliant random value and store in - "val". Return 1 if a random value was generated, and 0 otherwise. - IF HW_NRND_GEN.ready == 1 - val[31:0] := HW_NRND_GEN.data - dst := 1 - ELSE - val[31:0] := 0 - dst := 0 - FI - - - RDSEED -
immintrin.h
- Random + + + Read a 32-bit NIST SP800-90B and SP800-90C compliant random value and store in "val". Return 1 if a random value was generated, and 0 otherwise. + IF HW_NRND_GEN.ready == 1 + val[31:0] := HW_NRND_GEN.data + dst := 1 +ELSE + val[31:0] := 0 + dst := 0 +FI + + + RDSEED +
immintrin.h
+ Random
- - - Read a 64-bit NIST SP800-90B and SP800-90C compliant random value and store in - "val". Return 1 if a random value was generated, and 0 otherwise. - IF HW_NRND_GEN.ready == 1 - val[63:0] := HW_NRND_GEN.data - dst := 1 - ELSE - val[63:0] := 0 - dst := 0 - FI - - - RDSEED -
immintrin.h
- Random -
- - - - - - Copy the current 64-bit value of the processor's time-stamp counter into "dst", - and store the IA32_TSC_AUX MSR (signature value) into memory at "mem_addr". - dst[63:0] := TimeStampCounter - MEM[mem_addr+31:mem_addr] := IA32_TSC_AUX[31:0] - - - RDTSCP -
immintrin.h
- General Support -
- - - - - - Force an RTM abort. The EAX register is updated to reflect an XABORT - instruction caused the abort, and the "imm8" parameter will be provided in bits [31:24] - of EAX. - Following an RTM abort, the logical processor resumes execution at the fallback address - computed through the outermost XBEGIN instruction. - IF RTM_ACTIVE == 0 - // nop - ELSE - // restore architectural register state - // discard memory updates performed in transaction - // update EAX with status and imm8 value - eax[31:24] := imm8[7:0] - RTM_NEST_COUNT := 0 - RTM_ACTIVE := 0 - IF _64_BIT_MODE - RIP := fallbackRIP - ELSE - EIP := fallbackEIP - FI - FI - - - RTM -
immintrin.h
- General Support + + + Read a 64-bit NIST SP800-90B and SP800-90C compliant random value and store in "val". Return 1 if a random value was generated, and 0 otherwise. + IF HW_NRND_GEN.ready == 1 + val[63:0] := HW_NRND_GEN.data + dst := 1 +ELSE + val[63:0] := 0 + dst := 0 +FI + + + RDSEED +
immintrin.h
+ Random +
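Both the RDRAND and RDSEED step intrinsics return 0 when no value is ready, so callers retry; core::arch exposes them directly. A sketch of the usual retry loop:

```rust
#[cfg(target_arch = "x86_64")]
fn hardware_random_u64() -> Option<u64> {
    use core::arch::x86_64::_rdrand64_step;
    let mut val = 0u64;
    for _ in 0..10 {
        // returns 1 when a random value was written to `val`
        if unsafe { _rdrand64_step(&mut val) } == 1 {
            return Some(val);
        }
    }
    None // persistent failure: fall back to another entropy source
}

#[cfg(target_arch = "x86_64")]
fn main() {
    if is_x86_feature_detected!("rdrand") {
        println!("hardware random: {:?}", hardware_random_u64());
    }
}

#[cfg(not(target_arch = "x86_64"))]
fn main() {}
```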
+ + + + + + Copy the current 64-bit value of the processor's time-stamp counter into "dst", and store the IA32_TSC_AUX MSR (signature value) into memory at "mem_addr". + dst[63:0] := TimeStampCounter +MEM[mem_addr+31:mem_addr] := IA32_TSC_AUX[31:0] + + + RDTSCP +
immintrin.h
+ General Support +
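`__rdtscp` is exposed by core::arch with the same shape: it returns the TSC value and writes the IA32_TSC_AUX signature through a pointer. A minimal timing sketch:

```rust
#[cfg(target_arch = "x86_64")]
fn main() {
    use core::arch::x86_64::__rdtscp;
    let mut aux = 0u32;
    let start = unsafe { __rdtscp(&mut aux) };
    let sum: u64 = (0..1_000u64).sum();
    let end = unsafe { __rdtscp(&mut aux) };
    println!("sum={sum}, ~{} cycles (TSC_AUX={aux})", end - start);
}

#[cfg(not(target_arch = "x86_64"))]
fn main() {}
```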
+ + + + + + Force an RTM abort. The EAX register is updated to reflect that an XABORT instruction caused the abort, and the "imm8" parameter will be provided in bits [31:24] of EAX. + Following an RTM abort, the logical processor resumes execution at the fallback address computed through the outermost XBEGIN instruction. + IF RTM_ACTIVE == 0
+ // nop
+ELSE
+ // restore architectural register state
+ // discard memory updates performed in transaction
+ // update EAX with status and imm8 value
+ eax[31:24] := imm8[7:0]
+ RTM_NEST_COUNT := 0
+ RTM_ACTIVE := 0
+ IF _64_BIT_MODE
+ RIP := fallbackRIP
+ ELSE
+ EIP := fallbackEIP
+ FI
+FI
+
+
+ RTM +
immintrin.h
+ General Support
- - - Specify the start of an RTM code region. - If the logical processor was not already in transactional execution, then this call - causes the logical processor to transition into transactional execution. - On an RTM abort, the logical processor discards all architectural register and memory - updates performed during the RTM execution, restores architectural state, and starts - execution beginning at the fallback address computed from the outermost XBEGIN - instruction. Return status of ~0 (0xFFFF) if continuing inside transaction; all other - codes are aborts. - IF RTM_NEST_COUNT < MAX_RTM_NEST_COUNT - RTM_NEST_COUNT := RTM_NEST_COUNT + 1 - IF RTM_NEST_COUNT == 1 - IF _64_BIT_MODE + + + Specify the start of an RTM code region. + If the logical processor was not already in transactional execution, then this call causes the logical processor to transition into transactional execution. + On an RTM abort, the logical processor discards all architectural register and memory updates performed during the RTM execution, restores architectural state, and starts execution beginning at the fallback address computed from the outermost XBEGIN instruction. Return status of ~0 (0xFFFF) if continuing inside transaction; all other codes are aborts. + IF RTM_NEST_COUNT < MAX_RTM_NEST_COUNT + RTM_NEST_COUNT := RTM_NEST_COUNT + 1 + IF RTM_NEST_COUNT == 1 + IF _64_BIT_MODE fallbackRIP := RIP - ELSE IF _32_BIT_MODE + ELSE IF _32_BIT_MODE fallbackEIP := EIP - FI - - RTM_ACTIVE := 1 - // enter RTM execution, record register state, start tracking memory state - FI - ELSE - // RTM abort (see _xabort) - FI - - - RTM -
immintrin.h
- General Support + FI + + RTM_ACTIVE := 1 + // enter RTM execution, record register state, start tracking memory state + FI +ELSE + // RTM abort (see _xabort) +FI +
+ + RTM +
immintrin.h
+ General Support
- - - Specify the end of an RTM code region. - If this corresponds to the outermost scope, the logical processor will attempt to commit - the logical processor state atomically. - If the commit fails, the logical processor will perform an RTM abort. - IF RTM_ACTIVE == 1 - RTM_NEST_COUNT := RTM_NEST_COUNT - 1 - IF RTM_NEST_COUNT == 0 - // try to commit transaction - IF FAIL_TO_COMMIT_TRANSACTION + + + Specify the end of an RTM code region. + If this corresponds to the outermost scope, the logical processor will attempt to commit the logical processor state atomically. + If the commit fails, the logical processor will perform an RTM abort. + IF RTM_ACTIVE == 1 + RTM_NEST_COUNT := RTM_NEST_COUNT - 1 + IF RTM_NEST_COUNT == 0 + // try to commit transaction + IF FAIL_TO_COMMIT_TRANSACTION // RTM abort (see _xabort) - ELSE - RTM_ACTIVE := 0 - FI - FI - FI - - - RTM -
immintrin.h
- General Support -
- - - - Query the transactional execution status, return 1 if inside a transactionally - executing RTM or HLE region, and return 0 otherwise. - IF (RTM_ACTIVE == 1 OR HLE_ACTIVE == 1) - dst := 1 - ELSE - dst := 0 - FI - - - RTM -
immintrin.h
- General Support -
- - - - - Serialize instruction execution, ensuring all modifications to flags, - registers, and memory by previous instructions are completed before the next instruction - is fetched. - - SERIALIZE -
immintrin.h
- General Support -
- - - - - - - Perform an intermediate calculation for the next four SHA1 message values - (unsigned 32-bit integers) using previous message values from "a" and "b", and store the - result in "dst". - - W0 := a[127:96] - W1 := a[95:64] - W2 := a[63:32] - W3 := a[31:0] - W4 := b[127:96] - W5 := b[95:64] - dst[127:96] := W2 XOR W0 - dst[95:64] := W3 XOR W1 - dst[63:32] := W4 XOR W2 - dst[31:0] := W5 XOR W3 - - - SHA -
immintrin.h
- Cryptography + ELSE + RTM_ACTIVE := 0 + FI + FI +FI + + + RTM +
immintrin.h
+ General Support +
+ + + + Query the transactional execution status, return 1 if inside a transactionally executing RTM or HLE region, and return 0 otherwise. + IF (RTM_ACTIVE == 1 OR HLE_ACTIVE == 1) + dst := 1 +ELSE + dst := 0 +FI + + + RTM +
immintrin.h
+ General Support +
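Taken together, _xbegin/_xend/_xabort/_xtest give the usual transactional pattern. A hedged sketch (the RTM intrinsics are unstable in core::arch, so this is nightly-only, and real code needs a lock-based fallback path; the wrapper name is illustrative):

    #[cfg(target_arch = "x86_64")]
    #[target_feature(enable = "rtm")]
    unsafe fn try_txn_increment(counter: &mut u64) -> bool {
        use core::arch::x86_64::{_xbegin, _xend, _XBEGIN_STARTED};
        if _xbegin() == _XBEGIN_STARTED {
            // Inside the region: this store is tracked and either commits
            // atomically at _xend() or is discarded on abort.
            *counter += 1;
            _xend();
            true
        } else {
            // Abort path: _xbegin() returned an abort status instead.
            false
        }
    }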
+ + + + + Serialize instruction execution, ensuring all modifications to flags, registers, and memory by previous instructions are completed before the next instruction is fetched. + + SERIALIZE +
immintrin.h
+ General Support +
+ + + + + + + Perform an intermediate calculation for the next four SHA1 message values (unsigned 32-bit integers) using previous message values from "a" and "b", and store the result in "dst". + +W0 := a[127:96] +W1 := a[95:64] +W2 := a[63:32] +W3 := a[31:0] +W4 := b[127:96] +W5 := b[95:64] +dst[127:96] := W2 XOR W0 +dst[95:64] := W3 XOR W1 +dst[63:32] := W4 XOR W2 +dst[31:0] := W5 XOR W3 + + + SHA +
immintrin.h
+ Cryptography
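A sketch of this step through the stable SHA-NI binding in stdarch (wrapper name illustrative):

    #[cfg(target_arch = "x86_64")]
    #[target_feature(enable = "sha")]
    unsafe fn sha1_msg1(a: core::arch::x86_64::__m128i, b: core::arch::x86_64::__m128i)
        -> core::arch::x86_64::__m128i {
        // W0..W3 come from `a`, W4..W5 from `b`; the result is the four XORs
        // above, which _mm_sha1msg2_epu32 then finishes into W16..W19.
        core::arch::x86_64::_mm_sha1msg1_epu32(a, b)
    }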
- - - - Perform the final calculation for the next four SHA1 message values (unsigned - 32-bit integers) using the intermediate result in "a" and the previous message values in - "b", and store the result in "dst". - - W13 := b[95:64] - W14 := b[63:32] - W15 := b[31:0] - W16 := (a[127:96] XOR W13) <<< 1 - W17 := (a[95:64] XOR W14) <<< 1 - W18 := (a[63:32] XOR W15) <<< 1 - W19 := (a[31:0] XOR W16) <<< 1 - dst[127:96] := W16 - dst[95:64] := W17 - dst[63:32] := W18 - dst[31:0] := W19 - - - SHA -
immintrin.h
- Cryptography + + + + Perform the final calculation for the next four SHA1 message values (unsigned 32-bit integers) using the intermediate result in "a" and the previous message values in "b", and store the result in "dst". + +W13 := b[95:64] +W14 := b[63:32] +W15 := b[31:0] +W16 := (a[127:96] XOR W13) <<< 1 +W17 := (a[95:64] XOR W14) <<< 1 +W18 := (a[63:32] XOR W15) <<< 1 +W19 := (a[31:0] XOR W16) <<< 1 +dst[127:96] := W16 +dst[95:64] := W17 +dst[63:32] := W18 +dst[31:0] := W19 + + + SHA +
immintrin.h
+ Cryptography
- - - - Calculate SHA1 state variable E after four rounds of operation from the current - SHA1 state variable "a", add that value to the scheduled values (unsigned 32-bit - integers) in "b", and store the result in "dst". - - tmp := (a[127:96] <<< 30) - dst[127:96] := b[127:96] + tmp - dst[95:64] := b[95:64] - dst[63:32] := b[63:32] - dst[31:0] := b[31:0] - - - SHA -
immintrin.h
- Cryptography + + + + Calculate SHA1 state variable E after four rounds of operation from the current SHA1 state variable "a", add that value to the scheduled values (unsigned 32-bit integers) in "b", and store the result in "dst". + +tmp := (a[127:96] <<< 30) +dst[127:96] := b[127:96] + tmp +dst[95:64] := b[95:64] +dst[63:32] := b[63:32] +dst[31:0] := b[31:0] + + + SHA +
immintrin.h
+ Cryptography
- - - - - Perform four rounds of SHA1 operation using an initial SHA1 state (A,B,C,D) - from "a" and some pre-computed sum of the next 4 round message values (unsigned 32-bit - integers), and state variable E from "b", and store the updated SHA1 state (A,B,C,D) in - "dst". "func" contains the logic functions and round constants. - IF (func[1:0] == 0) - f := f0() - K := K0 - ELSE IF (func[1:0] == 1) - f := f1() - K := K1 - ELSE IF (func[1:0] == 2) - f := f2() - K := K2 - ELSE IF (func[1:0] == 3) - f := f3() - K := K3 - FI - A := a[127:96] - B := a[95:64] - C := a[63:32] - D := a[31:0] - W[0] := b[127:96] - W[1] := b[95:64] - W[2] := b[63:32] - W[3] := b[31:0] - A[1] := f(B, C, D) + (A <<< 5) + W[0] + K - B[1] := A - C[1] := B <<< 30 - D[1] := C - E[1] := D - FOR i := 1 to 3 - A[i+1] := f(B[i], C[i], D[i]) + (A[i] <<< 5) + W[i] + E[i] + K - B[i+1] := A[i] - C[i+1] := B[i] <<< 30 - D[i+1] := C[i] - E[i+1] := D[i] - ENDFOR - dst[127:96] := A[4] - dst[95:64] := B[4] - dst[63:32] := C[4] - dst[31:0] := D[4] - - - SHA -
immintrin.h
- Cryptography + + + + + Perform four rounds of SHA1 operation using an initial SHA1 state (A,B,C,D) from "a" and some pre-computed sum of the next 4 round message values (unsigned 32-bit integers), and state variable E from "b", and store the updated SHA1 state (A,B,C,D) in "dst". "func" contains the logic functions and round constants. + IF (func[1:0] == 0) + f := f0() + K := K0 +ELSE IF (func[1:0] == 1) + f := f1() + K := K1 +ELSE IF (func[1:0] == 2) + f := f2() + K := K2 +ELSE IF (func[1:0] == 3) + f := f3() + K := K3 +FI +A := a[127:96] +B := a[95:64] +C := a[63:32] +D := a[31:0] +W[0] := b[127:96] +W[1] := b[95:64] +W[2] := b[63:32] +W[3] := b[31:0] +A[1] := f(B, C, D) + (A <<< 5) + W[0] + K +B[1] := A +C[1] := B <<< 30 +D[1] := C +E[1] := D +FOR i := 1 to 3 + A[i+1] := f(B[i], C[i], D[i]) + (A[i] <<< 5) + W[i] + E[i] + K + B[i+1] := A[i] + C[i+1] := B[i] <<< 30 + D[i+1] := C[i] + E[i+1] := D[i] +ENDFOR +dst[127:96] := A[4] +dst[95:64] := B[4] +dst[63:32] := C[4] +dst[31:0] := D[4] + + + SHA +
immintrin.h
+ Cryptography
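In the stdarch binding the "func" immediate is a const generic; a sketch for rounds 0..19 (FUNC = 0 selects f0/K0):

    #[cfg(target_arch = "x86_64")]
    #[target_feature(enable = "sha")]
    unsafe fn sha1_rounds4(abcd: core::arch::x86_64::__m128i, e_wk: core::arch::x86_64::__m128i)
        -> core::arch::x86_64::__m128i {
        // `abcd` holds the SHA-1 state (A,B,C,D); `e_wk` holds E already
        // summed with the four message words, as the pseudocode assumes.
        core::arch::x86_64::_mm_sha1rnds4_epu32::<0>(abcd, e_wk)
    }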
- - - - Perform an intermediate calculation for the next four SHA256 message values - (unsigned 32-bit integers) using previous message values from "a" and "b", and store the - result in "dst". - W4 := b[31:0] - W3 := a[127:96] - W2 := a[95:64] - W1 := a[63:32] - W0 := a[31:0] - dst[127:96] := W3 + sigma0(W4) - dst[95:64] := W2 + sigma0(W3) - dst[63:32] := W1 + sigma0(W2) - dst[31:0] := W0 + sigma0(W1) - - - SHA -
immintrin.h
- Cryptography + + + + Perform an intermediate calculation for the next four SHA256 message values (unsigned 32-bit integers) using previous message values from "a" and "b", and store the result in "dst". + W4 := b[31:0] +W3 := a[127:96] +W2 := a[95:64] +W1 := a[63:32] +W0 := a[31:0] +dst[127:96] := W3 + sigma0(W4) +dst[95:64] := W2 + sigma0(W3) +dst[63:32] := W1 + sigma0(W2) +dst[31:0] := W0 + sigma0(W1) + + + SHA +
immintrin.h
+ Cryptography
-
-
-
- Perform the final calculation for the next four SHA256 message values (unsigned
- 32-bit integers) using previous message values from "a" and "b", and store the result in
- "dst"."
- W14 := b[95:64]
- W15 := b[127:96]
- W16 := a[31:0] + sigma1(W14)
- W17 := a[63:32] + sigma1(W15)
- W18 := a[95:64] + sigma1(W16)
- W19 := a[127:96] + sigma1(W17)
- dst[127:96] := W19
- dst[95:64] := W18
- dst[63:32] := W17
- dst[31:0] := W16
-
-
- SHA
-
+
+
+
+ Perform the final calculation for the next four SHA256 message values (unsigned 32-bit integers) using previous message values from "a" and "b", and store the result in "dst".
+ W14 := b[95:64]
+W15 := b[127:96]
+W16 := a[31:0] + sigma1(W14)
+W17 := a[63:32] + sigma1(W15)
+W18 := a[95:64] + sigma1(W16)
+W19 := a[127:96] + sigma1(W17)
+dst[127:96] := W19
+dst[95:64] := W18
+dst[63:32] := W17
+dst[31:0] := W16
+
+
+ SHA
+
immintrin.h
+ Cryptography
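The two message-scheduling steps combine with an _mm_alignr_epi8/_mm_add_epi32 pair in the middle, which supplies the W[i-7] term that neither instruction computes. A sketch of one full schedule round under that assumption, following the usual SHA-NI idiom:

    #[cfg(target_arch = "x86_64")]
    #[target_feature(enable = "sha,ssse3")]
    unsafe fn sha256_next_w(
        w0: core::arch::x86_64::__m128i, // W[i-16..i-13] (oldest)
        w1: core::arch::x86_64::__m128i, // W[i-12..i-9]
        w2: core::arch::x86_64::__m128i, // W[i-8..i-5]
        w3: core::arch::x86_64::__m128i, // W[i-4..i-1] (newest)
    ) -> core::arch::x86_64::__m128i {
        use core::arch::x86_64::{
            _mm_add_epi32, _mm_alignr_epi8, _mm_sha256msg1_epu32, _mm_sha256msg2_epu32,
        };
        let t = _mm_sha256msg1_epu32(w0, w1);                   // + sigma0 terms
        let t = _mm_add_epi32(t, _mm_alignr_epi8::<4>(w3, w2)); // + W[i-7..i-4]
        _mm_sha256msg2_epu32(t, w3)                             // + sigma1 terms
    }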
- - - - - Perform 2 rounds of SHA256 operation using an initial SHA256 state (C,D,G,H) - from "a", an initial SHA256 state (A,B,E,F) from "b", and a pre-computed sum of the next - 2 round message values (unsigned 32-bit integers) and the corresponding round constants - from "k", and store the updated SHA256 state (A,B,E,F) in "dst". - A[0] := b[127:96] - B[0] := b[95:64] - C[0] := a[127:96] - D[0] := a[95:64] - E[0] := b[63:32] - F[0] := b[31:0] - G[0] := a[63:32] - H[0] := a[31:0] - W_K[0] := k[31:0] - W_K[1] := k[63:32] - FOR i := 0 to 1 - A[i+1] := Ch(E[i], F[i], G[i]) + sum1(E[i]) + W_K[i] + H[i] + Maj(A[i], B[i], C[i]) + - sum0(A[i]) - B[i+1] := A[i] - C[i+1] := B[i] - D[i+1] := C[i] - E[i+1] := Ch(E[i], F[i], G[i]) + sum1(E[i]) + W_K[i] + H[i] + D[i] - F[i+1] := E[i] - G[i+1] := F[i] - H[i+1] := G[i] - ENDFOR - dst[127:96] := A[2] - dst[95:64] := B[2] - dst[63:32] := E[2] - dst[31:0] := F[2] - - - SHA -
immintrin.h
- Cryptography -
- - - - - SHA512 - This intrinisc is one of the two SHA512 message scheduling instructions. The - intrinsic performs an intermediate calculation for the next four SHA512 message qwords. - The calculated results are stored in "dst". - -msha512 - - - DEFINE ROR64(qword, n) { - count := n % 64 - dest := (qword >> count) | (qword << (64 - count)) - RETURN dest - } - DEFINE SHR64(qword, n) { - RETURN qword >> n - } - DEFINE s0(qword) { - RETURN ROR64(qword,1) ^ ROR64(qword, 8) ^ SHR64(qword, 7) - } - W.qword[4] := __B.qword[0] - W.qword[3] := __A.qword[3] - W.qword[2] := __A.qword[2] - W.qword[1] := __A.qword[1] - W.qword[0] := __A.qword[0] - dst.qword[3] := W.qword[3] + s0(W.qword[4]) - dst.qword[2] := W.qword[2] + s0(W.qword[3]) - dst.qword[1] := W.qword[1] + s0(W.qword[2]) - dst.qword[0] := W.qword[0] + s0(W.qword[1]) + + + + + Perform 2 rounds of SHA256 operation using an initial SHA256 state (C,D,G,H) from "a", an initial SHA256 state (A,B,E,F) from "b", and a pre-computed sum of the next 2 round message values (unsigned 32-bit integers) and the corresponding round constants from "k", and store the updated SHA256 state (A,B,E,F) in "dst". + A[0] := b[127:96] +B[0] := b[95:64] +C[0] := a[127:96] +D[0] := a[95:64] +E[0] := b[63:32] +F[0] := b[31:0] +G[0] := a[63:32] +H[0] := a[31:0] +W_K[0] := k[31:0] +W_K[1] := k[63:32] +FOR i := 0 to 1 + A[i+1] := Ch(E[i], F[i], G[i]) + sum1(E[i]) + W_K[i] + H[i] + Maj(A[i], B[i], C[i]) + sum0(A[i]) + B[i+1] := A[i] + C[i+1] := B[i] + D[i+1] := C[i] + E[i+1] := Ch(E[i], F[i], G[i]) + sum1(E[i]) + W_K[i] + H[i] + D[i] + F[i+1] := E[i] + G[i+1] := F[i] + H[i+1] := G[i] +ENDFOR +dst[127:96] := A[2] +dst[95:64] := B[2] +dst[63:32] := E[2] +dst[31:0] := F[2] + + + SHA +
immintrin.h
+ Cryptography +
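A sketch of driving two rounds through the stable binding; the operand roles match the pseudocode ("a" carries C,D,G,H and "b" carries A,B,E,F):

    #[cfg(target_arch = "x86_64")]
    #[target_feature(enable = "sha")]
    unsafe fn sha256_two_rounds(
        cdgh: core::arch::x86_64::__m128i,
        abef: core::arch::x86_64::__m128i,
        wk: core::arch::x86_64::__m128i, // W+K for the round pair, low two dwords
    ) -> core::arch::x86_64::__m128i {
        // Returns the updated (A,B,E,F); the previous `abef` becomes the next
        // call's (C,D,G,H) operand.
        core::arch::x86_64::_mm_sha256rnds2_epu32(cdgh, abef, wk)
    }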
+
+
+
+
+ This intrinsic is one of the two SHA512 message scheduling instructions. The intrinsic performs an intermediate calculation for the next four SHA512 message qwords. The calculated results are stored in "dst".
+
+
+DEFINE ROR64(qword, n) {
+ count := n % 64
+ dest := (qword >> count) | (qword << (64 - count))
+ RETURN dest
+}
+DEFINE SHR64(qword, n) {
+ RETURN qword >> n
+}
+DEFINE s0(qword) {
+ RETURN ROR64(qword,1) ^ ROR64(qword, 8) ^ SHR64(qword, 7)
+}
+W.qword[4] := __B.qword[0]
+W.qword[3] := __A.qword[3]
+W.qword[2] := __A.qword[2]
+W.qword[1] := __A.qword[1]
+W.qword[0] := __A.qword[0]
+dst.qword[3] := W.qword[3] + s0(W.qword[4])
+dst.qword[2] := W.qword[2] + s0(W.qword[3])
+dst.qword[1] := W.qword[1] + s0(W.qword[2])
+dst.qword[0] := W.qword[0] + s0(W.qword[1])

-
-
- SHA512
-
immintrin.h
- Cryptography -
- - - SHA512 - This intrinisc is one of the two SHA512 message scheduling instructions. The - intrinsic performs the final calculation for the next four SHA512 message qwords. The - calculated results are stored in "dst". - -msha512 - - - DEFINE ROR64(qword, n) { - count := n % 64 - dest := (qword >> count) | (qword << (64 - count)) - RETURN dest - } - DEFINE SHR64(qword, n) { - RETURN qword >> n - } - DEFINE s1(qword) { - RETURN ROR64(qword,19) ^ ROR64(qword, 61) ^ SHR64(qword, 6) - } - W.qword[14] := __B.qword[2] - W.qword[15] := __B.qword[3] - W.qword[16] := __A.qword[0] + s1(W.qword[14]) - W.qword[17] := __A.qword[1] + s1(W.qword[15]) - W.qword[18] := __A.qword[2] + s1(W.qword[16]) - W.qword[19] := __A.qword[3] + s1(W.qword[17]) - dst.qword[3] := W.qword[19] - dst.qword[2] := W.qword[18] - dst.qword[1] := W.qword[17] - dst.qword[0] := W.qword[16] + + + SHA512 + AVX +
immintrin.h
+ Cryptography +
+
+
+ This intrinsic is one of the two SHA512 message scheduling instructions. The intrinsic performs the final calculation for the next four SHA512 message qwords. The calculated results are stored in "dst".
+
+
+DEFINE ROR64(qword, n) {
+ count := n % 64
+ dest := (qword >> count) | (qword << (64 - count))
+ RETURN dest
+}
+DEFINE SHR64(qword, n) {
+ RETURN qword >> n
+}
+DEFINE s1(qword) {
+ RETURN ROR64(qword,19) ^ ROR64(qword, 61) ^ SHR64(qword, 6)
+}
+W.qword[14] := __B.qword[2]
+W.qword[15] := __B.qword[3]
+W.qword[16] := __A.qword[0] + s1(W.qword[14])
+W.qword[17] := __A.qword[1] + s1(W.qword[15])
+W.qword[18] := __A.qword[2] + s1(W.qword[16])
+W.qword[19] := __A.qword[3] + s1(W.qword[17])
+dst.qword[3] := W.qword[19]
+dst.qword[2] := W.qword[18]
+dst.qword[1] := W.qword[17]
+dst.qword[0] := W.qword[16]

-
-
- SHA512
-
immintrin.h
- Cryptography -
- - - SHA512 - This intrinisc performs two rounds of SHA512 operation using initial SHA512 - state (C,D,G,H) from "__A", an initial SHA512 state (A,B,E,F) from "__B", and a - pre-computed sum of the next two round message qwords and the corresponding round - constants from "__C" (only the two lower qwords of the third operand). The updated - SHA512 state (A,B,E,F) is written to "dst", and "dst" can be used as the updated state - (C,D,G,H) in later rounds. - -msha512 - - - DEFINE ROR64(qword, n) { - count := n % 64 - dest := (qword >> count) | (qword << (64 - count)) - RETURN dest - } - DEFINE SHR64(qword, n) { - RETURN qword >> n - } - DEFINE cap_sigma0(qword) { - RETURN ROR64(qword, 28) ^ ROR64(qword, 34) ^ ROR64(qword, 39) - } - DEFINE cap_sigma1(qword) { - RETURN ROR64(qword, 14) ^ ROR64(qword, 18) ^ ROR64(qword, 41) - } - DEFINE MAJ(a,b,c) { - RETURN (a & b) ^ (a & c) ^ (b & c) - } - DEFINE CH(a,b,c) { - RETURN (a & b) ^ (c & ~a) - } - A.qword[0] := __B.qword[3] - B.qword[0] := __B.qword[2] - C.qword[0] := __A.qword[3] - D.qword[0] := __A.qword[2] - E.qword[0] := __B.qword[1] - F.qword[0] := __B.qword[0] - G.qword[0] := __A.qword[1] - H.qword[0] := __A.qword[0] - WK.qword[0]:= __C.qword[0] - WK.qword[1]:= __C.qword[1] - FOR i := 0 to 1 - A.qword[i+1] := CH(E.qword[i], F.qword[i], G.qword[i]) + cap_sigma1(E.qword[i]) + - WK.qword[i] + H.qword[i] + MAJ(A.qword[i], B.qword[i], C.qword[i]) + - cap_sigma0(A.qword[i]) - B.qword[i+1] := A.qword[i] - C.qword[i+1] := B.qword[i] - D.qword[i+1] := C.qword[i] - E.qword[i+1] := CH(E.qword[i], F.qword[i], G.qword[i]) + cap_sigma1(E.qword[i]) + - WK.qword[i] + H.qword[i] + D.qword[i] - F.qword[i+1] := E.qword[i] - G.qword[i+1] := F.qword[i] - H.qword[i+1] := G.qword[i] - ENDFOR - dst.qword[3] := A.qword[2] - dst.qword[2] := B.qword[2] - dst.qword[1] := E.qword[2] - dst.qword[0] := F.qword[2] + + + SHA512 + AVX +
immintrin.h
+ Cryptography +
+
+
+ This intrinsic performs two rounds of SHA512 operation using initial SHA512 state (C,D,G,H) from "__A", an initial SHA512 state (A,B,E,F) from "__B", and a pre-computed sum of the next two round message qwords and the corresponding round constants from "__C" (only the two lower qwords of the third operand). The updated SHA512 state (A,B,E,F) is written to "dst", and "dst" can be used as the updated state (C,D,G,H) in later rounds.
+
+
+DEFINE ROR64(qword, n) {
+ count := n % 64
+ dest := (qword >> count) | (qword << (64 - count))
+ RETURN dest
+}
+DEFINE SHR64(qword, n) {
+ RETURN qword >> n
+}
+DEFINE cap_sigma0(qword) {
+ RETURN ROR64(qword, 28) ^ ROR64(qword, 34) ^ ROR64(qword, 39)
+}
+DEFINE cap_sigma1(qword) {
+ RETURN ROR64(qword, 14) ^ ROR64(qword, 18) ^ ROR64(qword, 41)
+}
+DEFINE MAJ(a,b,c) {
+ RETURN (a & b) ^ (a & c) ^ (b & c)
+}
+DEFINE CH(a,b,c) {
+ RETURN (a & b) ^ (c & ~a)
+}
+A.qword[0] := __B.qword[3]
+B.qword[0] := __B.qword[2]
+C.qword[0] := __A.qword[3]
+D.qword[0] := __A.qword[2]
+E.qword[0] := __B.qword[1]
+F.qword[0] := __B.qword[0]
+G.qword[0] := __A.qword[1]
+H.qword[0] := __A.qword[0]
+WK.qword[0]:= __C.qword[0]
+WK.qword[1]:= __C.qword[1]
+FOR i := 0 to 1
+ A.qword[i+1] := CH(E.qword[i], F.qword[i], G.qword[i]) + cap_sigma1(E.qword[i]) + WK.qword[i] + H.qword[i] + MAJ(A.qword[i], B.qword[i], C.qword[i]) + cap_sigma0(A.qword[i])
+ B.qword[i+1] := A.qword[i]
+ C.qword[i+1] := B.qword[i]
+ D.qword[i+1] := C.qword[i]
+ E.qword[i+1] := CH(E.qword[i], F.qword[i], G.qword[i]) + cap_sigma1(E.qword[i]) + WK.qword[i] + H.qword[i] + D.qword[i]
+ F.qword[i+1] := E.qword[i]
+ G.qword[i+1] := F.qword[i]
+ H.qword[i+1] := G.qword[i]
+ENDFOR
+dst.qword[3] := A.qword[2]
+dst.qword[2] := B.qword[2]
+dst.qword[1] := E.qword[2]
+dst.qword[0] := F.qword[2]

-
-
-
- SHA512
-
immintrin.h
- Cryptography -
- - - The VSM3MSG1 intrinsic is one of the two SM3 message scheduling intrinsics. The - intrinsic performs an initial calculation for the next four SM3 message words. The - calculated results are stored in "dst". - - - DEFINE ROL32(dword, n) { - count := n % 32 - dest := (dword << count) | (dword >> (32 - count)) - RETURN dest - } - DEFINE P1(x) { - RETURN x ^ ROL32(x, 15) ^ ROL32(x, 23) - } - W.dword[0] := __C.dword[0] - W.dword[1] := __C.dword[1] - W.dword[2] := __C.dword[2] - W.dword[3] := __C.dword[3] - W.dword[7] := __A.dword[0] - W.dword[8] := __A.dword[1] - W.dword[9] := __A.dword[2] - W.dword[10] := __A.dword[3] - W.dword[13] := __B.dword[0] - W.dword[14] := __B.dword[1] - W.dword[15] := __B.dword[2] - TMP0 := W.dword[7] ^ W.dword[0] ^ ROL32(W.dword[13], 15) - TMP1 := W.dword[8] ^ W.dword[1] ^ ROL32(W.dword[14], 15) - TMP2 := W.dword[9] ^ W.dword[2] ^ ROL32(W.dword[15], 15) - TMP3 := W.dword[10] ^ W.dword[3] - dst.dword[0] := P1(TMP0) - dst.dword[1] := P1(TMP1) - dst.dword[2] := P1(TMP2) - dst.dword[3] := P1(TMP3) + + + + SHA512 + AVX +
immintrin.h
+ Cryptography +
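These AVX SHA-512 entries correspond to the unstable bindings behind this patch series' `sha512_sm_x86` feature gate; assuming the Intel C prototypes carry over unchanged, a hedged sketch of the rounds instruction:

    #[cfg(target_arch = "x86_64")]
    #[target_feature(enable = "sha512,avx")]
    unsafe fn sha512_two_rounds(
        cdgh: core::arch::x86_64::__m256i, // "__A": (C,D,G,H)
        abef: core::arch::x86_64::__m256i, // "__B": (A,B,E,F)
        wk: core::arch::x86_64::__m128i,   // "__C": the two W+K qwords
    ) -> core::arch::x86_64::__m256i {
        core::arch::x86_64::_mm256_sha512rnds2_epi64(cdgh, abef, wk)
    }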
+ + + The VSM3MSG1 intrinsic is one of the two SM3 message scheduling intrinsics. The intrinsic performs an initial calculation for the next four SM3 message words. The calculated results are stored in "dst". + + +DEFINE ROL32(dword, n) { + count := n % 32 + dest := (dword << count) | (dword >> (32 - count)) + RETURN dest +} +DEFINE P1(x) { + RETURN x ^ ROL32(x, 15) ^ ROL32(x, 23) +} +W.dword[0] := __C.dword[0] +W.dword[1] := __C.dword[1] +W.dword[2] := __C.dword[2] +W.dword[3] := __C.dword[3] +W.dword[7] := __A.dword[0] +W.dword[8] := __A.dword[1] +W.dword[9] := __A.dword[2] +W.dword[10] := __A.dword[3] +W.dword[13] := __B.dword[0] +W.dword[14] := __B.dword[1] +W.dword[15] := __B.dword[2] +TMP0 := W.dword[7] ^ W.dword[0] ^ ROL32(W.dword[13], 15) +TMP1 := W.dword[8] ^ W.dword[1] ^ ROL32(W.dword[14], 15) +TMP2 := W.dword[9] ^ W.dword[2] ^ ROL32(W.dword[15], 15) +TMP3 := W.dword[10] ^ W.dword[3] +dst.dword[0] := P1(TMP0) +dst.dword[1] := P1(TMP1) +dst.dword[2] := P1(TMP2) +dst.dword[3] := P1(TMP3) - - - - SM3 -
immintrin.h
- Cryptography -
- - - The VSM3MSG2 intrinsic is one of the two SM3 message scheduling intrinsics. The - intrinsic performs the final calculation for the next four SM3 message words. The - calculated results are stored in "dst". - - - DEFINE ROL32(dword, n) { - count := n % 32 - dest := (dword << count) | (dword >> (32-count)) - RETURN dest - } - WTMP.dword[0] := __A.dword[0] - WTMP.dword[1] := __A.dword[1] - WTMP.dword[2] := __A.dword[2] - WTMP.dword[3] := __A.dword[3] - W.dword[3] := __B.dword[0] - W.dword[4] := __B.dword[1] - W.dword[5] := __B.dword[2] - W.dword[6] := __B.dword[3] - W.dword[10] := __C.dword[0] - W.dword[11] := __C.dword[1] - W.dword[12] := __C.dword[2] - W.dword[13] := __C.dword[3] - W.dword[16] := ROL32(W.dword[3], 7) ^ W.dword[10] ^ WTMP.dword[0] - W.dword[17] := ROL32(W.dword[4], 7) ^ W.dword[11] ^ WTMP.dword[1] - W.dword[18] := ROL32(W.dword[5], 7) ^ W.dword[12] ^ WTMP.dword[2] - W.dword[19] := ROL32(W.dword[6], 7) ^ W.dword[13] ^ WTMP.dword[3] - W.dword[19] := W.dword[19] ^ ROL32(W.dword[16], 6) ^ ROL32(W.dword[16], 15) ^ - ROL32(W.dword[16], 30) - dst.dword[0] := W.dword[16] - dst.dword[1] := W.dword[17] - dst.dword[2] := W.dword[18] - dst.dword[3] := W.dword[19] + + + + SM3 + AVX +
immintrin.h
+ Cryptography +
+ + + The VSM3MSG2 intrinsic is one of the two SM3 message scheduling intrinsics. The intrinsic performs the final calculation for the next four SM3 message words. The calculated results are stored in "dst". + + +DEFINE ROL32(dword, n) { + count := n % 32 + dest := (dword << count) | (dword >> (32-count)) + RETURN dest +} +WTMP.dword[0] := __A.dword[0] +WTMP.dword[1] := __A.dword[1] +WTMP.dword[2] := __A.dword[2] +WTMP.dword[3] := __A.dword[3] +W.dword[3] := __B.dword[0] +W.dword[4] := __B.dword[1] +W.dword[5] := __B.dword[2] +W.dword[6] := __B.dword[3] +W.dword[10] := __C.dword[0] +W.dword[11] := __C.dword[1] +W.dword[12] := __C.dword[2] +W.dword[13] := __C.dword[3] +W.dword[16] := ROL32(W.dword[3], 7) ^ W.dword[10] ^ WTMP.dword[0] +W.dword[17] := ROL32(W.dword[4], 7) ^ W.dword[11] ^ WTMP.dword[1] +W.dword[18] := ROL32(W.dword[5], 7) ^ W.dword[12] ^ WTMP.dword[2] +W.dword[19] := ROL32(W.dword[6], 7) ^ W.dword[13] ^ WTMP.dword[3] +W.dword[19] := W.dword[19] ^ ROL32(W.dword[16], 6) ^ ROL32(W.dword[16], 15) ^ ROL32(W.dword[16], 30) +dst.dword[0] := W.dword[16] +dst.dword[1] := W.dword[17] +dst.dword[2] := W.dword[18] +dst.dword[3] := W.dword[19] - - - - SM3 -
immintrin.h
- Cryptography -
- - - The intrinsic performs two rounds of SM3 operation using initial SM3 state (C, - D, G, H) from "__A", an initial SM3 states (A, B, E, F) from "__B" and a pre-computed - words from the "__C". "__A" with initial SM3 state of (C, D, G, H) assumes input of - non-rotated left variables from previous state. The updated SM3 state (A, B, E, F) is - written to "__A". The "imm8" should contain the even round number for the first of the - two rounds computed by this instruction. The computation masks the "imm8" value by - ANDing it with 0x3E so that only even round numbers from 0 through 62 are used for this - operation. The calculated results are stored in "dst". - - - DEFINE ROL32(dword, n) { - count := n % 32 - dest := (dword << count) | (dword >> (32-count)) - RETURN dest - } - DEFINE P0(x) { - RETURN x ^ ROL32(x, 9) ^ ROL32(x, 17) - } - DEFINE FF(x, y, z, round) { - IF round < 16 - RETURN (x ^ y ^ z) - ELSE - RETURN (x & y) | (x & z) | (y & z) - FI - } - DEFINE GG(x, y, z, round){ - IF round < 16 - RETURN (x ^ y ^ z) - ELSE - RETURN (x & y) | (~x & z) - FI - } - A.dword[0] := __B.dword[3] - B.dword[0] := __B.dword[2] - C.dword[0] := __A.dword[3] - D.dword[0] := __A.dword[2] - E.dword[0] := __B.dword[1] - F.dword[0] := __B.dword[0] - G.dword[0] := __A.dword[1] - H.dword[0] := __A.dword[0] - W.dword[0] := __C.dword[0] - W.dword[1] := __C.dword[1] - W.dword[4] := __C.dword[2] - W.dword[5] := __C.dword[3] - C.dword[0] := ROL32(C.dword[0], 9) - D.dword[0] := ROL32(D.dword[0], 9) - G.dword[0] := ROL32(G.dword[0], 19) - H.dword[0] := ROL32(H.dword[0], 19) - ROUND := imm8 & 0x3E - IF ROUND < 16 - CONST.dword[0] := 0x79CC4519 - ELSE - CONST.dword[0] := 0x7A879D8A - FI - CONST.dword[0] := ROL32(CONST.dword[0], ROUND) - FOR i:= 0 to 1 - temp.dword[0] := ROL32(A.dword[i], 12) + E.dword[i] + CONST.dword[0] - S1.dword[0] := ROL32(temp.dword[0], 7) - S2.dword[0] := S1.dword[0] ^ ROL32(A.dword[i], 12) - T1.dword[0] := FF(A.dword[i], B.dword[i], C.dword[i], ROUND) + D.dword[i] + S2.dword[0] - + (W.dword[i] ^ W.dword[i+4]) - T2.dword[0] := GG(E.dword[i], F.dword[i], G.dword[i], ROUND) + H.dword[i] + S1.dword[0] - + W.dword[i] - D.dword[i+1] := C.dword[i] - C.dword[i+1] := ROL32(B.dword[i], 9) - B.dword[i+1] := A.dword[i] - A.dword[i+1] := T1.dword[0] - H.dword[i+1] := G.dword[i] - G.dword[i+1] := ROL32(F.dword[i], 19) - F.dword[i+1] := E.dword[i] - E.dword[i+1] := P0(T2.dword[0]) - CONST.dword[0] := ROL32(CONST.dword[0], 1) - ENDFOR - dst.dword[3] := A.dword[2] - dst.dword[2] := B.dword[2] - dst.dword[1] := E.dword[2] - dst.dword[0] := F.dword[2] + + + + SM3 + AVX +
immintrin.h
+ Cryptography +
+
+
+ The intrinsic performs two rounds of SM3 operation using initial SM3 state (C, D, G, H) from "__A", an initial SM3 state (A, B, E, F) from "__B", and pre-computed words from "__C". "__A" with the initial SM3 state (C, D, G, H) assumes input of non-rotated left variables from the previous state. The updated SM3 state (A, B, E, F) is written to "dst". The "imm8" should contain the even round number for the first of the two rounds computed by this instruction. The computation masks the "imm8" value by ANDing it with 0x3E so that only even round numbers from 0 through 62 are used for this operation. The calculated results are stored in "dst".
+
+
+DEFINE ROL32(dword, n) {
+ count := n % 32
+ dest := (dword << count) | (dword >> (32-count))
+ RETURN dest
+}
+DEFINE P0(x) {
+ RETURN x ^ ROL32(x, 9) ^ ROL32(x, 17)
+}
+DEFINE FF(x, y, z, round) {
+ IF round < 16
+ RETURN (x ^ y ^ z)
+ ELSE
+ RETURN (x & y) | (x & z) | (y & z)
+ FI
+}
+DEFINE GG(x, y, z, round){
+ IF round < 16
+ RETURN (x ^ y ^ z)
+ ELSE
+ RETURN (x & y) | (~x & z)
+ FI
+}
+A.dword[0] := __B.dword[3]
+B.dword[0] := __B.dword[2]
+C.dword[0] := __A.dword[3]
+D.dword[0] := __A.dword[2]
+E.dword[0] := __B.dword[1]
+F.dword[0] := __B.dword[0]
+G.dword[0] := __A.dword[1]
+H.dword[0] := __A.dword[0]
+W.dword[0] := __C.dword[0]
+W.dword[1] := __C.dword[1]
+W.dword[4] := __C.dword[2]
+W.dword[5] := __C.dword[3]
+C.dword[0] := ROL32(C.dword[0], 9)
+D.dword[0] := ROL32(D.dword[0], 9)
+G.dword[0] := ROL32(G.dword[0], 19)
+H.dword[0] := ROL32(H.dword[0], 19)
+ROUND := imm8 & 0x3E
+IF ROUND < 16
+ CONST.dword[0] := 0x79CC4519
+ELSE
+ CONST.dword[0] := 0x7A879D8A
+FI
+CONST.dword[0] := ROL32(CONST.dword[0], ROUND)
+FOR i:= 0 to 1
+ temp.dword[0] := ROL32(A.dword[i], 12) + E.dword[i] + CONST.dword[0]
+ S1.dword[0] := ROL32(temp.dword[0], 7)
+ S2.dword[0] := S1.dword[0] ^ ROL32(A.dword[i], 12)
+ T1.dword[0] := FF(A.dword[i], B.dword[i], C.dword[i], ROUND) + D.dword[i] + S2.dword[0] + (W.dword[i] ^ W.dword[i+4])
+ T2.dword[0] := GG(E.dword[i], F.dword[i], G.dword[i], ROUND) + H.dword[i] + S1.dword[0] + W.dword[i]
+ D.dword[i+1] := C.dword[i]
+ C.dword[i+1] := ROL32(B.dword[i], 9)
+ B.dword[i+1] := A.dword[i]
+ A.dword[i+1] := T1.dword[0]
+ H.dword[i+1] := G.dword[i]
+ G.dword[i+1] := ROL32(F.dword[i], 19)
+ F.dword[i+1] := E.dword[i]
+ E.dword[i+1] := P0(T2.dword[0])
+ CONST.dword[0] := ROL32(CONST.dword[0], 1)
+ENDFOR
+dst.dword[3] := A.dword[2]
+dst.dword[2] := B.dword[2]
+dst.dword[1] := E.dword[2]
+dst.dword[0] := F.dword[2]

-
-
-
-
- SM3
-
immintrin.h
- Cryptography -
- - - This intrinsic performs four rounds of SM4 key expansion. The intrinsic - operates on independent 128-bit lanes. The calculated results are stored in "dst". - - - BYTE sbox[256] = { - 0xD6, 0x90, 0xE9, 0xFE, 0xCC, 0xE1, 0x3D, 0xB7, 0x16, 0xB6, 0x14, 0xC2, 0x28, 0xFB, - 0x2C, 0x05, - 0x2B, 0x67, 0x9A, 0x76, 0x2A, 0xBE, 0x04, 0xC3, 0xAA, 0x44, 0x13, 0x26, 0x49, 0x86, - 0x06, 0x99, - 0x9C, 0x42, 0x50, 0xF4, 0x91, 0xEF, 0x98, 0x7A, 0x33, 0x54, 0x0B, 0x43, 0xED, 0xCF, - 0xAC, 0x62, - 0xE4, 0xB3, 0x1C, 0xA9, 0xC9, 0x08, 0xE8, 0x95, 0x80, 0xDF, 0x94, 0xFA, 0x75, 0x8F, - 0x3F, 0xA6, - 0x47, 0x07, 0xA7, 0xFC, 0xF3, 0x73, 0x17, 0xBA, 0x83, 0x59, 0x3C, 0x19, 0xE6, 0x85, - 0x4F, 0xA8, - 0x68, 0x6B, 0x81, 0xB2, 0x71, 0x64, 0xDA, 0x8B, 0xF8, 0xEB, 0x0F, 0x4B, 0x70, 0x56, - 0x9D, 0x35, - 0x1E, 0x24, 0x0E, 0x5E, 0x63, 0x58, 0xD1, 0xA2, 0x25, 0x22, 0x7C, 0x3B, 0x01, 0x21, - 0x78, 0x87, - 0xD4, 0x00, 0x46, 0x57, 0x9F, 0xD3, 0x27, 0x52, 0x4C, 0x36, 0x02, 0xE7, 0xA0, 0xC4, - 0xC8, 0x9E, - 0xEA, 0xBF, 0x8A, 0xD2, 0x40, 0xC7, 0x38, 0xB5, 0xA3, 0xF7, 0xF2, 0xCE, 0xF9, 0x61, - 0x15, 0xA1, - 0xE0, 0xAE, 0x5D, 0xA4, 0x9B, 0x34, 0x1A, 0x55, 0xAD, 0x93, 0x32, 0x30, 0xF5, 0x8C, - 0xB1, 0xE3, - 0x1D, 0xF6, 0xE2, 0x2E, 0x82, 0x66, 0xCA, 0x60, 0xC0, 0x29, 0x23, 0xAB, 0x0D, 0x53, - 0x4E, 0x6F, - 0xD5, 0xDB, 0x37, 0x45, 0xDE, 0xFD, 0x8E, 0x2F, 0x03, 0xFF, 0x6A, 0x72, 0x6D, 0x6C, - 0x5B, 0x51, - 0x8D, 0x1B, 0xAF, 0x92, 0xBB, 0xDD, 0xBC, 0x7F, 0x11, 0xD9, 0x5C, 0x41, 0x1F, 0x10, - 0x5A, 0xD8, - 0x0A, 0xC1, 0x31, 0x88, 0xA5, 0xCD, 0x7B, 0xBD, 0x2D, 0x74, 0xD0, 0x12, 0xB8, 0xE5, - 0xB4, 0xB0, - 0x89, 0x69, 0x97, 0x4A, 0x0C, 0x96, 0x77, 0x7E, 0x65, 0xB9, 0xF1, 0x09, 0xC5, 0x6E, - 0xC6, 0x84, - 0x18, 0xF0, 0x7D, 0xEC, 0x3A, 0xDC, 0x4D, 0x20, 0x79, 0xEE, 0x5F, 0x3E, 0xD7, 0xCB, - 0x39, 0x48 - } - DEFINE ROL32(dword, n) { - count := n % 32 - dest := (dword << count) | (dword >> (32-count)) - RETURN dest - } - DEFINE SBOX_BYTE(dword, i) { - RETURN sbox[dword.byte[i]] - } - DEFINE lower_t(dword) { - tmp.byte[0] := SBOX_BYTE(dword, 0) - tmp.byte[1] := SBOX_BYTE(dword, 1) - tmp.byte[2] := SBOX_BYTE(dword, 2) - tmp.byte[3] := SBOX_BYTE(dword, 3) - RETURN tmp - } - DEFINE L_KEY(dword) { - RETURN dword ^ ROL32(dword, 13) ^ ROL32(dword, 23) - } - DEFINE T_KEY(dword) { - RETURN L_KEY(lower_t(dword)) - } - DEFINE F_KEY(X0, X1, X2, X3, round_key) { - RETURN X0 ^ T_KEY(X1 ^ X2 ^ X3 ^ round_key) - } - FOR i:= 0 to 1 - P.dword[0] := __A.dword[4*i] - P.dword[1] := __A.dword[4*i+1] - P.dword[2] := __A.dword[4*i+2] - P.dword[3] := __A.dword[4*i+3] - C.dword[0] := F_KEY(P.dword[0], P.dword[1], P.dword[2], P.dword[3], __B.dword[4*i]) - C.dword[1] := F_KEY(P.dword[1], P.dword[2], P.dword[3], C.dword[0], __B.dword[4*i+1]) - C.dword[2] := F_KEY(P.dword[2], P.dword[3], C.dword[0], C.dword[1], __B.dword[4*i+2]) - C.dword[3] := F_KEY(P.dword[3], C.dword[0], C.dword[1], C.dword[2], __B.dword[4*i+3]) - dst.dword[4*i] := C.dword[0] - dst.dword[4*i+1] := C.dword[1] - dst.dword[4*i+2] := C.dword[2] - dst.dword[4*i+3] := C.dword[3] - ENDFOR - dst[MAX:256] := 0 + + + + + SM3 + AVX +
immintrin.h
+ Cryptography +
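Assuming the corresponding unstable `sm3` binding keeps the Intel prototype, with the round number as a const generic, a hedged sketch:

    #[cfg(target_arch = "x86_64")]
    #[target_feature(enable = "sm3,avx")]
    unsafe fn sm3_rounds_0_1(
        cdgh: core::arch::x86_64::__m128i, // "__A": (C,D,G,H), non-rotated inputs
        abef: core::arch::x86_64::__m128i, // "__B": (A,B,E,F)
        w: core::arch::x86_64::__m128i,    // "__C": W0, W1, W4, W5
    ) -> core::arch::x86_64::__m128i {
        // IMM8 = 0 is masked with 0x3E, so this computes rounds 0 and 1.
        core::arch::x86_64::_mm_sm3rnds2_epi32::<0>(cdgh, abef, w)
    }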
+ + + This intrinsic performs four rounds of SM4 key expansion. The intrinsic operates on independent 128-bit lanes. The calculated results are stored in "dst". + + +BYTE sbox[256] = { +0xD6, 0x90, 0xE9, 0xFE, 0xCC, 0xE1, 0x3D, 0xB7, 0x16, 0xB6, 0x14, 0xC2, 0x28, 0xFB, 0x2C, 0x05, +0x2B, 0x67, 0x9A, 0x76, 0x2A, 0xBE, 0x04, 0xC3, 0xAA, 0x44, 0x13, 0x26, 0x49, 0x86, 0x06, 0x99, +0x9C, 0x42, 0x50, 0xF4, 0x91, 0xEF, 0x98, 0x7A, 0x33, 0x54, 0x0B, 0x43, 0xED, 0xCF, 0xAC, 0x62, +0xE4, 0xB3, 0x1C, 0xA9, 0xC9, 0x08, 0xE8, 0x95, 0x80, 0xDF, 0x94, 0xFA, 0x75, 0x8F, 0x3F, 0xA6, +0x47, 0x07, 0xA7, 0xFC, 0xF3, 0x73, 0x17, 0xBA, 0x83, 0x59, 0x3C, 0x19, 0xE6, 0x85, 0x4F, 0xA8, +0x68, 0x6B, 0x81, 0xB2, 0x71, 0x64, 0xDA, 0x8B, 0xF8, 0xEB, 0x0F, 0x4B, 0x70, 0x56, 0x9D, 0x35, +0x1E, 0x24, 0x0E, 0x5E, 0x63, 0x58, 0xD1, 0xA2, 0x25, 0x22, 0x7C, 0x3B, 0x01, 0x21, 0x78, 0x87, +0xD4, 0x00, 0x46, 0x57, 0x9F, 0xD3, 0x27, 0x52, 0x4C, 0x36, 0x02, 0xE7, 0xA0, 0xC4, 0xC8, 0x9E, +0xEA, 0xBF, 0x8A, 0xD2, 0x40, 0xC7, 0x38, 0xB5, 0xA3, 0xF7, 0xF2, 0xCE, 0xF9, 0x61, 0x15, 0xA1, +0xE0, 0xAE, 0x5D, 0xA4, 0x9B, 0x34, 0x1A, 0x55, 0xAD, 0x93, 0x32, 0x30, 0xF5, 0x8C, 0xB1, 0xE3, +0x1D, 0xF6, 0xE2, 0x2E, 0x82, 0x66, 0xCA, 0x60, 0xC0, 0x29, 0x23, 0xAB, 0x0D, 0x53, 0x4E, 0x6F, +0xD5, 0xDB, 0x37, 0x45, 0xDE, 0xFD, 0x8E, 0x2F, 0x03, 0xFF, 0x6A, 0x72, 0x6D, 0x6C, 0x5B, 0x51, +0x8D, 0x1B, 0xAF, 0x92, 0xBB, 0xDD, 0xBC, 0x7F, 0x11, 0xD9, 0x5C, 0x41, 0x1F, 0x10, 0x5A, 0xD8, +0x0A, 0xC1, 0x31, 0x88, 0xA5, 0xCD, 0x7B, 0xBD, 0x2D, 0x74, 0xD0, 0x12, 0xB8, 0xE5, 0xB4, 0xB0, +0x89, 0x69, 0x97, 0x4A, 0x0C, 0x96, 0x77, 0x7E, 0x65, 0xB9, 0xF1, 0x09, 0xC5, 0x6E, 0xC6, 0x84, +0x18, 0xF0, 0x7D, 0xEC, 0x3A, 0xDC, 0x4D, 0x20, 0x79, 0xEE, 0x5F, 0x3E, 0xD7, 0xCB, 0x39, 0x48 +} +DEFINE ROL32(dword, n) { + count := n % 32 + dest := (dword << count) | (dword >> (32-count)) + RETURN dest +} +DEFINE SBOX_BYTE(dword, i) { + RETURN sbox[dword.byte[i]] +} +DEFINE lower_t(dword) { + tmp.byte[0] := SBOX_BYTE(dword, 0) + tmp.byte[1] := SBOX_BYTE(dword, 1) + tmp.byte[2] := SBOX_BYTE(dword, 2) + tmp.byte[3] := SBOX_BYTE(dword, 3) + RETURN tmp +} +DEFINE L_KEY(dword) { + RETURN dword ^ ROL32(dword, 13) ^ ROL32(dword, 23) +} +DEFINE T_KEY(dword) { + RETURN L_KEY(lower_t(dword)) +} +DEFINE F_KEY(X0, X1, X2, X3, round_key) { + RETURN X0 ^ T_KEY(X1 ^ X2 ^ X3 ^ round_key) +} +FOR i:= 0 to 1 + P.dword[0] := __A.dword[4*i] + P.dword[1] := __A.dword[4*i+1] + P.dword[2] := __A.dword[4*i+2] + P.dword[3] := __A.dword[4*i+3] + C.dword[0] := F_KEY(P.dword[0], P.dword[1], P.dword[2], P.dword[3], __B.dword[4*i]) + C.dword[1] := F_KEY(P.dword[1], P.dword[2], P.dword[3], C.dword[0], __B.dword[4*i+1]) + C.dword[2] := F_KEY(P.dword[2], P.dword[3], C.dword[0], C.dword[1], __B.dword[4*i+2]) + C.dword[3] := F_KEY(P.dword[3], C.dword[0], C.dword[1], C.dword[2], __B.dword[4*i+3]) + dst.dword[4*i] := C.dword[0] + dst.dword[4*i+1] := C.dword[1] + dst.dword[4*i+2] := C.dword[2] + dst.dword[4*i+3] := C.dword[3] +ENDFOR +dst[MAX:256] := 0 - - - SM4 -
immintrin.h
- Cryptography -
- - - This intrinisc performs four rounds of SM4 encryption. The intrinisc operates - on independent 128-bit lanes. The calculated results are stored in "dst". - - BYTE sbox[256] = { - 0xD6, 0x90, 0xE9, 0xFE, 0xCC, 0xE1, 0x3D, 0xB7, 0x16, 0xB6, 0x14, 0xC2, 0x28, 0xFB, - 0x2C, 0x05, - 0x2B, 0x67, 0x9A, 0x76, 0x2A, 0xBE, 0x04, 0xC3, 0xAA, 0x44, 0x13, 0x26, 0x49, 0x86, - 0x06, 0x99, - 0x9C, 0x42, 0x50, 0xF4, 0x91, 0xEF, 0x98, 0x7A, 0x33, 0x54, 0x0B, 0x43, 0xED, 0xCF, - 0xAC, 0x62, - 0xE4, 0xB3, 0x1C, 0xA9, 0xC9, 0x08, 0xE8, 0x95, 0x80, 0xDF, 0x94, 0xFA, 0x75, 0x8F, - 0x3F, 0xA6, - 0x47, 0x07, 0xA7, 0xFC, 0xF3, 0x73, 0x17, 0xBA, 0x83, 0x59, 0x3C, 0x19, 0xE6, 0x85, - 0x4F, 0xA8, - 0x68, 0x6B, 0x81, 0xB2, 0x71, 0x64, 0xDA, 0x8B, 0xF8, 0xEB, 0x0F, 0x4B, 0x70, 0x56, - 0x9D, 0x35, - 0x1E, 0x24, 0x0E, 0x5E, 0x63, 0x58, 0xD1, 0xA2, 0x25, 0x22, 0x7C, 0x3B, 0x01, 0x21, - 0x78, 0x87, - 0xD4, 0x00, 0x46, 0x57, 0x9F, 0xD3, 0x27, 0x52, 0x4C, 0x36, 0x02, 0xE7, 0xA0, 0xC4, - 0xC8, 0x9E, - 0xEA, 0xBF, 0x8A, 0xD2, 0x40, 0xC7, 0x38, 0xB5, 0xA3, 0xF7, 0xF2, 0xCE, 0xF9, 0x61, - 0x15, 0xA1, - 0xE0, 0xAE, 0x5D, 0xA4, 0x9B, 0x34, 0x1A, 0x55, 0xAD, 0x93, 0x32, 0x30, 0xF5, 0x8C, - 0xB1, 0xE3, - 0x1D, 0xF6, 0xE2, 0x2E, 0x82, 0x66, 0xCA, 0x60, 0xC0, 0x29, 0x23, 0xAB, 0x0D, 0x53, - 0x4E, 0x6F, - 0xD5, 0xDB, 0x37, 0x45, 0xDE, 0xFD, 0x8E, 0x2F, 0x03, 0xFF, 0x6A, 0x72, 0x6D, 0x6C, - 0x5B, 0x51, - 0x8D, 0x1B, 0xAF, 0x92, 0xBB, 0xDD, 0xBC, 0x7F, 0x11, 0xD9, 0x5C, 0x41, 0x1F, 0x10, - 0x5A, 0xD8, - 0x0A, 0xC1, 0x31, 0x88, 0xA5, 0xCD, 0x7B, 0xBD, 0x2D, 0x74, 0xD0, 0x12, 0xB8, 0xE5, - 0xB4, 0xB0, - 0x89, 0x69, 0x97, 0x4A, 0x0C, 0x96, 0x77, 0x7E, 0x65, 0xB9, 0xF1, 0x09, 0xC5, 0x6E, - 0xC6, 0x84, - 0x18, 0xF0, 0x7D, 0xEC, 0x3A, 0xDC, 0x4D, 0x20, 0x79, 0xEE, 0x5F, 0x3E, 0xD7, 0xCB, - 0x39, 0x48 - } - DEFINE ROL32(dword, n) { - count := n % 32 - dest := (dword << count) | (dword >> (32-count)) - RETURN dest - } - DEFINE SBOX_BYTE(dword, i) { - RETURN sbox[dword.byte[i]] - } - DEFINE lower_t(dword) { - tmp.byte[0] := SBOX_BYTE(dword, 0) - tmp.byte[1] := SBOX_BYTE(dword, 1) - tmp.byte[2] := SBOX_BYTE(dword, 2) - tmp.byte[3] := SBOX_BYTE(dword, 3) - RETURN tmp - } - DEFINE L_RND(dword) { - tmp := dword - tmp := tmp ^ ROL32(dword, 2) - tmp := tmp ^ ROL32(dword, 10) - tmp := tmp ^ ROL32(dword, 18) - tmp := tmp ^ ROL32(dword, 24) - RETURN tmp - } - DEFINE T_RND(dword) { - RETURN L_RND(lower_t(dword)) - } - DEFINE F_RND(X0, X1, X2, X3, round_key) { - RETURN X0 ^ T_RND(X1 ^ X2 ^ X3 ^ round_key) - } - FOR i:= 0 to 1 - P.dword[0] := __A.dword[4*i] - P.dword[1] := __A.dword[4*i+1] - P.dword[2] := __A.dword[4*i+2] - P.dword[3] := __A.dword[4*i+3] - C.dword[0] := F_RND(P.dword[0], P.dword[1], P.dword[2], P.dword[3], __B.dword[4*i]) - C.dword[1] := F_RND(P.dword[1], P.dword[2], P.dword[3], C.dword[0], __B.dword[4*i+1]) - C.dword[2] := F_RND(P.dword[2], P.dword[3], C.dword[0], C.dword[1], __B.dword[4*i+2]) - C.dword[3] := F_RND(P.dword[3], C.dword[0], C.dword[1], C.dword[2], __B.dword[4*i+3]) - dst.dword[4*i] := C.dword[0] - dst.dword[4*i+1] := C.dword[1] - dst.dword[4*i+2] := C.dword[2] - dst.dword[4*i+3] := C.dword[3] - ENDFOR - dst[MAX:256] := 0 + + + SM4 + AVX +
immintrin.h
+ Cryptography +
+
+
+ This intrinsic performs four rounds of SM4 encryption. The intrinsic operates on independent 128-bit lanes. The calculated results are stored in "dst".
+
+ BYTE sbox[256] = {
+0xD6, 0x90, 0xE9, 0xFE, 0xCC, 0xE1, 0x3D, 0xB7, 0x16, 0xB6, 0x14, 0xC2, 0x28, 0xFB, 0x2C, 0x05,
+0x2B, 0x67, 0x9A, 0x76, 0x2A, 0xBE, 0x04, 0xC3, 0xAA, 0x44, 0x13, 0x26, 0x49, 0x86, 0x06, 0x99,
+0x9C, 0x42, 0x50, 0xF4, 0x91, 0xEF, 0x98, 0x7A, 0x33, 0x54, 0x0B, 0x43, 0xED, 0xCF, 0xAC, 0x62,
+0xE4, 0xB3, 0x1C, 0xA9, 0xC9, 0x08, 0xE8, 0x95, 0x80, 0xDF, 0x94, 0xFA, 0x75, 0x8F, 0x3F, 0xA6,
+0x47, 0x07, 0xA7, 0xFC, 0xF3, 0x73, 0x17, 0xBA, 0x83, 0x59, 0x3C, 0x19, 0xE6, 0x85, 0x4F, 0xA8,
+0x68, 0x6B, 0x81, 0xB2, 0x71, 0x64, 0xDA, 0x8B, 0xF8, 0xEB, 0x0F, 0x4B, 0x70, 0x56, 0x9D, 0x35,
+0x1E, 0x24, 0x0E, 0x5E, 0x63, 0x58, 0xD1, 0xA2, 0x25, 0x22, 0x7C, 0x3B, 0x01, 0x21, 0x78, 0x87,
+0xD4, 0x00, 0x46, 0x57, 0x9F, 0xD3, 0x27, 0x52, 0x4C, 0x36, 0x02, 0xE7, 0xA0, 0xC4, 0xC8, 0x9E,
+0xEA, 0xBF, 0x8A, 0xD2, 0x40, 0xC7, 0x38, 0xB5, 0xA3, 0xF7, 0xF2, 0xCE, 0xF9, 0x61, 0x15, 0xA1,
+0xE0, 0xAE, 0x5D, 0xA4, 0x9B, 0x34, 0x1A, 0x55, 0xAD, 0x93, 0x32, 0x30, 0xF5, 0x8C, 0xB1, 0xE3,
+0x1D, 0xF6, 0xE2, 0x2E, 0x82, 0x66, 0xCA, 0x60, 0xC0, 0x29, 0x23, 0xAB, 0x0D, 0x53, 0x4E, 0x6F,
+0xD5, 0xDB, 0x37, 0x45, 0xDE, 0xFD, 0x8E, 0x2F, 0x03, 0xFF, 0x6A, 0x72, 0x6D, 0x6C, 0x5B, 0x51,
+0x8D, 0x1B, 0xAF, 0x92, 0xBB, 0xDD, 0xBC, 0x7F, 0x11, 0xD9, 0x5C, 0x41, 0x1F, 0x10, 0x5A, 0xD8,
+0x0A, 0xC1, 0x31, 0x88, 0xA5, 0xCD, 0x7B, 0xBD, 0x2D, 0x74, 0xD0, 0x12, 0xB8, 0xE5, 0xB4, 0xB0,
+0x89, 0x69, 0x97, 0x4A, 0x0C, 0x96, 0x77, 0x7E, 0x65, 0xB9, 0xF1, 0x09, 0xC5, 0x6E, 0xC6, 0x84,
+0x18, 0xF0, 0x7D, 0xEC, 0x3A, 0xDC, 0x4D, 0x20, 0x79, 0xEE, 0x5F, 0x3E, 0xD7, 0xCB, 0x39, 0x48
+}
+DEFINE ROL32(dword, n) {
+ count := n % 32
+ dest := (dword << count) | (dword >> (32-count))
+ RETURN dest
+}
+DEFINE SBOX_BYTE(dword, i) {
+ RETURN sbox[dword.byte[i]]
+}
+DEFINE lower_t(dword) {
+ tmp.byte[0] := SBOX_BYTE(dword, 0)
+ tmp.byte[1] := SBOX_BYTE(dword, 1)
+ tmp.byte[2] := SBOX_BYTE(dword, 2)
+ tmp.byte[3] := SBOX_BYTE(dword, 3)
+ RETURN tmp
+}
+DEFINE L_RND(dword) {
+ tmp := dword
+ tmp := tmp ^ ROL32(dword, 2)
+ tmp := tmp ^ ROL32(dword, 10)
+ tmp := tmp ^ ROL32(dword, 18)
+ tmp := tmp ^ ROL32(dword, 24)
+ RETURN tmp
+}
+DEFINE T_RND(dword) {
+ RETURN L_RND(lower_t(dword))
+}
+DEFINE F_RND(X0, X1, X2, X3, round_key) {
+ RETURN X0 ^ T_RND(X1 ^ X2 ^ X3 ^ round_key)
+}
+FOR i:= 0 to 1
+ P.dword[0] := __A.dword[4*i]
+ P.dword[1] := __A.dword[4*i+1]
+ P.dword[2] := __A.dword[4*i+2]
+ P.dword[3] := __A.dword[4*i+3]
+ C.dword[0] := F_RND(P.dword[0], P.dword[1], P.dword[2], P.dword[3], __B.dword[4*i])
+ C.dword[1] := F_RND(P.dword[1], P.dword[2], P.dword[3], C.dword[0], __B.dword[4*i+1])
+ C.dword[2] := F_RND(P.dword[2], P.dword[3], C.dword[0], C.dword[1], __B.dword[4*i+2])
+ C.dword[3] := F_RND(P.dword[3], C.dword[0], C.dword[1], C.dword[2], __B.dword[4*i+3])
+ dst.dword[4*i] := C.dword[0]
+ dst.dword[4*i+1] := C.dword[1]
+ dst.dword[4*i+2] := C.dword[2]
+ dst.dword[4*i+3] := C.dword[3]
+ENDFOR
+dst[MAX:256] := 0

-
-
- SM4
-
immintrin.h
- Cryptography -
- - - This intrinsic performs four rounds of SM4 key expansion. The intrinsic - operates on independent 128-bit lanes. The calculated results are stored in "dst". - - - BYTE sbox[256] = { - 0xD6, 0x90, 0xE9, 0xFE, 0xCC, 0xE1, 0x3D, 0xB7, 0x16, 0xB6, 0x14, 0xC2, 0x28, 0xFB, - 0x2C, 0x05, - 0x2B, 0x67, 0x9A, 0x76, 0x2A, 0xBE, 0x04, 0xC3, 0xAA, 0x44, 0x13, 0x26, 0x49, 0x86, - 0x06, 0x99, - 0x9C, 0x42, 0x50, 0xF4, 0x91, 0xEF, 0x98, 0x7A, 0x33, 0x54, 0x0B, 0x43, 0xED, 0xCF, - 0xAC, 0x62, - 0xE4, 0xB3, 0x1C, 0xA9, 0xC9, 0x08, 0xE8, 0x95, 0x80, 0xDF, 0x94, 0xFA, 0x75, 0x8F, - 0x3F, 0xA6, - 0x47, 0x07, 0xA7, 0xFC, 0xF3, 0x73, 0x17, 0xBA, 0x83, 0x59, 0x3C, 0x19, 0xE6, 0x85, - 0x4F, 0xA8, - 0x68, 0x6B, 0x81, 0xB2, 0x71, 0x64, 0xDA, 0x8B, 0xF8, 0xEB, 0x0F, 0x4B, 0x70, 0x56, - 0x9D, 0x35, - 0x1E, 0x24, 0x0E, 0x5E, 0x63, 0x58, 0xD1, 0xA2, 0x25, 0x22, 0x7C, 0x3B, 0x01, 0x21, - 0x78, 0x87, - 0xD4, 0x00, 0x46, 0x57, 0x9F, 0xD3, 0x27, 0x52, 0x4C, 0x36, 0x02, 0xE7, 0xA0, 0xC4, - 0xC8, 0x9E, - 0xEA, 0xBF, 0x8A, 0xD2, 0x40, 0xC7, 0x38, 0xB5, 0xA3, 0xF7, 0xF2, 0xCE, 0xF9, 0x61, - 0x15, 0xA1, - 0xE0, 0xAE, 0x5D, 0xA4, 0x9B, 0x34, 0x1A, 0x55, 0xAD, 0x93, 0x32, 0x30, 0xF5, 0x8C, - 0xB1, 0xE3, - 0x1D, 0xF6, 0xE2, 0x2E, 0x82, 0x66, 0xCA, 0x60, 0xC0, 0x29, 0x23, 0xAB, 0x0D, 0x53, - 0x4E, 0x6F, - 0xD5, 0xDB, 0x37, 0x45, 0xDE, 0xFD, 0x8E, 0x2F, 0x03, 0xFF, 0x6A, 0x72, 0x6D, 0x6C, - 0x5B, 0x51, - 0x8D, 0x1B, 0xAF, 0x92, 0xBB, 0xDD, 0xBC, 0x7F, 0x11, 0xD9, 0x5C, 0x41, 0x1F, 0x10, - 0x5A, 0xD8, - 0x0A, 0xC1, 0x31, 0x88, 0xA5, 0xCD, 0x7B, 0xBD, 0x2D, 0x74, 0xD0, 0x12, 0xB8, 0xE5, - 0xB4, 0xB0, - 0x89, 0x69, 0x97, 0x4A, 0x0C, 0x96, 0x77, 0x7E, 0x65, 0xB9, 0xF1, 0x09, 0xC5, 0x6E, - 0xC6, 0x84, - 0x18, 0xF0, 0x7D, 0xEC, 0x3A, 0xDC, 0x4D, 0x20, 0x79, 0xEE, 0x5F, 0x3E, 0xD7, 0xCB, - 0x39, 0x48 - } - DEFINE ROL32(dword, n) { - count := n % 32 - dest := (dword << count) | (dword >> (32-count)) - RETURN dest - } - DEFINE SBOX_BYTE(dword, i) { - RETURN sbox[dword.byte[i]] - } - DEFINE lower_t(dword) { - tmp.byte[0] := SBOX_BYTE(dword, 0) - tmp.byte[1] := SBOX_BYTE(dword, 1) - tmp.byte[2] := SBOX_BYTE(dword, 2) - tmp.byte[3] := SBOX_BYTE(dword, 3) - RETURN tmp - } - DEFINE L_KEY(dword) { - RETURN dword ^ ROL32(dword, 13) ^ ROL32(dword, 23) - } - DEFINE T_KEY(dword) { - RETURN L_KEY(lower_t(dword)) - } - DEFINE F_KEY(X0, X1, X2, X3, round_key) { - RETURN X0 ^ T_KEY(X1 ^ X2 ^ X3 ^ round_key) - } - P.dword[0] := __A.dword[0] - P.dword[1] := __A.dword[1] - P.dword[2] := __A.dword[2] - P.dword[3] := __A.dword[3] - C.dword[0] := F_KEY(P.dword[0], P.dword[1], P.dword[2], P.dword[3], __B.dword[0]) - C.dword[1] := F_KEY(P.dword[1], P.dword[2], P.dword[3], C.dword[0], __B.dword[1]) - C.dword[2] := F_KEY(P.dword[2], P.dword[3], C.dword[0], C.dword[1], __B.dword[2]) - C.dword[3] := F_KEY(P.dword[3], C.dword[0], C.dword[1], C.dword[2], __B.dword[3]) - dst.dword[0] := C.dword[0] - dst.dword[1] := C.dword[1] - dst.dword[2] := C.dword[2] - dst.dword[3] := C.dword[3] - dst[MAX:128] := 0 + + + SM4 + AVX +
immintrin.h
+ Cryptography +
+ + + This intrinsic performs four rounds of SM4 key expansion. The intrinsic operates on independent 128-bit lanes. The calculated results are stored in "dst". + + +BYTE sbox[256] = { +0xD6, 0x90, 0xE9, 0xFE, 0xCC, 0xE1, 0x3D, 0xB7, 0x16, 0xB6, 0x14, 0xC2, 0x28, 0xFB, 0x2C, 0x05, +0x2B, 0x67, 0x9A, 0x76, 0x2A, 0xBE, 0x04, 0xC3, 0xAA, 0x44, 0x13, 0x26, 0x49, 0x86, 0x06, 0x99, +0x9C, 0x42, 0x50, 0xF4, 0x91, 0xEF, 0x98, 0x7A, 0x33, 0x54, 0x0B, 0x43, 0xED, 0xCF, 0xAC, 0x62, +0xE4, 0xB3, 0x1C, 0xA9, 0xC9, 0x08, 0xE8, 0x95, 0x80, 0xDF, 0x94, 0xFA, 0x75, 0x8F, 0x3F, 0xA6, +0x47, 0x07, 0xA7, 0xFC, 0xF3, 0x73, 0x17, 0xBA, 0x83, 0x59, 0x3C, 0x19, 0xE6, 0x85, 0x4F, 0xA8, +0x68, 0x6B, 0x81, 0xB2, 0x71, 0x64, 0xDA, 0x8B, 0xF8, 0xEB, 0x0F, 0x4B, 0x70, 0x56, 0x9D, 0x35, +0x1E, 0x24, 0x0E, 0x5E, 0x63, 0x58, 0xD1, 0xA2, 0x25, 0x22, 0x7C, 0x3B, 0x01, 0x21, 0x78, 0x87, +0xD4, 0x00, 0x46, 0x57, 0x9F, 0xD3, 0x27, 0x52, 0x4C, 0x36, 0x02, 0xE7, 0xA0, 0xC4, 0xC8, 0x9E, +0xEA, 0xBF, 0x8A, 0xD2, 0x40, 0xC7, 0x38, 0xB5, 0xA3, 0xF7, 0xF2, 0xCE, 0xF9, 0x61, 0x15, 0xA1, +0xE0, 0xAE, 0x5D, 0xA4, 0x9B, 0x34, 0x1A, 0x55, 0xAD, 0x93, 0x32, 0x30, 0xF5, 0x8C, 0xB1, 0xE3, +0x1D, 0xF6, 0xE2, 0x2E, 0x82, 0x66, 0xCA, 0x60, 0xC0, 0x29, 0x23, 0xAB, 0x0D, 0x53, 0x4E, 0x6F, +0xD5, 0xDB, 0x37, 0x45, 0xDE, 0xFD, 0x8E, 0x2F, 0x03, 0xFF, 0x6A, 0x72, 0x6D, 0x6C, 0x5B, 0x51, +0x8D, 0x1B, 0xAF, 0x92, 0xBB, 0xDD, 0xBC, 0x7F, 0x11, 0xD9, 0x5C, 0x41, 0x1F, 0x10, 0x5A, 0xD8, +0x0A, 0xC1, 0x31, 0x88, 0xA5, 0xCD, 0x7B, 0xBD, 0x2D, 0x74, 0xD0, 0x12, 0xB8, 0xE5, 0xB4, 0xB0, +0x89, 0x69, 0x97, 0x4A, 0x0C, 0x96, 0x77, 0x7E, 0x65, 0xB9, 0xF1, 0x09, 0xC5, 0x6E, 0xC6, 0x84, +0x18, 0xF0, 0x7D, 0xEC, 0x3A, 0xDC, 0x4D, 0x20, 0x79, 0xEE, 0x5F, 0x3E, 0xD7, 0xCB, 0x39, 0x48 +} +DEFINE ROL32(dword, n) { + count := n % 32 + dest := (dword << count) | (dword >> (32-count)) + RETURN dest +} +DEFINE SBOX_BYTE(dword, i) { + RETURN sbox[dword.byte[i]] +} +DEFINE lower_t(dword) { + tmp.byte[0] := SBOX_BYTE(dword, 0) + tmp.byte[1] := SBOX_BYTE(dword, 1) + tmp.byte[2] := SBOX_BYTE(dword, 2) + tmp.byte[3] := SBOX_BYTE(dword, 3) + RETURN tmp +} +DEFINE L_KEY(dword) { + RETURN dword ^ ROL32(dword, 13) ^ ROL32(dword, 23) +} +DEFINE T_KEY(dword) { + RETURN L_KEY(lower_t(dword)) +} +DEFINE F_KEY(X0, X1, X2, X3, round_key) { + RETURN X0 ^ T_KEY(X1 ^ X2 ^ X3 ^ round_key) +} +P.dword[0] := __A.dword[0] +P.dword[1] := __A.dword[1] +P.dword[2] := __A.dword[2] +P.dword[3] := __A.dword[3] +C.dword[0] := F_KEY(P.dword[0], P.dword[1], P.dword[2], P.dword[3], __B.dword[0]) +C.dword[1] := F_KEY(P.dword[1], P.dword[2], P.dword[3], C.dword[0], __B.dword[1]) +C.dword[2] := F_KEY(P.dword[2], P.dword[3], C.dword[0], C.dword[1], __B.dword[2]) +C.dword[3] := F_KEY(P.dword[3], C.dword[0], C.dword[1], C.dword[2], __B.dword[3]) +dst.dword[0] := C.dword[0] +dst.dword[1] := C.dword[1] +dst.dword[2] := C.dword[2] +dst.dword[3] := C.dword[3] +dst[MAX:128] := 0 - - - SM4 -
immintrin.h
- Cryptography -
- - - This intrinisc performs four rounds of SM4 encryption. The intrinisc operates - on independent 128-bit lanes. The calculated results are stored in "dst". - - - BYTE sbox[256] = { - 0xD6, 0x90, 0xE9, 0xFE, 0xCC, 0xE1, 0x3D, 0xB7, 0x16, 0xB6, 0x14, 0xC2, 0x28, 0xFB, - 0x2C, 0x05, - 0x2B, 0x67, 0x9A, 0x76, 0x2A, 0xBE, 0x04, 0xC3, 0xAA, 0x44, 0x13, 0x26, 0x49, 0x86, - 0x06, 0x99, - 0x9C, 0x42, 0x50, 0xF4, 0x91, 0xEF, 0x98, 0x7A, 0x33, 0x54, 0x0B, 0x43, 0xED, 0xCF, - 0xAC, 0x62, - 0xE4, 0xB3, 0x1C, 0xA9, 0xC9, 0x08, 0xE8, 0x95, 0x80, 0xDF, 0x94, 0xFA, 0x75, 0x8F, - 0x3F, 0xA6, - 0x47, 0x07, 0xA7, 0xFC, 0xF3, 0x73, 0x17, 0xBA, 0x83, 0x59, 0x3C, 0x19, 0xE6, 0x85, - 0x4F, 0xA8, - 0x68, 0x6B, 0x81, 0xB2, 0x71, 0x64, 0xDA, 0x8B, 0xF8, 0xEB, 0x0F, 0x4B, 0x70, 0x56, - 0x9D, 0x35, - 0x1E, 0x24, 0x0E, 0x5E, 0x63, 0x58, 0xD1, 0xA2, 0x25, 0x22, 0x7C, 0x3B, 0x01, 0x21, - 0x78, 0x87, - 0xD4, 0x00, 0x46, 0x57, 0x9F, 0xD3, 0x27, 0x52, 0x4C, 0x36, 0x02, 0xE7, 0xA0, 0xC4, - 0xC8, 0x9E, - 0xEA, 0xBF, 0x8A, 0xD2, 0x40, 0xC7, 0x38, 0xB5, 0xA3, 0xF7, 0xF2, 0xCE, 0xF9, 0x61, - 0x15, 0xA1, - 0xE0, 0xAE, 0x5D, 0xA4, 0x9B, 0x34, 0x1A, 0x55, 0xAD, 0x93, 0x32, 0x30, 0xF5, 0x8C, - 0xB1, 0xE3, - 0x1D, 0xF6, 0xE2, 0x2E, 0x82, 0x66, 0xCA, 0x60, 0xC0, 0x29, 0x23, 0xAB, 0x0D, 0x53, - 0x4E, 0x6F, - 0xD5, 0xDB, 0x37, 0x45, 0xDE, 0xFD, 0x8E, 0x2F, 0x03, 0xFF, 0x6A, 0x72, 0x6D, 0x6C, - 0x5B, 0x51, - 0x8D, 0x1B, 0xAF, 0x92, 0xBB, 0xDD, 0xBC, 0x7F, 0x11, 0xD9, 0x5C, 0x41, 0x1F, 0x10, - 0x5A, 0xD8, - 0x0A, 0xC1, 0x31, 0x88, 0xA5, 0xCD, 0x7B, 0xBD, 0x2D, 0x74, 0xD0, 0x12, 0xB8, 0xE5, - 0xB4, 0xB0, - 0x89, 0x69, 0x97, 0x4A, 0x0C, 0x96, 0x77, 0x7E, 0x65, 0xB9, 0xF1, 0x09, 0xC5, 0x6E, - 0xC6, 0x84, - 0x18, 0xF0, 0x7D, 0xEC, 0x3A, 0xDC, 0x4D, 0x20, 0x79, 0xEE, 0x5F, 0x3E, 0xD7, 0xCB, - 0x39, 0x48 - } - DEFINE ROL32(dword, n) { - count := n % 32 - dest := (dword << count) | (dword >> (32-count)) - RETURN dest - } - DEFINE SBOX_BYTE(dword, i) { - RETURN sbox[dword.byte[i]] - } - DEFINE lower_t(dword) { - tmp.byte[0] := SBOX_BYTE(dword, 0) - tmp.byte[1] := SBOX_BYTE(dword, 1) - tmp.byte[2] := SBOX_BYTE(dword, 2) - tmp.byte[3] := SBOX_BYTE(dword, 3) - RETURN tmp - } - DEFINE L_RND(dword) { - tmp := dword - tmp := tmp ^ ROL32(dword, 2) - tmp := tmp ^ ROL32(dword, 10) - tmp := tmp ^ ROL32(dword, 18) - tmp := tmp ^ ROL32(dword, 24) - RETURN tmp - } - DEFINE T_RND(dword) { - RETURN L_RND(lower_t(dword)) - } - DEFINE F_RND(X0, X1, X2, X3, round_key) { - RETURN X0 ^ T_RND(X1 ^ X2 ^ X3 ^ round_key) - } - P.dword[0] := __A.dword[0] - P.dword[1] := __A.dword[1] - P.dword[2] := __A.dword[2] - P.dword[3] := __A.dword[3] - C.dword[0] := F_RND(P.dword[0], P.dword[1], P.dword[2], P.dword[3], __B.dword[0]) - C.dword[1] := F_RND(P.dword[1], P.dword[2], P.dword[3], C.dword[0], __B.dword[1]) - C.dword[2] := F_RND(P.dword[2], P.dword[3], C.dword[0], C.dword[1], __B.dword[2]) - C.dword[3] := F_RND(P.dword[3], C.dword[0], C.dword[1], C.dword[2], __B.dword[3]) - dst.dword[0] := C.dword[0] - dst.dword[1] := C.dword[1] - dst.dword[2] := C.dword[2] - dst.dword[3] := C.dword[3] - dst[MAX:128] := 0 + + + SM4 + AVX +
immintrin.h
+ Cryptography +
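Assuming the unstable `sm4` bindings follow the Intel prototypes, a hedged sketch chaining the 128-bit key schedule into four encryption rounds (the 128-bit encryption form itself follows below):

    #[cfg(target_arch = "x86_64")]
    #[target_feature(enable = "sm4,avx")]
    unsafe fn sm4_key_then_rounds(
        block: core::arch::x86_64::__m128i, // four data dwords
        keys: core::arch::x86_64::__m128i,  // previous four round keys
        ck: core::arch::x86_64::__m128i,    // the four CK constants
    ) -> core::arch::x86_64::__m128i {
        use core::arch::x86_64::{_mm_sm4key4_epi32, _mm_sm4rnds4_epi32};
        let rk = _mm_sm4key4_epi32(keys, ck); // next four round keys
        _mm_sm4rnds4_epi32(block, rk)         // four rounds on one lane
    }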
+
+
+ This intrinsic performs four rounds of SM4 encryption. The intrinsic operates on independent 128-bit lanes. The calculated results are stored in "dst".
+
+
+BYTE sbox[256] = {
+0xD6, 0x90, 0xE9, 0xFE, 0xCC, 0xE1, 0x3D, 0xB7, 0x16, 0xB6, 0x14, 0xC2, 0x28, 0xFB, 0x2C, 0x05,
+0x2B, 0x67, 0x9A, 0x76, 0x2A, 0xBE, 0x04, 0xC3, 0xAA, 0x44, 0x13, 0x26, 0x49, 0x86, 0x06, 0x99,
+0x9C, 0x42, 0x50, 0xF4, 0x91, 0xEF, 0x98, 0x7A, 0x33, 0x54, 0x0B, 0x43, 0xED, 0xCF, 0xAC, 0x62,
+0xE4, 0xB3, 0x1C, 0xA9, 0xC9, 0x08, 0xE8, 0x95, 0x80, 0xDF, 0x94, 0xFA, 0x75, 0x8F, 0x3F, 0xA6,
+0x47, 0x07, 0xA7, 0xFC, 0xF3, 0x73, 0x17, 0xBA, 0x83, 0x59, 0x3C, 0x19, 0xE6, 0x85, 0x4F, 0xA8,
+0x68, 0x6B, 0x81, 0xB2, 0x71, 0x64, 0xDA, 0x8B, 0xF8, 0xEB, 0x0F, 0x4B, 0x70, 0x56, 0x9D, 0x35,
+0x1E, 0x24, 0x0E, 0x5E, 0x63, 0x58, 0xD1, 0xA2, 0x25, 0x22, 0x7C, 0x3B, 0x01, 0x21, 0x78, 0x87,
+0xD4, 0x00, 0x46, 0x57, 0x9F, 0xD3, 0x27, 0x52, 0x4C, 0x36, 0x02, 0xE7, 0xA0, 0xC4, 0xC8, 0x9E,
+0xEA, 0xBF, 0x8A, 0xD2, 0x40, 0xC7, 0x38, 0xB5, 0xA3, 0xF7, 0xF2, 0xCE, 0xF9, 0x61, 0x15, 0xA1,
+0xE0, 0xAE, 0x5D, 0xA4, 0x9B, 0x34, 0x1A, 0x55, 0xAD, 0x93, 0x32, 0x30, 0xF5, 0x8C, 0xB1, 0xE3,
+0x1D, 0xF6, 0xE2, 0x2E, 0x82, 0x66, 0xCA, 0x60, 0xC0, 0x29, 0x23, 0xAB, 0x0D, 0x53, 0x4E, 0x6F,
+0xD5, 0xDB, 0x37, 0x45, 0xDE, 0xFD, 0x8E, 0x2F, 0x03, 0xFF, 0x6A, 0x72, 0x6D, 0x6C, 0x5B, 0x51,
+0x8D, 0x1B, 0xAF, 0x92, 0xBB, 0xDD, 0xBC, 0x7F, 0x11, 0xD9, 0x5C, 0x41, 0x1F, 0x10, 0x5A, 0xD8,
+0x0A, 0xC1, 0x31, 0x88, 0xA5, 0xCD, 0x7B, 0xBD, 0x2D, 0x74, 0xD0, 0x12, 0xB8, 0xE5, 0xB4, 0xB0,
+0x89, 0x69, 0x97, 0x4A, 0x0C, 0x96, 0x77, 0x7E, 0x65, 0xB9, 0xF1, 0x09, 0xC5, 0x6E, 0xC6, 0x84,
+0x18, 0xF0, 0x7D, 0xEC, 0x3A, 0xDC, 0x4D, 0x20, 0x79, 0xEE, 0x5F, 0x3E, 0xD7, 0xCB, 0x39, 0x48
+}
+DEFINE ROL32(dword, n) {
+ count := n % 32
+ dest := (dword << count) | (dword >> (32-count))
+ RETURN dest
+}
+DEFINE SBOX_BYTE(dword, i) {
+ RETURN sbox[dword.byte[i]]
+}
+DEFINE lower_t(dword) {
+ tmp.byte[0] := SBOX_BYTE(dword, 0)
+ tmp.byte[1] := SBOX_BYTE(dword, 1)
+ tmp.byte[2] := SBOX_BYTE(dword, 2)
+ tmp.byte[3] := SBOX_BYTE(dword, 3)
+ RETURN tmp
+}
+DEFINE L_RND(dword) {
+ tmp := dword
+ tmp := tmp ^ ROL32(dword, 2)
+ tmp := tmp ^ ROL32(dword, 10)
+ tmp := tmp ^ ROL32(dword, 18)
+ tmp := tmp ^ ROL32(dword, 24)
+ RETURN tmp
+}
+DEFINE T_RND(dword) {
+ RETURN L_RND(lower_t(dword))
+}
+DEFINE F_RND(X0, X1, X2, X3, round_key) {
+ RETURN X0 ^ T_RND(X1 ^ X2 ^ X3 ^ round_key)
+}
+P.dword[0] := __A.dword[0]
+P.dword[1] := __A.dword[1]
+P.dword[2] := __A.dword[2]
+P.dword[3] := __A.dword[3]
+C.dword[0] := F_RND(P.dword[0], P.dword[1], P.dword[2], P.dword[3], __B.dword[0])
+C.dword[1] := F_RND(P.dword[1], P.dword[2], P.dword[3], C.dword[0], __B.dword[1])
+C.dword[2] := F_RND(P.dword[2], P.dword[3], C.dword[0], C.dword[1], __B.dword[2])
+C.dword[3] := F_RND(P.dword[3], C.dword[0], C.dword[1], C.dword[2], __B.dword[3])
+dst.dword[0] := C.dword[0]
+dst.dword[1] := C.dword[1]
+dst.dword[2] := C.dword[2]
+dst.dword[3] := C.dword[3]
+dst[MAX:128] := 0

-
-
- SM4
-
immintrin.h
- Cryptography -
- - - - Compute the inverse cosine of packed double-precision (64-bit) floating-point - elements in "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 1 - i := j*64 - dst[i+63:i] := ACOS(a[i+63:i]) - ENDFOR - dst[MAX:128] := 0 - - SSE -
immintrin.h
- Trigonometry + + + SM4 + AVX +
immintrin.h
+ Cryptography +
+ + + + Compute the inverse cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := ACOS(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + + SSE +
immintrin.h
+ Trigonometry
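Unlike the entries above, these trigonometry records describe SVML library routines rather than instructions, and stdarch does not bind SVML; an equivalent in Rust simply applies the scalar operation per lane (the results, not the inputs, are in radians):

    /// Scalar sketch of _mm_acos_pd's per-lane semantics.
    fn acos_pd(a: [f64; 2]) -> [f64; 2] {
        [a[0].acos(), a[1].acos()]
    }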
- - - Compute the inverse cosine of packed single-precision (32-bit) floating-point - elements in "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := ACOS(a[i+31:i]) - ENDFOR - dst[MAX:128] := 0 - - SSE -
immintrin.h
- Trigonometry + + + Compute the inverse cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ACOS(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + + SSE +
immintrin.h
+ Trigonometry
- - - Compute the inverse hyperbolic cosine of packed double-precision (64-bit) - floating-point elements in "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 1 - i := j*64 - dst[i+63:i] := ACOSH(a[i+63:i]) - ENDFOR - dst[MAX:128] := 0 - - SSE -
immintrin.h
- Trigonometry + + + Compute the inverse hyperbolic cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := ACOSH(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + + SSE +
immintrin.h
+ Trigonometry
- - - Compute the inverse hyperbolic cosine of packed single-precision (32-bit) - floating-point elements in "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := ACOSH(a[i+31:i]) - ENDFOR - dst[MAX:128] := 0 - - SSE -
immintrin.h
- Trigonometry + + + Compute the inverse hyperbolic cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ACOSH(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + + SSE +
immintrin.h
+ Trigonometry
- - - Compute the inverse sine of packed double-precision (64-bit) floating-point - elements in "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 1 - i := j*64 - dst[i+63:i] := ASIN(a[i+63:i]) - ENDFOR - dst[MAX:128] := 0 - - SSE -
immintrin.h
- Trigonometry + + + Compute the inverse sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := ASIN(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + + SSE +
immintrin.h
+ Trigonometry
- - - Compute the inverse sine of packed single-precision (32-bit) floating-point - elements in "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := ASIN(a[i+31:i]) - ENDFOR - dst[MAX:128] := 0 - - SSE -
immintrin.h
- Trigonometry + + + Compute the inverse sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ASIN(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + + SSE +
immintrin.h
+ Trigonometry
- - - Compute the inverse hyperbolic sine of packed double-precision (64-bit) - floating-point elements in "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 1 - i := j*64 - dst[i+63:i] := ASINH(a[i+63:i]) - ENDFOR - dst[MAX:128] := 0 - - SSE -
immintrin.h
- Trigonometry + + + Compute the inverse hyperbolic sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := ASINH(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + + SSE +
immintrin.h
+ Trigonometry
- - - Compute the inverse hyperbolic sine of packed single-precision (32-bit) - floating-point elements in "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := ASINH(a[i+31:i]) - ENDFOR - dst[MAX:128] := 0 - - SSE -
immintrin.h
- Trigonometry + + + Compute the inverse hyperbolic sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ASINH(a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + + SSE +
immintrin.h
+ Trigonometry
- - - Compute the inverse tangent of packed double-precision (64-bit) floating-point - elements in "a" expressed in radians, and store the results in "dst". - - FOR j := 0 to 1 - i := j*64 - dst[i+63:i] := ATAN(a[i+63:i]) - ENDFOR - dst[MAX:128] := 0 - - SSE -
immintrin.h
- Trigonometry + + + Compute the inverse tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := ATAN(a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + + SSE +
immintrin.h
+ Trigonometry
-
-
-	Compute the inverse tangent of packed single-precision (32-bit) floating-point
-	elements in "a" expressed in radians, and store the results in "dst".
-
-	FOR j := 0 to 3
-		i := j*32
-		dst[i+31:i] := ATAN(a[i+31:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Trigonometry
+
+
+	Compute the inverse tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".
+
+FOR j := 0 to 3
+	i := j*32
+	dst[i+31:i] := ATAN(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Trigonometry
-
-
-
-	Compute the inverse tangent of packed double-precision (64-bit) floating-point
-	elements in "a" divided by packed elements in "b", and store the results in "dst"
-	expressed in radians.
-
-	FOR j := 0 to 1
-		i := j*64
-		dst[i+63:i] := ATAN2(a[i+63:i], b[i+63:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Trigonometry
+
+
+
+	Compute the inverse tangent of packed double-precision (64-bit) floating-point elements in "a" divided by packed elements in "b", and store the results in "dst" expressed in radians.
+
+FOR j := 0 to 1
+	i := j*64
+	dst[i+63:i] := ATAN2(a[i+63:i], b[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Trigonometry
-
-
-
-	Compute the inverse tangent of packed single-precision (32-bit) floating-point
-	elements in "a" divided by packed elements in "b", and store the results in "dst"
-	expressed in radians.
-
-	FOR j := 0 to 3
-		i := j*32
-		dst[i+31:i] := ATAN2(a[i+31:i], b[i+31:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Trigonometry
+
+
+
+	Compute the inverse tangent of packed single-precision (32-bit) floating-point elements in "a" divided by packed elements in "b", and store the results in "dst" expressed in radians.
+
+FOR j := 0 to 3
+	i := j*32
+	dst[i+31:i] := ATAN2(a[i+31:i], b[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Trigonometry
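
For a scalar picture of the ATAN2 entries above: the result quadrant is chosen from the signs of both inputs, which is exactly the contract of Rust's f32::atan2. A minimal reference loop for tests might look like this (the helper name is illustrative, not something stdarch defines):

    // Scalar model of the 4-lane ATAN2 loop; a[j] is the "y" operand,
    // b[j] the "x" operand, matching ATAN2(a, b) in the pseudocode.
    fn atan2_ps_ref(a: [f32; 4], b: [f32; 4]) -> [f32; 4] {
        core::array::from_fn(|j| a[j].atan2(b[j]))
    }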
-
-
-	Compute the inverse hyperbolic tangent of packed double-precision (64-bit)
-	floating-point elements in "a" expressed in radians, and store the results in "dst".
-
-	FOR j := 0 to 1
-		i := j*64
-		dst[i+63:i] := ATANH(a[i+63:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Trigonometry
+
+
+	Compute the inverse hyperbolic tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".
+
+FOR j := 0 to 1
+	i := j*64
+	dst[i+63:i] := ATANH(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Trigonometry
-
-
-	Compute the inverse hyperbolic tangent of packed single-precision (32-bit)
-	floating-point elements in "a" expressed in radians, and store the results in "dst".
-
-	FOR j := 0 to 3
-		i := j*32
-		dst[i+31:i] := ATANH(a[i+31:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Trigonometry
+
+
+	Compute the inverse hyperbolic tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".
+
+FOR j := 0 to 3
+	i := j*32
+	dst[i+31:i] := ATANH(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Trigonometry
-
-
-	Compute the cosine of packed double-precision (64-bit) floating-point elements
-	in "a" expressed in radians, and store the results in "dst".
-
-	FOR j := 0 to 1
-		i := j*64
-		dst[i+63:i] := COS(a[i+63:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Trigonometry
+
+
+	Compute the cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".
+
+FOR j := 0 to 1
+	i := j*64
+	dst[i+63:i] := COS(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Trigonometry
-
-
-	Compute the cosine of packed single-precision (32-bit) floating-point elements
-	in "a" expressed in radians, and store the results in "dst".
-
-	FOR j := 0 to 3
-		i := j*32
-		dst[i+31:i] := COS(a[i+31:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Trigonometry
+
+
+	Compute the cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".
+
+FOR j := 0 to 3
+	i := j*32
+	dst[i+31:i] := COS(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Trigonometry
-
-
-	Compute the cosine of packed double-precision (64-bit) floating-point elements
-	in "a" expressed in degrees, and store the results in "dst".
-	FOR j := 0 to 1
-		i := j*64
-		dst[i+63:i] := COSD(a[i+63:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Trigonometry
+
+
+	Compute the cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst".
+	FOR j := 0 to 1
+		i := j*64
+		dst[i+63:i] := COSD(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Trigonometry
-
-
-	Compute the cosine of packed single-precision (32-bit) floating-point elements
-	in "a" expressed in degrees, and store the results in "dst".
-	FOR j := 0 to 3
-		i := j*32
-		dst[i+31:i] := COSD(a[i+31:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Trigonometry
+
+
+	Compute the cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst".
+	FOR j := 0 to 3
+		i := j*32
+		dst[i+31:i] := COSD(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Trigonometry
-
-
-	Compute the hyperbolic cosine of packed double-precision (64-bit)
-	floating-point elements in "a" expressed in radians, and store the results in "dst".
-
-	FOR j := 0 to 1
-		i := j*64
-		dst[i+63:i] := COSH(a[i+63:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Trigonometry
+
+
+	Compute the hyperbolic cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".
+
+FOR j := 0 to 1
+	i := j*64
+	dst[i+63:i] := COSH(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Trigonometry
-
-
-	Compute the hyperbolic cosine of packed single-precision (32-bit)
-	floating-point elements in "a" expressed in radians, and store the results in "dst".
-
-	FOR j := 0 to 3
-		i := j*32
-		dst[i+31:i] := COSH(a[i+31:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Trigonometry
+
+
+	Compute the hyperbolic cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".
+
+FOR j := 0 to 3
+	i := j*32
+	dst[i+31:i] := COSH(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Trigonometry
-
-
-
-	Compute the length of the hypotenous of a right triangle, with the lengths of
-	the other two sides of the triangle stored as packed double-precision (64-bit)
-	floating-point elements in "a" and "b", and store the results in "dst".
-
-	FOR j := 0 to 1
-		i := j*64
-		dst[i+63:i] := SQRT(POW(a[i+63:i], 2.0) + POW(b[i+63:i], 2.0))
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Trigonometry
+
+
+
+	Compute the length of the hypotenuse of a right triangle, with the lengths of the other two sides of the triangle stored as packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst".
+
+FOR j := 0 to 1
+	i := j*64
+	dst[i+63:i] := SQRT(POW(a[i+63:i], 2.0) + POW(b[i+63:i], 2.0))
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Trigonometry
-
-
-
-	Compute the length of the hypotenous of a right triangle, with the lengths of
-	the other two sides of the triangle stored as packed single-precision (32-bit)
-	floating-point elements in "a" and "b", and store the results in "dst".
-
-	FOR j := 0 to 3
-		i := j*32
-		dst[i+31:i] := SQRT(POW(a[i+31:i], 2.0) + POW(b[i+31:i], 2.0))
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Trigonometry
+
+
+
+	Compute the length of the hypotenuse of a right triangle, with the lengths of the other two sides of the triangle stored as packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst".
+
+FOR j := 0 to 3
+	i := j*32
+	dst[i+31:i] := SQRT(POW(a[i+31:i], 2.0) + POW(b[i+31:i], 2.0))
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Trigonometry
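
The HYPOT pseudocode spells out SQRT(POW(a, 2.0) + POW(b, 2.0)) literally; a scalar cross-check would normally reach for f32::hypot, which computes the same quantity without the intermediate overflow/underflow of the naive square-and-add (helper name illustrative):

    // Scalar model of the 4-lane HYPOT loop.
    fn hypot_ps_ref(a: [f32; 4], b: [f32; 4]) -> [f32; 4] {
        core::array::from_fn(|j| a[j].hypot(b[j]))
    }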
-
-
-	Compute the sine of packed double-precision (64-bit) floating-point elements in
-	"a" expressed in radians, and store the results in "dst".
-
-	FOR j := 0 to 1
-		i := j*64
-		dst[i+63:i] := SIN(a[i+63:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Trigonometry
+
+
+	Compute the sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".
+
+FOR j := 0 to 1
+	i := j*64
+	dst[i+63:i] := SIN(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Trigonometry
-
-
-	Compute the sine of packed single-precision (32-bit) floating-point elements in
-	"a" expressed in radians, and store the results in "dst".
-
-	FOR j := 0 to 3
-		i := j*32
-		dst[i+31:i] := SIN(a[i+31:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Trigonometry
+
+
+	Compute the sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".
+
+FOR j := 0 to 3
+	i := j*32
+	dst[i+31:i] := SIN(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Trigonometry
-
-
-
-	Compute the sine and cosine of packed double-precision (64-bit) floating-point
-	elements in "a" expressed in radians, store the sine in "dst", and store the cosine into
-	memory at "mem_addr".
-
-	FOR j := 0 to 1
-		i := j*64
-		dst[i+63:i] := SIN(a[i+63:i])
-		MEM[mem_addr+i+63:mem_addr+i] := COS(a[i+63:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Trigonometry
+
+
+
+	Compute the sine and cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, store the sine in "dst", and store the cosine into memory at "mem_addr".
+
+FOR j := 0 to 1
+	i := j*64
+	dst[i+63:i] := SIN(a[i+63:i])
+	MEM[mem_addr+i+63:mem_addr+i] := COS(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Trigonometry
-
-
-
-	Compute the sine and cosine of packed single-precision (32-bit) floating-point
-	elements in "a" expressed in radians, store the sine in "dst", and store the cosine into
-	memory at "mem_addr".
-
-	FOR j := 0 to 3
-		i := j*32
-		dst[i+31:i] := SIN(a[i+31:i])
-		MEM[mem_addr+i+31:mem_addr+i] := COS(a[i+31:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Trigonometry
+
+
+
+	Compute the sine and cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, store the sine in "dst", and store the cosine into memory at "mem_addr".
+
+FOR j := 0 to 3
+	i := j*32
+	dst[i+31:i] := SIN(a[i+31:i])
+	MEM[mem_addr+i+31:mem_addr+i] := COS(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Trigonometry
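
SINCOS is the one entry in this group with two results per lane: the sine comes back in "dst" and the cosine is written through "mem_addr". A scalar sketch of that shape, using std's combined f32::sin_cos (names illustrative):

    // Returns the sines and writes the cosines through `mem`,
    // mirroring dst and MEM[mem_addr+...] in the pseudocode.
    fn sincos_ps_ref(mem: &mut [f32; 4], a: [f32; 4]) -> [f32; 4] {
        let mut dst = [0.0f32; 4];
        for j in 0..4 {
            let (s, c) = a[j].sin_cos();
            dst[j] = s;
            mem[j] = c;
        }
        dst
    }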
-
-
-	Compute the sine of packed double-precision (64-bit) floating-point elements in
-	"a" expressed in degrees, and store the results in "dst".
-	FOR j := 0 to 1
-		i := j*64
-		dst[i+63:i] := SIND(a[i+63:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Trigonometry
+
+
+	Compute the sine of packed double-precision (64-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst".
+	FOR j := 0 to 1
+		i := j*64
+		dst[i+63:i] := SIND(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Trigonometry
-
-
-	Compute the sine of packed single-precision (32-bit) floating-point elements in
-	"a" expressed in degrees, and store the results in "dst".
-	FOR j := 0 to 3
-		i := j*32
-		dst[i+31:i] := SIND(a[i+31:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Trigonometry
+
+
+	Compute the sine of packed single-precision (32-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst".
+	FOR j := 0 to 3
+		i := j*32
+		dst[i+31:i] := SIND(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Trigonometry
-
-
-	Compute the hyperbolic sine of packed double-precision (64-bit) floating-point
-	elements in "a" expressed in radians, and store the results in "dst".
-
-	FOR j := 0 to 1
-		i := j*64
-		dst[i+63:i] := SINH(a[i+63:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Trigonometry
+
+
+	Compute the hyperbolic sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".
+
+FOR j := 0 to 1
+	i := j*64
+	dst[i+63:i] := SINH(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Trigonometry
-
-
-	Compute the hyperbolic sine of packed single-precision (32-bit) floating-point
-	elements in "a" expressed in radians, and store the results in "dst".
-
-	FOR j := 0 to 3
-		i := j*32
-		dst[i+31:i] := SINH(a[i+31:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Trigonometry
+
+
+	Compute the hyperbolic sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".
+
+FOR j := 0 to 3
+	i := j*32
+	dst[i+31:i] := SINH(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Trigonometry
-
-
-	Compute the tangent of packed double-precision (64-bit) floating-point elements
-	in "a" expressed in radians, and store the results in "dst".
-
-	FOR j := 0 to 1
-		i := j*64
-		dst[i+63:i] := TAN(a[i+63:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Trigonometry
+
+
+	Compute the tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".
+
+FOR j := 0 to 1
+	i := j*64
+	dst[i+63:i] := TAN(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Trigonometry
-
-
-	Compute the tangent of packed single-precision (32-bit) floating-point elements
-	in "a" expressed in radians, and store the results in "dst".
-
-	FOR j := 0 to 3
-		i := j*32
-		dst[i+31:i] := TAN(a[i+31:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Trigonometry
+
+
+	Compute the tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".
+
+FOR j := 0 to 3
+	i := j*32
+	dst[i+31:i] := TAN(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Trigonometry
-
-
-	Compute the tangent of packed double-precision (64-bit) floating-point elements
-	in "a" expressed in degrees, and store the results in "dst".
-	FOR j := 0 to 1
-		i := j*64
-		dst[i+63:i] := TAND(a[i+63:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Trigonometry
+
+
+	Compute the tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst".
+	FOR j := 0 to 1
+		i := j*64
+		dst[i+63:i] := TAND(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Trigonometry
-
-
-	Compute the tangent of packed single-precision (32-bit) floating-point elements
-	in "a" expressed in degrees, and store the results in "dst".
-	FOR j := 0 to 3
-		i := j*32
-		dst[i+31:i] := TAND(a[i+31:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Trigonometry
+
+
+	Compute the tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst".
+	FOR j := 0 to 3
+		i := j*32
+		dst[i+31:i] := TAND(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Trigonometry
-
-
-	Compute the hyperbolic tangent of packed double-precision (64-bit)
-	floating-point elements in "a" expressed in radians, and store the results in "dst".
-
-	FOR j := 0 to 1
-		i := j*64
-		dst[i+63:i] := TANH(a[i+63:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Trigonometry
+
+
+	Compute the hyperbolic tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".
+
+FOR j := 0 to 1
+	i := j*64
+	dst[i+63:i] := TANH(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Trigonometry
-
-
-	Compute the hyperbolic tangent of packed single-precision (32-bit)
-	floating-point elements in "a" expressed in radians, and store the results in "dst".
-
-	FOR j := 0 to 3
-		i := j*32
-		dst[i+31:i] := TANH(a[i+31:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Trigonometry
+
+
+	Compute the hyperbolic tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".
+
+FOR j := 0 to 3
+	i := j*32
+	dst[i+31:i] := TANH(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Trigonometry
-
-
-	Compute the cube root of packed double-precision (64-bit) floating-point
-	elements in "a", and store the results in "dst".
-	FOR j := 0 to 1
-		i := j*64
-		dst[i+63:i] := CubeRoot(a[i+63:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Elementary Math Functions
+
+
+	Compute the cube root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".
+	FOR j := 0 to 1
+		i := j*64
+		dst[i+63:i] := CubeRoot(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Elementary Math Functions
-
-
-	Compute the cube root of packed single-precision (32-bit) floating-point
-	elements in "a", and store the results in "dst".
-	FOR j := 0 to 3
-		i := j*32
-		dst[i+31:i] := CubeRoot(a[i+31:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Elementary Math Functions
+
+
+	Compute the cube root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".
+	FOR j := 0 to 3
+		i := j*32
+		dst[i+31:i] := CubeRoot(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Elementary Math Functions
- - - Compute the exponential value of "e" raised to the power of packed complex - numbers in "a", and store the complex results in "dst". Each complex number is composed - of two adjacent single-precision (32-bit) floating-point elements, which defines the - complex number "complex = vec.fp32[0] + i * vec.fp32[1]". - - DEFINE CEXP(a[31:0], b[31:0]) { - result[31:0] := POW(FP32(e), a[31:0]) * COS(b[31:0]) - result[63:32] := POW(FP32(e), a[31:0]) * SIN(b[31:0]) - RETURN result - } - FOR j := 0 to 1 - i := j*64 - dst[i+63:i] := CEXP(a[i+31:i], a[i+63:i+32]) - ENDFOR - dst[MAX:128] := 0 - - SSE -
immintrin.h
- Elementary Math Functions + + + Compute the exponential value of "e" raised to the power of packed complex numbers in "a", and store the complex results in "dst". Each complex number is composed of two adjacent single-precision (32-bit) floating-point elements, which defines the complex number "complex = vec.fp32[0] + i * vec.fp32[1]". + +DEFINE CEXP(a[31:0], b[31:0]) { + result[31:0] := POW(FP32(e), a[31:0]) * COS(b[31:0]) + result[63:32] := POW(FP32(e), a[31:0]) * SIN(b[31:0]) + RETURN result +} +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := CEXP(a[i+31:i], a[i+63:i+32]) +ENDFOR +dst[MAX:128] := 0 + + SSE +
immintrin.h
+ Elementary Math Functions
-
-
-	Compute the natural logarithm of packed complex numbers in "a", and store the
-	complex results in "dst". Each complex number is composed of two adjacent
-	single-precision (32-bit) floating-point elements, which defines the complex number
-	"complex = vec.fp32[0] + i * vec.fp32[1]".
-
-	DEFINE CLOG(a[31:0], b[31:0]) {
-		result[31:0] := LOG(SQRT(POW(a, 2.0) + POW(b, 2.0)))
-		result[63:32] := ATAN2(b, a)
-		RETURN result
-	}
-	FOR j := 0 to 1
-		i := j*64
-		dst[i+63:i] := CLOG(a[i+31:i], a[i+63:i+32])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Elementary Math Functions
+
+
+	Compute the natural logarithm of packed complex numbers in "a", and store the complex results in "dst". Each complex number is composed of two adjacent single-precision (32-bit) floating-point elements, which defines the complex number "complex = vec.fp32[0] + i * vec.fp32[1]".
+
+DEFINE CLOG(a[31:0], b[31:0]) {
+	result[31:0] := LOG(SQRT(POW(a, 2.0) + POW(b, 2.0)))
+	result[63:32] := ATAN2(b, a)
+	RETURN result
+}
+FOR j := 0 to 1
+	i := j*64
+	dst[i+63:i] := CLOG(a[i+31:i], a[i+63:i+32])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Elementary Math Functions
-
-
-	Compute the square root of packed complex snumbers in "a", and store the
-	complex results in "dst". Each complex number is composed of two adjacent
-	single-precision (32-bit) floating-point elements, which defines the complex number
-	"complex = vec.fp32[0] + i * vec.fp32[1]".
-
-	DEFINE CSQRT(a[31:0], b[31:0]) {
-		sign[31:0] := (b < 0.0) ? -FP32(1.0) : FP32(1.0)
-		result[31:0] := SQRT((a + SQRT(POW(a, 2.0) + POW(b, 2.0))) / 2.0)
-		result[63:32] := sign * SQRT((-a + SQRT(POW(a, 2.0) + POW(b, 2.0))) / 2.0)
-		RETURN result
-	}
-	FOR j := 0 to 1
-		i := j*64
-		dst[i+63:i] := CSQRT(a[i+31:i], a[i+63:i+32])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Elementary Math Functions
+
+
+	Compute the square root of packed complex numbers in "a", and store the complex results in "dst". Each complex number is composed of two adjacent single-precision (32-bit) floating-point elements, which defines the complex number "complex = vec.fp32[0] + i * vec.fp32[1]".
+
+DEFINE CSQRT(a[31:0], b[31:0]) {
+	sign[31:0] := (b < 0.0) ? -FP32(1.0) : FP32(1.0)
+	result[31:0] := SQRT((a + SQRT(POW(a, 2.0) + POW(b, 2.0))) / 2.0)
+	result[63:32] := sign * SQRT((-a + SQRT(POW(a, 2.0) + POW(b, 2.0))) / 2.0)
+	RETURN result
+}
+FOR j := 0 to 1
+	i := j*64
+	dst[i+63:i] := CSQRT(a[i+31:i], a[i+63:i+32])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Elementary Math Functions
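
The CSQRT define above is the textbook principal square root of a complex number, built from the modulus r = sqrt(a^2 + b^2). A scalar sketch of the same formula (function name illustrative; (re, im) stands for vec.fp32[0] and vec.fp32[1]):

    // result = (sqrt((r + re)/2), sign(im) * sqrt((r - re)/2)), r = |re + i*im|
    fn csqrt_ref(re: f32, im: f32) -> (f32, f32) {
        let r = re.hypot(im);
        let sign = if im < 0.0 { -1.0f32 } else { 1.0 };
        (((r + re) / 2.0).sqrt(), sign * ((r - re) / 2.0).sqrt())
    }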
- - - Compute the exponential value of "e" raised to the power of packed - double-precision (64-bit) floating-point elements in "a", and store the results in - "dst". - - FOR j := 0 to 1 - i := j*64 - dst[i+63:i] := POW(e, a[i+63:i]) - ENDFOR - dst[MAX:128] := 0 - - SSE -
immintrin.h
- Elementary Math Functions + + + Compute the exponential value of "e" raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := POW(e, a[i+63:i]) +ENDFOR +dst[MAX:128] := 0 + + SSE +
immintrin.h
+ Elementary Math Functions
- - - Compute the exponential value of "e" raised to the power of packed - single-precision (32-bit) floating-point elements in "a", and store the results in - "dst". - - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := POW(FP32(e), a[i+31:i]) - ENDFOR - dst[MAX:128] := 0 - - SSE -
immintrin.h
- Elementary Math Functions + + + Compute the exponential value of "e" raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := POW(FP32(e), a[i+31:i]) +ENDFOR +dst[MAX:128] := 0 + + SSE +
immintrin.h
+ Elementary Math Functions
-
-
-	Compute the exponential value of 10 raised to the power of packed
-	double-precision (64-bit) floating-point elements in "a", and store the results in
-	"dst".
-
-	FOR j := 0 to 1
-		i := j*64
-		dst[i+63:i] := POW(10.0, a[i+63:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Elementary Math Functions
+
+
+	Compute the exponential value of 10 raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".
+
+FOR j := 0 to 1
+	i := j*64
+	dst[i+63:i] := POW(10.0, a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Elementary Math Functions
-
-
-	Compute the exponential value of 10 raised to the power of packed
-	single-precision (32-bit) floating-point elements in "a", and store the results in
-	"dst".
-
-	FOR j := 0 to 3
-		i := j*32
-		dst[i+31:i] := POW(FP32(10.0), a[i+31:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Elementary Math Functions
+
+
+	Compute the exponential value of 10 raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".
+
+FOR j := 0 to 3
+	i := j*32
+	dst[i+31:i] := POW(FP32(10.0), a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Elementary Math Functions
-
-
-	Compute the exponential value of 2 raised to the power of packed
-	double-precision (64-bit) floating-point elements in "a", and store the results in
-	"dst".
-
-	FOR j := 0 to 1
-		i := j*64
-		dst[i+63:i] := POW(2.0, a[i+63:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Elementary Math Functions
+
+
+	Compute the exponential value of 2 raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".
+
+FOR j := 0 to 1
+	i := j*64
+	dst[i+63:i] := POW(2.0, a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Elementary Math Functions
-
-
-	Compute the exponential value of 2 raised to the power of packed
-	single-precision (32-bit) floating-point elements in "a", and store the results in
-	"dst".
-
-	FOR j := 0 to 3
-		i := j*32
-		dst[i+31:i] := POW(FP32(2.0), a[i+31:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Elementary Math Functions
+
+
+	Compute the exponential value of 2 raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".
+
+FOR j := 0 to 3
+	i := j*32
+	dst[i+31:i] := POW(FP32(2.0), a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Elementary Math Functions
- - - Compute the exponential value of "e" raised to the power of packed - double-precision (64-bit) floating-point elements in "a", subtract one from each - element, and store the results in "dst". - - FOR j := 0 to 1 - i := j*64 - dst[i+63:i] := POW(e, a[i+63:i]) - 1.0 - ENDFOR - dst[MAX:128] := 0 - - SSE -
immintrin.h
- Elementary Math Functions + + + Compute the exponential value of "e" raised to the power of packed double-precision (64-bit) floating-point elements in "a", subtract one from each element, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := POW(e, a[i+63:i]) - 1.0 +ENDFOR +dst[MAX:128] := 0 + + SSE +
immintrin.h
+ Elementary Math Functions
- - - Compute the exponential value of "e" raised to the power of packed - single-precision (32-bit) floating-point elements in "a", subtract one from each - element, and store the results in "dst". - - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := POW(FP32(e), a[i+31:i]) - 1.0 - ENDFOR - dst[MAX:128] := 0 - - SSE -
immintrin.h
- Elementary Math Functions + + + Compute the exponential value of "e" raised to the power of packed single-precision (32-bit) floating-point elements in "a", subtract one from each element, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := POW(FP32(e), a[i+31:i]) - 1.0 +ENDFOR +dst[MAX:128] := 0 + + SSE +
immintrin.h
+ Elementary Math Functions
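
The EXPM1 entries exist because evaluating POW(e, x) - 1.0 directly cancels catastrophically for small x; a dedicated routine keeps the low-order bits. The same pair exists in std Rust, which makes the motivation easy to demonstrate:

    // For tiny x, `naive` loses most of its significant digits to
    // cancellation, while `fused` is correctly rounded.
    fn expm1_demo(x: f64) -> (f64, f64) {
        (x.exp() - 1.0, x.exp_m1())
    }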
-
-
-	Compute the inverse cube root of packed double-precision (64-bit)
-	floating-point elements in "a", and store the results in "dst".
-	FOR j := 0 to 1
-		i := j*64
-		dst[i+63:i] := InvCubeRoot(a[i+63:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Elementary Math Functions
+
+
+	Compute the inverse cube root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".
+	FOR j := 0 to 1
+		i := j*64
+		dst[i+63:i] := InvCubeRoot(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Elementary Math Functions
-
-
-	Compute the inverse cube root of packed single-precision (32-bit)
-	floating-point elements in "a", and store the results in "dst".
-	FOR j := 0 to 3
-		i := j*32
-		dst[i+31:i] := InvCubeRoot(a[i+31:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Elementary Math Functions
+
+
+	Compute the inverse cube root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".
+	FOR j := 0 to 3
+		i := j*32
+		dst[i+31:i] := InvCubeRoot(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Elementary Math Functions
-
-
-	Compute the inverse square root of packed double-precision (64-bit)
-	floating-point elements in "a", and store the results in "dst".
-	FOR j := 0 to 1
-		i := j*64
-		dst[i+63:i] := InvSQRT(a[i+63:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Elementary Math Functions
+
+
+	Compute the inverse square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".
+	FOR j := 0 to 1
+		i := j*64
+		dst[i+63:i] := InvSQRT(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Elementary Math Functions
-
-
-	Compute the inverse square root of packed single-precision (32-bit)
-	floating-point elements in "a", and store the results in "dst".
-	FOR j := 0 to 3
-		i := j*32
-		dst[i+31:i] := InvSQRT(a[i+31:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Elementary Math Functions
+
+
+	Compute the inverse square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".
+	FOR j := 0 to 3
+		i := j*32
+		dst[i+31:i] := InvSQRT(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Elementary Math Functions
-
-
-	Compute the natural logarithm of packed double-precision (64-bit)
-	floating-point elements in "a", and store the results in "dst".
-
-	FOR j := 0 to 1
-		i := j*64
-		dst[i+63:i] := LOG(a[i+63:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Elementary Math Functions
+
+
+	Compute the natural logarithm of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".
+
+FOR j := 0 to 1
+	i := j*64
+	dst[i+63:i] := LOG(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Elementary Math Functions
-
-
-	Compute the natural logarithm of packed single-precision (32-bit)
-	floating-point elements in "a", and store the results in "dst".
-
-	FOR j := 0 to 3
-		i := j*32
-		dst[i+31:i] := LOG(a[i+31:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Elementary Math Functions
+
+
+	Compute the natural logarithm of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".
+
+FOR j := 0 to 3
+	i := j*32
+	dst[i+31:i] := LOG(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Elementary Math Functions
-
-
-	Compute the base-10 logarithm of packed double-precision (64-bit)
-	floating-point elements in "a", and store the results in "dst".
-
-	FOR j := 0 to 1
-		i := j*64
-		dst[i+63:i] := LOG(a[i+63:i]) / LOG(10.0)
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Elementary Math Functions
+
+
+	Compute the base-10 logarithm of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".
+
+FOR j := 0 to 1
+	i := j*64
+	dst[i+63:i] := LOG(a[i+63:i]) / LOG(10.0)
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Elementary Math Functions
-
-
-	Compute the base-10 logarithm of packed single-precision (32-bit)
-	floating-point elements in "a", and store the results in "dst".
-
-	FOR j := 0 to 3
-		i := j*32
-		dst[i+31:i] := LOG(a[i+31:i]) / LOG(10.0)
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Elementary Math Functions
+
+
+	Compute the base-10 logarithm of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".
+
+FOR j := 0 to 3
+	i := j*32
+	dst[i+31:i] := LOG(a[i+31:i]) / LOG(10.0)
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Elementary Math Functions
-
-
-	Compute the natural logarithm of one plus packed double-precision (64-bit)
-	floating-point elements in "a", and store the results in "dst".
-
-	FOR j := 0 to 1
-		i := j*64
-		dst[i+63:i] := LOG(1.0 + a[i+63:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Elementary Math Functions
+
+
+	Compute the natural logarithm of one plus packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".
+
+FOR j := 0 to 1
+	i := j*64
+	dst[i+63:i] := LOG(1.0 + a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Elementary Math Functions
-
-
-	Compute the natural logarithm of one plus packed single-precision (32-bit)
-	floating-point elements in "a", and store the results in "dst".
-
-	FOR j := 0 to 3
-		i := j*32
-		dst[i+31:i] := LOG(1.0 + a[i+31:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Elementary Math Functions
+
+
+	Compute the natural logarithm of one plus packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".
+
+FOR j := 0 to 3
+	i := j*32
+	dst[i+31:i] := LOG(1.0 + a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Elementary Math Functions
-
-
-	Compute the base-2 logarithm of packed double-precision (64-bit) floating-point
-	elements in "a", and store the results in "dst".
-
-	FOR j := 0 to 1
-		i := j*64
-		dst[i+63:i] := LOG(a[i+63:i]) / LOG(2.0)
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Elementary Math Functions
+
+
+	Compute the base-2 logarithm of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".
+
+FOR j := 0 to 1
+	i := j*64
+	dst[i+63:i] := LOG(a[i+63:i]) / LOG(2.0)
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Elementary Math Functions
-
-
-	Compute the base-2 logarithm of packed single-precision (32-bit) floating-point
-	elements in "a", and store the results in "dst".
-
-	FOR j := 0 to 3
-		i := j*32
-		dst[i+31:i] := LOG(a[i+31:i]) / LOG(2.0)
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Elementary Math Functions
+
+
+	Compute the base-2 logarithm of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".
+
+FOR j := 0 to 3
+	i := j*32
+	dst[i+31:i] := LOG(a[i+31:i]) / LOG(2.0)
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Elementary Math Functions
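
Both LOG10 and LOG2 above are specified through the change-of-base identity LOG(a)/LOG(base). In Rust the three bases are separate methods, and the identity is easy to sanity-check (helper name illustrative):

    // ln, ln/ln(10) (== log10), ln/ln(2) (== log2) for one element.
    fn log_bases(a: f64) -> (f64, f64, f64) {
        (a.ln(), a.ln() / std::f64::consts::LN_10, a.ln() / std::f64::consts::LN_2)
    }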
-
-
-	Convert the exponent of each packed double-precision (64-bit) floating-point
-	element in "a" to a double-precision floating-point number representing the integer
-	exponent, and store the results in "dst". This intrinsic essentially calculates
-	"floor(log2(x))" for each element.
-	FOR j := 0 to 1
-		i := j*64
-		dst[i+63:i] := ConvertExpFP64(a[i+63:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Elementary Math Functions
+
+
+	Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element.
+	FOR j := 0 to 1
+		i := j*64
+		dst[i+63:i] := ConvertExpFP64(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Elementary Math Functions
-
-
-	Convert the exponent of each packed single-precision (32-bit) floating-point
-	element in "a" to a single-precision floating-point number representing the integer
-	exponent, and store the results in "dst". This intrinsic essentially calculates
-	"floor(log2(x))" for each element.
-	FOR j := 0 to 3
-		i := j*32
-		dst[i+31:i] := ConvertExpFP32(a[i+31:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Elementary Math Functions
+
+
+	Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element.
+	FOR j := 0 to 3
+		i := j*32
+		dst[i+31:i] := ConvertExpFP32(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Elementary Math Functions
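
ConvertExpFP32/ConvertExpFP64 above are the "logb" operation: the unbiased binary exponent returned as a float, i.e. floor(log2(x)). A one-line scalar model (name illustrative; finite, nonzero input assumed):

    fn logb_ref(a: f32) -> f32 {
        a.log2().floor() // ConvertExpFP32(a)
    }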
-
-
-
-	Compute the exponential value of packed double-precision (64-bit)
-	floating-point elements in "a" raised by packed elements in "b", and store the results
-	in "dst".
-
-	FOR j := 0 to 1
-		i := j*64
-		dst[i+63:i] := POW(a[i+63:i], b[i+63:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Elementary Math Functions
+
+
+
+	Compute the exponential value of packed double-precision (64-bit) floating-point elements in "a" raised by packed elements in "b", and store the results in "dst".
+
+FOR j := 0 to 1
+	i := j*64
+	dst[i+63:i] := POW(a[i+63:i], b[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Elementary Math Functions
-
-
-
-	Compute the exponential value of packed single-precision (32-bit)
-	floating-point elements in "a" raised by packed elements in "b", and store the results
-	in "dst".
-
-	FOR j := 0 to 3
-		i := j*32
-		dst[i+31:i] := POW(a[i+31:i], b[i+31:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Elementary Math Functions
+
+
+
+	Compute the exponential value of packed single-precision (32-bit) floating-point elements in "a" raised by packed elements in "b", and store the results in "dst".
+
+FOR j := 0 to 3
+	i := j*32
+	dst[i+31:i] := POW(a[i+31:i], b[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Elementary Math Functions
-
-
-	Compute the square root of packed double-precision (64-bit) floating-point
-	elements in "a", and store the results in "dst". Note that this intrinsic is less
-	efficient than "_mm_sqrt_pd".
-
-	FOR j := 0 to 1
-		i := j*64
-		dst[i+63:i] := SQRT(a[i+63:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Elementary Math Functions
+
+
+	Compute the square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". Note that this intrinsic is less efficient than "_mm_sqrt_pd".
+
+FOR j := 0 to 1
+	i := j*64
+	dst[i+63:i] := SQRT(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Elementary Math Functions
-
-
-	Compute the square root of packed single-precision (32-bit) floating-point
-	elements in "a", and store the results in "dst". Note that this intrinsic is less
-	efficient than "_mm_sqrt_ps".
-
-	FOR j := 0 to 3
-		i := j*32
-		dst[i+31:i] := SQRT(a[i+31:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Elementary Math Functions
+
+
+	Compute the square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". Note that this intrinsic is less efficient than "_mm_sqrt_ps".
+
+FOR j := 0 to 3
+	i := j*32
+	dst[i+31:i] := SQRT(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Elementary Math Functions
-
-
-	Compute the cumulative distribution function of packed double-precision
-	(64-bit) floating-point elements in "a" using the normal distribution, and store the
-	results in "dst".
-	FOR j := 0 to 1
-		i := j*64
-		dst[i+63:i] := CDFNormal(a[i+63:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Probability/Statistics
+
+
+	Compute the cumulative distribution function of packed double-precision (64-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst".
+	FOR j := 0 to 1
+		i := j*64
+		dst[i+63:i] := CDFNormal(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Probability/Statistics
-
-
-	Compute the cumulative distribution function of packed single-precision
-	(32-bit) floating-point elements in "a" using the normal distribution, and store the
-	results in "dst".
-	FOR j := 0 to 3
-		i := j*32
-		dst[i+31:i] := CDFNormal(a[i+31:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Probability/Statistics
+
+
+	Compute the cumulative distribution function of packed single-precision (32-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst".
+	FOR j := 0 to 3
+		i := j*32
+		dst[i+31:i] := CDFNormal(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Probability/Statistics
-
-
-	Compute the inverse cumulative distribution function of packed double-precision
-	(64-bit) floating-point elements in "a" using the normal distribution, and store the
-	results in "dst".
-	FOR j := 0 to 1
-		i := j*64
-		dst[i+63:i] := InverseCDFNormal(a[i+63:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Probability/Statistics
+
+
+	Compute the inverse cumulative distribution function of packed double-precision (64-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst".
+	FOR j := 0 to 1
+		i := j*64
+		dst[i+63:i] := InverseCDFNormal(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Probability/Statistics
-
-
-	Compute the inverse cumulative distribution function of packed single-precision
-	(32-bit) floating-point elements in "a" using the normal distribution, and store the
-	results in "dst".
-	FOR j := 0 to 3
-		i := j*32
-		dst[i+31:i] := InverseCDFNormal(a[i+31:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Probability/Statistics
+
+
+	Compute the inverse cumulative distribution function of packed single-precision (32-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst".
+	FOR j := 0 to 3
+		i := j*32
+		dst[i+31:i] := InverseCDFNormal(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Probability/Statistics
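
CDFNormal above and the ERF entries that follow are related by Phi(x) = (1 + erf(x / sqrt(2))) / 2. std Rust has no erf, so a self-contained cross-check needs an approximation; the sketch below uses the Abramowitz & Stegun 7.1.26 polynomial (absolute error below ~1.5e-7), which is an assumption of this sketch, not something the XML specifies:

    // erf via the A&S 7.1.26 polynomial; odd symmetry handled explicitly.
    fn erf_approx(x: f64) -> f64 {
        let sign = if x < 0.0 { -1.0 } else { 1.0 };
        let x = x.abs();
        let t = 1.0 / (1.0 + 0.3275911 * x);
        let poly = t * (0.254829592
            + t * (-0.284496736
            + t * (1.421413741 + t * (-1.453152027 + t * 1.061405429))));
        sign * (1.0 - poly * (-x * x).exp())
    }

    // CDFNormal(x) in terms of erf.
    fn cdfnorm_ref(x: f64) -> f64 {
        0.5 * (1.0 + erf_approx(x / std::f64::consts::SQRT_2))
    }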
-
-
-	Compute the error function of packed single-precision (32-bit) floating-point
-	elements in "a", and store the results in "dst".
-	FOR j := 0 to 3
-		i := j*32
-		dst[i+31:i] := ERF(a[i+31:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Probability/Statistics
+
+
+	Compute the error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".
+	FOR j := 0 to 3
+		i := j*32
+		dst[i+31:i] := ERF(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Probability/Statistics
-
-
-	Compute the complementary error function of packed double-precision (64-bit)
-	floating-point elements in "a", and store the results in "dst".
-	FOR j := 0 to 1
-		i := j*64
-		dst[i+63:i] := 1.0 - ERF(a[i+63:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Probability/Statistics
+
+
+	Compute the complementary error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".
+	FOR j := 0 to 1
+		i := j*64
+		dst[i+63:i] := 1.0 - ERF(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Probability/Statistics
-
-
-	Compute the complementary error function of packed single-precision (32-bit)
-	floating-point elements in "a", and store the results in "dst".
-	FOR j := 0 to 3
-		i := j*32
-		dst[i+63:i] := 1.0 - ERF(a[i+31:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Probability/Statistics
+
+
+	Compute the complementary error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".
+	FOR j := 0 to 3
+		i := j*32
+		dst[i+31:i] := 1.0 - ERF(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Probability/Statistics
-
-
-	Compute the inverse complementary error function of packed double-precision
-	(64-bit) floating-point elements in "a", and store the results in "dst".
-	FOR j := 0 to 1
-		i := j*64
-		dst[i+63:i] := 1.0 / (1.0 - ERF(a[i+63:i]))
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Probability/Statistics
+
+
+	Compute the inverse complementary error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".
+	FOR j := 0 to 1
+		i := j*64
+		dst[i+63:i] := 1.0 / (1.0 - ERF(a[i+63:i]))
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Probability/Statistics
-
-
-	Compute the inverse complementary error function of packed single-precision
-	(32-bit) floating-point elements in "a", and store the results in "dst".
-	FOR j := 0 to 3
-		i := j*32
-		dst[i+63:i] := 1.0 / (1.0 - ERF(a[i+31:i]))
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Probability/Statistics
+
+
+	Compute the inverse complementary error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".
+	FOR j := 0 to 3
+		i := j*32
+		dst[i+31:i] := 1.0 / (1.0 - ERF(a[i+31:i]))
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Probability/Statistics
-
-
-	Compute the inverse error function of packed double-precision (64-bit)
-	floating-point elements in "a", and store the results in "dst".
-	FOR j := 0 to 1
-		i := j*64
-		dst[i+63:i] := 1.0 / ERF(a[i+63:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Probability/Statistics
+
+
+	Compute the inverse error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".
+	FOR j := 0 to 1
+		i := j*64
+		dst[i+63:i] := 1.0 / ERF(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Probability/Statistics
-
-
-	Compute the inverse error function of packed single-precision (32-bit)
-	floating-point elements in "a", and store the results in "dst".
-	FOR j := 0 to 3
-		i := j*32
-		dst[i+63:i] := 1.0 / ERF(a[i+31:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Probability/Statistics
+
+
+	Compute the inverse error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".
+	FOR j := 0 to 3
+		i := j*32
+		dst[i+31:i] := 1.0 / ERF(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Probability/Statistics
-
-
-
-	Divide packed signed 8-bit integers in "a" by packed elements in "b", and store
-	the truncated results in "dst".
-
-	FOR j := 0 to 15
-		i := 8*j
-		IF b[i+7:i] == 0
-			#DE
-		FI
-		dst[i+7:i] := Truncate8(a[i+7:i] / b[i+7:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Arithmetic
+
+
+
+	Divide packed signed 8-bit integers in "a" by packed elements in "b", and store the truncated results in "dst".
+
+FOR j := 0 to 15
+	i := 8*j
+	IF b[i+7:i] == 0
+		#DE
+	FI
+	dst[i+7:i] := Truncate8(a[i+7:i] / b[i+7:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Arithmetic
-
-
-
-	Divide packed signed 16-bit integers in "a" by packed elements in "b", and
-	store the truncated results in "dst".
-
-	FOR j := 0 to 7
-		i := 16*j
-		IF b[i+15:i] == 0
-			#DE
-		FI
-		dst[i+15:i] := Truncate16(a[i+15:i] / b[i+15:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Arithmetic
+
+
+
+	Divide packed signed 16-bit integers in "a" by packed elements in "b", and store the truncated results in "dst".
+
+FOR j := 0 to 7
+	i := 16*j
+	IF b[i+15:i] == 0
+		#DE
+	FI
+	dst[i+15:i] := Truncate16(a[i+15:i] / b[i+15:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Arithmetic
-
-
-
-	Divide packed 32-bit integers in "a" by packed elements in "b", and store the
-	truncated results in "dst".
-
-	FOR j := 0 to 3
-		i := 32*j
-		IF b[i+31:i] == 0
-			#DE
-		FI
-		dst[i+31:i] := Truncate32(a[i+31:i] / b[i+31:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Arithmetic
+
+
+
+	Divide packed 32-bit integers in "a" by packed elements in "b", and store the truncated results in "dst".
+
+FOR j := 0 to 3
+	i := 32*j
+	IF b[i+31:i] == 0
+		#DE
+	FI
+	dst[i+31:i] := Truncate32(a[i+31:i] / b[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Arithmetic
-
-
-
-	Divide packed signed 64-bit integers in "a" by packed elements in "b", and
-	store the truncated results in "dst".
-
-	FOR j := 0 to 1
-		i := 64*j
-		IF b[i+63:i] == 0
-			#DE
-		FI
-		dst[i+63:i] := Truncate64(a[i+63:i] / b[i+63:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Arithmetic
+
+
+
+	Divide packed signed 64-bit integers in "a" by packed elements in "b", and store the truncated results in "dst".
+
+FOR j := 0 to 1
+	i := 64*j
+	IF b[i+63:i] == 0
+		#DE
+	FI
+	dst[i+63:i] := Truncate64(a[i+63:i] / b[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Arithmetic
-
-
-
-	Divide packed unsigned 8-bit integers in "a" by packed elements in "b", and
-	store the truncated results in "dst".
-
-	FOR j := 0 to 15
-		i := 8*j
-		IF b[i+7:i] == 0
-			#DE
-		FI
-		dst[i+7:i] := Truncate8(a[i+7:i] / b[i+7:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Arithmetic
+
+
+
+	Divide packed unsigned 8-bit integers in "a" by packed elements in "b", and store the truncated results in "dst".
+
+FOR j := 0 to 15
+	i := 8*j
+	IF b[i+7:i] == 0
+		#DE
+	FI
+	dst[i+7:i] := Truncate8(a[i+7:i] / b[i+7:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Arithmetic
-
-
-
-	Divide packed unsigned 16-bit integers in "a" by packed elements in "b", and
-	store the truncated results in "dst".
-
-	FOR j := 0 to 7
-		i := 16*j
-		IF b[i+15:i] == 0
-			#DE
-		FI
-		dst[i+15:i] := Truncate16(a[i+15:i] / b[i+15:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Arithmetic
+
+
+
+	Divide packed unsigned 16-bit integers in "a" by packed elements in "b", and store the truncated results in "dst".
+
+FOR j := 0 to 7
+	i := 16*j
+	IF b[i+15:i] == 0
+		#DE
+	FI
+	dst[i+15:i] := Truncate16(a[i+15:i] / b[i+15:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Arithmetic
-
-
-
-	Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and
-	store the truncated results in "dst".
-
-	FOR j := 0 to 3
-		i := 32*j
-		IF b[i+31:i] == 0
-			#DE
-		FI
-		dst[i+31:i] := Truncate32(a[i+31:i] / b[i+31:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Arithmetic
+
+
+
+	Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and store the truncated results in "dst".
+
+FOR j := 0 to 3
+	i := 32*j
+	IF b[i+31:i] == 0
+		#DE
+	FI
+	dst[i+31:i] := Truncate32(a[i+31:i] / b[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Arithmetic
-
-
-
-	Divide packed unsigned 64-bit integers in "a" by packed elements in "b", and
-	store the truncated results in "dst".
-
-	FOR j := 0 to 1
-		i := 64*j
-		IF b[i+63:i] == 0
-			#DE
-		FI
-		dst[i+63:i] := Truncate64(a[i+63:i] / b[i+63:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Arithmetic
+
+
+
+	Divide packed unsigned 64-bit integers in "a" by packed elements in "b", and store the truncated results in "dst".
+
+FOR j := 0 to 1
+	i := 64*j
+	IF b[i+63:i] == 0
+		#DE
+	FI
+	dst[i+63:i] := Truncate64(a[i+63:i] / b[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Arithmetic
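
All eight division entries above share the same shape: element-wise truncated division, with #DE raised when a divisor lane is zero. Rust's integer `/` also truncates toward zero, and the zero-divisor case surfaces as a panic rather than a CPU fault, so a scalar reference is one line per lane (name illustrative):

    // Truncate8(a / b) per lane; panics (instead of #DE) if any b[j] == 0.
    // wrapping_div also covers the i8::MIN / -1 overflow lane the way the
    // 8-bit truncation in the pseudocode does.
    fn div_epi8_ref(a: [i8; 16], b: [i8; 16]) -> [i8; 16] {
        core::array::from_fn(|j| a[j].wrapping_div(b[j]))
    }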
-
-
-	Compute the error function of packed double-precision (64-bit) floating-point
-	elements in "a", and store the results in "dst".
-	FOR j := 0 to 1
-		i := j*64
-		dst[i+63:i] := ERF(a[i+63:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Arithmetic
+
+
+	Compute the error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".
+	FOR j := 0 to 1
+		i := j*64
+		dst[i+63:i] := ERF(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Arithmetic
-
-
-
-	Divide packed 32-bit integers in "a" by packed elements in "b", and store the
-	truncated results in "dst".
-	FOR j := 0 to 3
-		i := 32*j
-		dst[i+31:i] := TRUNCATE(a[i+31:i] / b[i+31:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Arithmetic
+
+
+
+	Divide packed 32-bit integers in "a" by packed elements in "b", and store the truncated results in "dst".
+	FOR j := 0 to 3
+		i := 32*j
+		dst[i+31:i] := TRUNCATE(a[i+31:i] / b[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Arithmetic
-
-
-
-
-	Divide packed 32-bit integers in "a" by packed elements in "b", store the
-	truncated results in "dst", and store the remainders as packed 32-bit integers into
-	memory at "mem_addr".
-	FOR j := 0 to 3
-		i := 32*j
-		dst[i+31:i] := TRUNCATE(a[i+31:i] / b[i+31:i])
-		MEM[mem_addr+i+31:mem_addr+i] := REMAINDER(a[i+31:i] / b[i+31:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Arithmetic
+
+
+
+
+	Divide packed 32-bit integers in "a" by packed elements in "b", store the truncated results in "dst", and store the remainders as packed 32-bit integers into memory at "mem_addr".
+	FOR j := 0 to 3
+		i := 32*j
+		dst[i+31:i] := TRUNCATE(a[i+31:i] / b[i+31:i])
+		MEM[mem_addr+i+31:mem_addr+i] := REMAINDER(a[i+31:i] / b[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Arithmetic
-
-
-
-	Divide packed 32-bit integers in "a" by packed elements in "b", and store the
-	remainders as packed 32-bit integers in "dst".
-	FOR j := 0 to 3
-		i := 32*j
-		dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Arithmetic
+
+
+
+	Divide packed 32-bit integers in "a" by packed elements in "b", and store the remainders as packed 32-bit integers in "dst".
+	FOR j := 0 to 3
+		i := 32*j
+		dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Arithmetic
-
-
-
-	Divide packed 8-bit integers in "a" by packed elements in "b", and store the
-	remainders as packed 32-bit integers in "dst".
-	FOR j := 0 to 15
-		i := 8*j
-		dst[i+7:i] := REMAINDER(a[i+7:i] / b[i+7:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Arithmetic
+
+
+
+	Divide packed 8-bit integers in "a" by packed elements in "b", and store the remainders as packed 8-bit integers in "dst".
+	FOR j := 0 to 15
+		i := 8*j
+		dst[i+7:i] := REMAINDER(a[i+7:i] / b[i+7:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Arithmetic
-
-
-
-	Divide packed 16-bit integers in "a" by packed elements in "b", and store the
-	remainders as packed 32-bit integers in "dst".
-	FOR j := 0 to 7
-		i := 16*j
-		dst[i+15:i] := REMAINDER(a[i+15:i] / b[i+15:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Arithmetic
+
+
+
+	Divide packed 16-bit integers in "a" by packed elements in "b", and store the remainders as packed 16-bit integers in "dst".
+	FOR j := 0 to 7
+		i := 16*j
+		dst[i+15:i] := REMAINDER(a[i+15:i] / b[i+15:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Arithmetic
-
-
-
-	Divide packed 32-bit integers in "a" by packed elements in "b", and store the
-	remainders as packed 32-bit integers in "dst".
-	FOR j := 0 to 3
-		i := 32*j
-		dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Arithmetic
+
+
+
+	Divide packed 32-bit integers in "a" by packed elements in "b", and store the remainders as packed 32-bit integers in "dst".
+	FOR j := 0 to 3
+		i := 32*j
+		dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Arithmetic
-
-
-
-	Divide packed 64-bit integers in "a" by packed elements in "b", and store the
-	remainders as packed 32-bit integers in "dst".
-	FOR j := 0 to 1
-		i := 64*j
-		dst[i+63:i] := REMAINDER(a[i+63:i] / b[i+63:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Arithmetic
+
+
+
+	Divide packed 64-bit integers in "a" by packed elements in "b", and store the remainders as packed 64-bit integers in "dst".
+	FOR j := 0 to 1
+		i := 64*j
+		dst[i+63:i] := REMAINDER(a[i+63:i] / b[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Arithmetic
-
-
-
-	Divide packed unsigned 8-bit integers in "a" by packed elements in "b", and
-	store the remainders as packed unsigned 32-bit integers in "dst".
-	FOR j := 0 to 15
-		i := 8*j
-		dst[i+7:i] := REMAINDER(a[i+7:i] / b[i+7:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Arithmetic
+
+
+
+	Divide packed unsigned 8-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 8-bit integers in "dst".
+	FOR j := 0 to 15
+		i := 8*j
+		dst[i+7:i] := REMAINDER(a[i+7:i] / b[i+7:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Arithmetic
-
-
-
-	Divide packed unsigned 16-bit integers in "a" by packed elements in "b", and
-	store the remainders as packed unsigned 32-bit integers in "dst".
-	FOR j := 0 to 7
-		i := 16*j
-		dst[i+15:i] := REMAINDER(a[i+15:i] / b[i+15:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Arithmetic
+
+
+
+	Divide packed unsigned 16-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 16-bit integers in "dst".
+	FOR j := 0 to 7
+		i := 16*j
+		dst[i+15:i] := REMAINDER(a[i+15:i] / b[i+15:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Arithmetic
-
-
-
-	Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and
-	store the remainders as packed unsigned 32-bit integers in "dst".
-	FOR j := 0 to 3
-		i := 32*j
-		dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Arithmetic
+
+
+
+	Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 32-bit integers in "dst".
+	FOR j := 0 to 3
+		i := 32*j
+		dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Arithmetic
-
-
-
-	Divide packed unsigned 64-bit integers in "a" by packed elements in "b", and
-	store the remainders as packed unsigned 32-bit integers in "dst".
-	FOR j := 0 to 1
-		i := 64*j
-		dst[i+63:i] := REMAINDER(a[i+63:i] / b[i+63:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Arithmetic
+
+
+
+	Divide packed unsigned 64-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 64-bit integers in "dst".
+	FOR j := 0 to 1
+		i := 64*j
+		dst[i+63:i] := REMAINDER(a[i+63:i] / b[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Arithmetic
-
-
-
-	Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and
-	store the truncated results in "dst".
-	FOR j := 0 to 3
-		i := 32*j
-		dst[i+31:i] := TRUNCATE(a[i+31:i] / b[i+31:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Arithmetic
+
+
+
+	Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and store the truncated results in "dst".
+	FOR j := 0 to 3
+		i := 32*j
+		dst[i+31:i] := TRUNCATE(a[i+31:i] / b[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Arithmetic
-
-
-
-
-	Divide packed unsigned 32-bit integers in "a" by packed elements in "b", store
-	the truncated results in "dst", and store the remainders as packed unsigned 32-bit
-	integers into memory at "mem_addr".
-	FOR j := 0 to 3
-		i := 32*j
-		dst[i+31:i] := TRUNCATE(a[i+31:i] / b[i+31:i])
-		MEM[mem_addr+i+31:mem_addr+i] := REMAINDER(a[i+31:i] / b[i+31:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Arithmetic
+
+
+
+
+	Divide packed unsigned 32-bit integers in "a" by packed elements in "b", store the truncated results in "dst", and store the remainders as packed unsigned 32-bit integers into memory at "mem_addr".
+	FOR j := 0 to 3
+		i := 32*j
+		dst[i+31:i] := TRUNCATE(a[i+31:i] / b[i+31:i])
+		MEM[mem_addr+i+31:mem_addr+i] := REMAINDER(a[i+31:i] / b[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Arithmetic
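
The divrem entries package both outputs of one division: the quotient goes to "dst" and the remainder to memory. In Rust, `/` and `%` supply TRUNCATE and REMAINDER with the same sign conventions (names illustrative):

    fn udivrem_epu32_ref(mem: &mut [u32; 4], a: [u32; 4], b: [u32; 4]) -> [u32; 4] {
        let mut dst = [0u32; 4];
        for j in 0..4 {
            dst[j] = a[j] / b[j]; // TRUNCATE(a / b)
            mem[j] = a[j] % b[j]; // REMAINDER(a / b)
        }
        dst
    }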
-
-
-
-	Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and
-	store the remainders as packed unsigned 32-bit integers in "dst".
-	FOR j := 0 to 3
-		i := 32*j
-		dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i])
-	ENDFOR
-	dst[MAX:128] := 0
-
-	SSE
-
immintrin.h
-	Arithmetic
+
+
+
+	Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 32-bit integers in "dst".
+	FOR j := 0 to 3
+		i := 32*j
+		dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+
+	SSE
+
immintrin.h
+ Arithmetic
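These SVML-style unsigned divide/remainder entries have no counterpart in Rust's core::arch; as a rough scalar sketch of the REMAINDER loop above for the 16-bit case (the function name and array types here are illustrative only, not part of any library):

    // Scalar sketch of the 16-bit REMAINDER loop; illustrative only.
    // Division by zero panics here, a case the pseudo-code leaves undefined.
    fn rem_epu16_scalar(a: [u16; 8], b: [u16; 8]) -> [u16; 8] {
        let mut dst = [0u16; 8];
        for j in 0..8 {
            dst[j] = a[j] % b[j]; // REMAINDER(a[i+15:i] / b[i+15:i])
        }
        dst
    }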
+ Round the packed double-precision (64-bit) floating-point elements in "a" up to an integer value, and store the results as packed double-precision floating-point elements in "dst". This intrinsic may generate the "roundpd"/"vroundpd" instruction.
+ FOR j := 0 to 1
+     i := j*64
+     dst[i+63:i] := CEIL(a[i+63:i])
+ ENDFOR
+ dst[MAX:128] := 0
+ SSE
+ immintrin.h
+ Special Math Functions

+ Round the packed single-precision (32-bit) floating-point elements in "a" up to an integer value, and store the results as packed single-precision floating-point elements in "dst". This intrinsic may generate the "roundps"/"vroundps" instruction.
+ FOR j := 0 to 3
+     i := j*32
+     dst[i+31:i] := CEIL(a[i+31:i])
+ ENDFOR
+ dst[MAX:128] := 0
+ SSE
+ immintrin.h
+ Special Math Functions

+ Round the packed double-precision (64-bit) floating-point elements in "a" down to an integer value, and store the results as packed double-precision floating-point elements in "dst". This intrinsic may generate the "roundpd"/"vroundpd" instruction.
+ FOR j := 0 to 1
+     i := j*64
+     dst[i+63:i] := FLOOR(a[i+63:i])
+ ENDFOR
+ dst[MAX:128] := 0
+ SSE
+ immintrin.h
+ Special Math Functions

+ Round the packed single-precision (32-bit) floating-point elements in "a" down to an integer value, and store the results as packed single-precision floating-point elements in "dst". This intrinsic may generate the "roundps"/"vroundps" instruction.
+ FOR j := 0 to 3
+     i := j*32
+     dst[i+31:i] := FLOOR(a[i+31:i])
+ ENDFOR
+ dst[MAX:128] := 0
+ SSE
+ immintrin.h
+ Special Math Functions

+ Round the packed double-precision (64-bit) floating-point elements in "a" to the nearest integer value, and store the results as packed double-precision floating-point elements in "dst". This intrinsic may generate the "roundpd"/"vroundpd" instruction.
+ FOR j := 0 to 1
+     i := j*64
+     dst[i+63:i] := ROUND(a[i+63:i])
+ ENDFOR
+ dst[MAX:128] := 0
+ SSE
+ immintrin.h
+ Special Math Functions

+ Round the packed single-precision (32-bit) floating-point elements in "a" to the nearest integer value, and store the results as packed single-precision floating-point elements in "dst". This intrinsic may generate the "roundps"/"vroundps" instruction.
+ FOR j := 0 to 3
+     i := j*32
+     dst[i+31:i] := ROUND(a[i+31:i])
+ ENDFOR
+ dst[MAX:128] := 0
+ SSE
+ immintrin.h
+ Special Math Functions
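Rust exposes this rounding family as `_mm_ceil_ps`, `_mm_floor_ps` and `_mm_round_ps` in core::arch; note they are gated on `sse4.1`, since ROUNDPS/ROUNDPD are SSE4.1 instructions even though these entries sit in the SSE section. A minimal sketch:

    use core::arch::x86_64::*;

    // CEIL, FLOOR and (to-nearest) ROUND of 4 packed f32 lanes.
    #[target_feature(enable = "sse4.1")]
    unsafe fn round_lanes(a: __m128) -> (__m128, __m128, __m128) {
        let up = _mm_ceil_ps(a);
        let down = _mm_floor_ps(a);
        let nearest =
            _mm_round_ps::<{ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC }>(a);
        (up, down, nearest)
    }

The TRUNCATE entries that follow correspond to the same instruction with the `_MM_FROUND_TO_ZERO` control.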
+ Truncate the packed double-precision (64-bit) floating-point elements in "a", and store the results as packed double-precision floating-point elements in "dst". This intrinsic may generate the "roundpd"/"vroundpd" instruction.
+ FOR j := 0 to 1
+     i := j*64
+     dst[i+63:i] := TRUNCATE(a[i+63:i])
+ ENDFOR
+ dst[MAX:128] := 0
+ SSE
+ immintrin.h
+ Miscellaneous

+ Truncate the packed single-precision (32-bit) floating-point elements in "a", and store the results as packed single-precision floating-point elements in "dst". This intrinsic may generate the "roundps"/"vroundps" instruction.
+ FOR j := 0 to 3
+     i := j*32
+     dst[i+31:i] := TRUNCATE(a[i+31:i])
+ ENDFOR
+ dst[MAX:128] := 0
+ SSE
+ immintrin.h
+ Miscellaneous
+ Macro: Transpose the 4x4 matrix formed by the 4 rows of single-precision (32-bit) floating-point elements in "row0", "row1", "row2", and "row3", and store the transposed matrix in these vectors ("row0" now contains column 0, etc.).
+ __m128 tmp3, tmp2, tmp1, tmp0;
+ tmp0 := _mm_unpacklo_ps(row0, row1);
+ tmp2 := _mm_unpacklo_ps(row2, row3);
+ tmp1 := _mm_unpackhi_ps(row0, row1);
+ tmp3 := _mm_unpackhi_ps(row2, row3);
+ row0 := _mm_movelh_ps(tmp0, tmp2);
+ row1 := _mm_movehl_ps(tmp2, tmp0);
+ row2 := _mm_movelh_ps(tmp1, tmp3);
+ row3 := _mm_movehl_ps(tmp3, tmp1);
+ SSE
+ xmmintrin.h
+ Swizzle
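In Rust this macro is `_MM_TRANSPOSE4_PS`, taking the four rows by mutable reference; a small usage sketch:

    use core::arch::x86_64::*;

    // Transpose a 4x4 f32 matrix held in four __m128 rows, using the same
    // unpacklo/unpackhi + movelh/movehl sequence as the pseudo-code above.
    #[target_feature(enable = "sse")]
    unsafe fn transpose4(rows: &mut [__m128; 4]) {
        let [mut r0, mut r1, mut r2, mut r3] = *rows;
        _MM_TRANSPOSE4_PS(&mut r0, &mut r1, &mut r2, &mut r3);
        *rows = [r0, r1, r2, r3];
    }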
+ Extract a 16-bit integer from "a", selected with "imm8", and store the result in the lower element of "dst".
+ dst[15:0] := (a[63:0] >> (imm8[1:0] * 16))[15:0]
+ dst[31:16] := 0
+ SSE
+ xmmintrin.h
+ Swizzle

+ Extract a 16-bit integer from "a", selected with "imm8", and store the result in the lower element of "dst".
+ dst[15:0] := (a[63:0] >> (imm8[1:0] * 16))[15:0]
+ dst[31:16] := 0
+ SSE
+ xmmintrin.h
+ Swizzle

+ Copy "a" to "dst", and insert the 16-bit integer "i" into "dst" at the location specified by "imm8".
+ dst[63:0] := a[63:0]
+ sel := imm8[1:0]*16
+ dst[sel+15:sel] := i[15:0]
+ SSE
+ xmmintrin.h
+ Swizzle

+ Copy "a" to "dst", and insert the 16-bit integer "i" into "dst" at the location specified by "imm8".
+ dst[63:0] := a[63:0]
+ sel := imm8[1:0]*16
+ dst[sel+15:sel] := i[15:0]
+ SSE
+ xmmintrin.h
+ Swizzle

+ Shuffle 16-bit integers in "a" using the control in "imm8", and store the results in "dst".
+ DEFINE SELECT4(src, control) {
+     CASE(control[1:0]) OF
+     0: tmp[15:0] := src[15:0]
+     1: tmp[15:0] := src[31:16]
+     2: tmp[15:0] := src[47:32]
+     3: tmp[15:0] := src[63:48]
+     ESAC
+     RETURN tmp[15:0]
+ }
+ dst[15:0] := SELECT4(a[63:0], imm8[1:0])
+ dst[31:16] := SELECT4(a[63:0], imm8[3:2])
+ dst[47:32] := SELECT4(a[63:0], imm8[5:4])
+ dst[63:48] := SELECT4(a[63:0], imm8[7:6])
+ SSE
+ xmmintrin.h
+ Swizzle

+ Shuffle 16-bit integers in "a" using the control in "imm8", and store the results in "dst".
+ DEFINE SELECT4(src, control) {
+     CASE(control[1:0]) OF
+     0: tmp[15:0] := src[15:0]
+     1: tmp[15:0] := src[31:16]
+     2: tmp[15:0] := src[47:32]
+     3: tmp[15:0] := src[63:48]
+     ESAC
+     RETURN tmp[15:0]
+ }
+ dst[15:0] := SELECT4(a[63:0], imm8[1:0])
+ dst[31:16] := SELECT4(a[63:0], imm8[3:2])
+ dst[47:32] := SELECT4(a[63:0], imm8[5:4])
+ dst[63:48] := SELECT4(a[63:0], imm8[7:6])
+ SSE
+ xmmintrin.h
+ Swizzle
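The six entries above are the 64-bit MMX forms; core::arch dropped the `__m64` type, so in Rust the closest equivalents are the 128-bit SSE2 versions (`_mm_extract_epi16`, `_mm_insert_epi16`, `_mm_shufflelo_epi16`). A sketch under that substitution:

    use core::arch::x86_64::*;

    // Extract lane 1, insert into lane 1, and shuffle the low four words.
    #[target_feature(enable = "sse2")]
    unsafe fn word_ops(a: __m128i) -> (i32, __m128i, __m128i) {
        let extracted = _mm_extract_epi16::<1>(a); // zero-extended into i32
        let inserted = _mm_insert_epi16::<1>(a, 0x7FFF);
        let reversed = _mm_shufflelo_epi16::<0b00_01_10_11>(a); // words 3,2,1,0
        (extracted, inserted, reversed)
    }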
+ Shuffle single-precision (32-bit) floating-point elements in "a" and "b" using the control in "imm8", and store the results in "dst".
+ DEFINE SELECT4(src, control) {
+     CASE(control[1:0]) OF
+     0: tmp[31:0] := src[31:0]
+     1: tmp[31:0] := src[63:32]
+     2: tmp[31:0] := src[95:64]
+     3: tmp[31:0] := src[127:96]
+     ESAC
+     RETURN tmp[31:0]
+ }
+ dst[31:0] := SELECT4(a[127:0], imm8[1:0])
+ dst[63:32] := SELECT4(a[127:0], imm8[3:2])
+ dst[95:64] := SELECT4(b[127:0], imm8[5:4])
+ dst[127:96] := SELECT4(b[127:0], imm8[7:6])
+ SSE
+ xmmintrin.h
+ Swizzle

+ Unpack and interleave single-precision (32-bit) floating-point elements from the high half of "a" and "b", and store the results in "dst".
+ DEFINE INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]) {
+     dst[31:0] := src1[95:64]
+     dst[63:32] := src2[95:64]
+     dst[95:64] := src1[127:96]
+     dst[127:96] := src2[127:96]
+     RETURN dst[127:0]
+ }
+ dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0])
+ SSE
+ xmmintrin.h
+ Swizzle

+ Unpack and interleave single-precision (32-bit) floating-point elements from the low half of "a" and "b", and store the results in "dst".
+ DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) {
+     dst[31:0] := src1[31:0]
+     dst[63:32] := src2[31:0]
+     dst[95:64] := src1[63:32]
+     dst[127:96] := src2[63:32]
+     RETURN dst[127:0]
+ }
+ dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0])
+ SSE
+ xmmintrin.h
+ Swizzle
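The same SELECT4/interleave semantics are directly available in Rust, with the shuffle control as a const generic. A minimal sketch:

    use core::arch::x86_64::*;

    // imm8 = 0b01_00_11_10 picks a[2], a[3] for the low half and
    // b[0], b[1] for the high half, per the SELECT4 definition above.
    #[target_feature(enable = "sse")]
    unsafe fn swizzles(a: __m128, b: __m128) -> (__m128, __m128, __m128) {
        let shuffled = _mm_shuffle_ps::<0b01_00_11_10>(a, b);
        let hi = _mm_unpackhi_ps(a, b); // a[2], b[2], a[3], b[3]
        let lo = _mm_unpacklo_ps(a, b); // a[0], b[0], a[1], b[1]
        (shuffled, hi, lo)
    }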
+ Get the unsigned 32-bit value of the MXCSR control and status register.
+ dst[31:0] := MXCSR
+ SSE
+ immintrin.h
+ General Support

+ Set the MXCSR control and status register with the value in unsigned 32-bit integer "a".
+ MXCSR := a[31:0]
+ SSE
+ immintrin.h
+ General Support

+ Macro: Get the exception state bits from the MXCSR control and status register. The exception state may contain any of the following flags: _MM_EXCEPT_INVALID, _MM_EXCEPT_DIV_ZERO, _MM_EXCEPT_DENORM, _MM_EXCEPT_OVERFLOW, _MM_EXCEPT_UNDERFLOW, _MM_EXCEPT_INEXACT
+ dst[31:0] := MXCSR & _MM_EXCEPT_MASK
+ SSE
+ immintrin.h
+ General Support

+ Macro: Set the exception state bits of the MXCSR control and status register to the value in unsigned 32-bit integer "a". The exception state may contain any of the following flags: _MM_EXCEPT_INVALID, _MM_EXCEPT_DIV_ZERO, _MM_EXCEPT_DENORM, _MM_EXCEPT_OVERFLOW, _MM_EXCEPT_UNDERFLOW, _MM_EXCEPT_INEXACT
+ MXCSR := a[31:0] AND ~_MM_EXCEPT_MASK
+ SSE
+ immintrin.h
+ General Support

+ Macro: Get the exception mask bits from the MXCSR control and status register. The exception mask may contain any of the following flags: _MM_MASK_INVALID, _MM_MASK_DIV_ZERO, _MM_MASK_DENORM, _MM_MASK_OVERFLOW, _MM_MASK_UNDERFLOW, _MM_MASK_INEXACT
+ dst[31:0] := MXCSR & _MM_MASK_MASK
+ SSE
+ immintrin.h
+ General Support

+ Macro: Set the exception mask bits of the MXCSR control and status register to the value in unsigned 32-bit integer "a". The exception mask may contain any of the following flags: _MM_MASK_INVALID, _MM_MASK_DIV_ZERO, _MM_MASK_DENORM, _MM_MASK_OVERFLOW, _MM_MASK_UNDERFLOW, _MM_MASK_INEXACT
+ MXCSR := a[31:0] AND ~_MM_MASK_MASK
+ SSE
+ immintrin.h
+ General Support

+ Macro: Get the rounding mode bits from the MXCSR control and status register. The rounding mode may contain any of the following flags: _MM_ROUND_NEAREST, _MM_ROUND_DOWN, _MM_ROUND_UP, _MM_ROUND_TOWARD_ZERO
+ dst[31:0] := MXCSR & _MM_ROUND_MASK
+ SSE
+ immintrin.h
+ General Support

+ Macro: Set the rounding mode bits of the MXCSR control and status register to the value in unsigned 32-bit integer "a". The rounding mode may contain any of the following flags: _MM_ROUND_NEAREST, _MM_ROUND_DOWN, _MM_ROUND_UP, _MM_ROUND_TOWARD_ZERO
+ MXCSR := a[31:0] AND ~_MM_ROUND_MASK
+ SSE
+ immintrin.h
+ General Support

+ Macro: Get the flush zero bits from the MXCSR control and status register. The flush zero may contain any of the following flags: _MM_FLUSH_ZERO_ON or _MM_FLUSH_ZERO_OFF
+ dst[31:0] := MXCSR & _MM_FLUSH_MASK
+ SSE
+ immintrin.h
+ General Support

+ Macro: Set the flush zero bits of the MXCSR control and status register to the value in unsigned 32-bit integer "a". The flush zero may contain any of the following flags: _MM_FLUSH_ZERO_ON or _MM_FLUSH_ZERO_OFF
+ MXCSR := a[31:0] AND ~_MM_FLUSH_MASK
+ SSE
+ immintrin.h
+ General Support
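Rust mirrors these as `_mm_getcsr`/`_mm_setcsr` plus the `_MM_GET_*`/`_MM_SET_*` macros, though recent core::arch versions deprecate the MXCSR family because mutating the floating-point environment conflicts with assumptions the compiler may make. A sketch, assuming the deprecated API is acceptable:

    use core::arch::x86_64::*;

    // Read MXCSR, clear the rounding-mode field, select round-down.
    #[allow(deprecated)]
    #[target_feature(enable = "sse")]
    unsafe fn set_round_down() {
        let csr = _mm_getcsr();
        _mm_setcsr((csr & !_MM_ROUND_MASK) | _MM_ROUND_DOWN);
    }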
+ Fetch the line of data from memory that contains address "p" to a location in the cache hierarchy specified by the locality hint "i", which can be one of:<ul>
+ <li>_MM_HINT_T0 // 3, move data using the T0 hint. The PREFETCHT0 instruction will be generated.</li>
+ <li>_MM_HINT_T1 // 2, move data using the T1 hint. The PREFETCHT1 instruction will be generated.</li>
+ <li>_MM_HINT_T2 // 1, move data using the T2 hint. The PREFETCHT2 instruction will be generated.</li>
+ <li>_MM_HINT_NTA // 0, move data using the non-temporal access (NTA) hint. The PREFETCHNTA instruction will be generated.</li></ul>
+ SSE
+ immintrin.h
+ General Support

+ Perform a serializing operation on all store-to-memory instructions that were issued prior to this instruction. Guarantees that every store instruction that precedes, in program order, is globally visible before any store instruction which follows the fence in program order.
+ SSE
+ immintrin.h
+ General Support

+ Allocate "size" bytes of memory, aligned to the alignment specified in "align", and return a pointer to the allocated memory. "_mm_free" should be used to free memory that is allocated with "_mm_malloc".
+ SSE
+ immintrin.h
+ General Support

+ Free aligned memory that was allocated with "_mm_malloc".
+ SSE
+ immintrin.h
+ General Support

+ Return vector of type __m128 with undefined elements.
+ SSE
+ immintrin.h
+ General Support
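Both the prefetch hint and the store fence are available unchanged in Rust, with the hint as a const generic. A minimal sketch:

    use core::arch::x86_64::*;

    // Prefetch the cache line holding `p` with the T0 hint, then order all
    // earlier stores before any later ones.
    #[target_feature(enable = "sse")]
    unsafe fn warm_then_fence(p: *const u8) {
        _mm_prefetch::<_MM_HINT_T0>(p as *const i8);
        _mm_sfence();
    }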
+ Compare packed signed 16-bit integers in "a" and "b", and store packed maximum values in "dst".
+ FOR j := 0 to 3
+     i := j*16
+     dst[i+15:i] := MAX(a[i+15:i], b[i+15:i])
+ ENDFOR
+ SSE
+ xmmintrin.h
+ Special Math Functions

+ Compare packed signed 16-bit integers in "a" and "b", and store packed maximum values in "dst".
+ FOR j := 0 to 3
+     i := j*16
+     dst[i+15:i] := MAX(a[i+15:i], b[i+15:i])
+ ENDFOR
+ SSE
+ xmmintrin.h
+ Special Math Functions

+ Compare packed unsigned 8-bit integers in "a" and "b", and store packed maximum values in "dst".
+ FOR j := 0 to 7
+     i := j*8
+     dst[i+7:i] := MAX(a[i+7:i], b[i+7:i])
+ ENDFOR
+ SSE
+ xmmintrin.h
+ Special Math Functions

+ Compare packed unsigned 8-bit integers in "a" and "b", and store packed maximum values in "dst".
+ FOR j := 0 to 7
+     i := j*8
+     dst[i+7:i] := MAX(a[i+7:i], b[i+7:i])
+ ENDFOR
+ SSE
+ xmmintrin.h
+ Special Math Functions

+ Compare packed signed 16-bit integers in "a" and "b", and store packed minimum values in "dst".
+ FOR j := 0 to 3
+     i := j*16
+     dst[i+15:i] := MIN(a[i+15:i], b[i+15:i])
+ ENDFOR
+ SSE
+ xmmintrin.h
+ Special Math Functions

+ Compare packed signed 16-bit integers in "a" and "b", and store packed minimum values in "dst".
+ FOR j := 0 to 3
+     i := j*16
+     dst[i+15:i] := MIN(a[i+15:i], b[i+15:i])
+ ENDFOR
+ SSE
+ xmmintrin.h
+ Special Math Functions

+ Compare packed unsigned 8-bit integers in "a" and "b", and store packed minimum values in "dst".
+ FOR j := 0 to 7
+     i := j*8
+     dst[i+7:i] := MIN(a[i+7:i], b[i+7:i])
+ ENDFOR
+ SSE
+ xmmintrin.h
+ Special Math Functions

+ Compare packed unsigned 8-bit integers in "a" and "b", and store packed minimum values in "dst".
+ FOR j := 0 to 7
+     i := j*8
+     dst[i+7:i] := MIN(a[i+7:i], b[i+7:i])
+ ENDFOR
+ SSE
+ xmmintrin.h
+ Special Math Functions
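These packed max/min entries are again MMX forms; with `__m64` gone from core::arch, the SSE2 128-bit versions carry the same per-lane semantics. A sketch:

    use core::arch::x86_64::*;

    // Lane-wise signed 16-bit MAX and unsigned 8-bit MIN on __m128i.
    #[target_feature(enable = "sse2")]
    unsafe fn lane_minmax(a: __m128i, b: __m128i) -> (__m128i, __m128i) {
        (_mm_max_epi16(a, b), _mm_min_epu8(a, b))
    }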
+ Compare the lower single-precision (32-bit) floating-point elements in "a" and "b", store the minimum value in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". [min_float_note]
+ dst[31:0] := MIN(a[31:0], b[31:0])
+ dst[127:32] := a[127:32]
+ SSE
+ xmmintrin.h
+ Special Math Functions

+ Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst". [min_float_note]
+ FOR j := 0 to 3
+     i := j*32
+     dst[i+31:i] := MIN(a[i+31:i], b[i+31:i])
+ ENDFOR
+ SSE
+ xmmintrin.h
+ Special Math Functions

+ Compare the lower single-precision (32-bit) floating-point elements in "a" and "b", store the maximum value in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". [max_float_note]
+ dst[31:0] := MAX(a[31:0], b[31:0])
+ dst[127:32] := a[127:32]
+ SSE
+ xmmintrin.h
+ Special Math Functions

+ Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst". [max_float_note]
+ FOR j := 0 to 3
+     i := j*32
+     dst[i+31:i] := MAX(a[i+31:i], b[i+31:i])
+ ENDFOR
+ SSE
+ xmmintrin.h
+ Special Math Functions
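The [min_float_note]/[max_float_note] markers refer to the guide's warning that MINPS/MAXPS are not symmetric: when either input is NaN (and for signed zeros), the second source operand is returned. A sketch of that behavior in Rust:

    use core::arch::x86_64::*;

    // Lane 1 of `a` is NaN, so lane 1 of the result is taken from `b` (0.0):
    // MIN returns the second operand whenever the comparison fails.
    #[target_feature(enable = "sse")]
    unsafe fn nan_min() -> __m128 {
        let a = _mm_set_ps(4.0, 3.0, f32::NAN, 1.0);
        let b = _mm_set1_ps(0.0);
        _mm_min_ps(a, b)
    }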
+ Multiply the packed unsigned 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst".
+ FOR j := 0 to 3
+     i := j*16
+     tmp[31:0] := a[i+15:i] * b[i+15:i]
+     dst[i+15:i] := tmp[31:16]
+ ENDFOR
+ SSE
+ xmmintrin.h
+ Arithmetic

+ Multiply the packed unsigned 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst".
+ FOR j := 0 to 3
+     i := j*16
+     tmp[31:0] := a[i+15:i] * b[i+15:i]
+     dst[i+15:i] := tmp[31:16]
+ ENDFOR
+ SSE
+ xmmintrin.h
+ Arithmetic

+ Compute the absolute differences of packed unsigned 8-bit integers in "a" and "b", then horizontally sum the 8 differences to produce a single unsigned 16-bit integer, and store it in the low 16 bits of "dst".
+ FOR j := 0 to 7
+     i := j*8
+     tmp[i+7:i] := ABS(a[i+7:i] - b[i+7:i])
+ ENDFOR
+ dst[15:0] := tmp[7:0] + tmp[15:8] + tmp[23:16] + tmp[31:24] + tmp[39:32] + tmp[47:40] + tmp[55:48] + tmp[63:56]
+ dst[63:16] := 0
+ SSE
+ xmmintrin.h
+ Arithmetic
+ Miscellaneous

+ Compute the absolute differences of packed unsigned 8-bit integers in "a" and "b", then horizontally sum the 8 differences to produce a single unsigned 16-bit integer, and store it in the low 16 bits of "dst".
+ FOR j := 0 to 7
+     i := j*8
+     tmp[i+7:i] := ABS(a[i+7:i] - b[i+7:i])
+ ENDFOR
+ dst[15:0] := tmp[7:0] + tmp[15:8] + tmp[23:16] + tmp[31:24] + tmp[39:32] + tmp[47:40] + tmp[55:48] + tmp[63:56]
+ dst[63:16] := 0
+ SSE
+ xmmintrin.h
+ Arithmetic
+ Miscellaneous
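The SAD entries are MMX forms as well; Rust offers the SSE2 `_mm_sad_epu8`, which computes the same sum over each 8-byte half of a 128-bit vector:

    use core::arch::x86_64::*;

    // Two u16 sums of absolute differences, one per 8-byte half, landing in
    // bits 15:0 and 79:64 of the result.
    #[target_feature(enable = "sse2")]
    unsafe fn sad(a: __m128i, b: __m128i) -> __m128i {
        _mm_sad_epu8(a, b)
    }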
+ Add the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ dst[31:0] := a[31:0] + b[31:0]
+ dst[127:32] := a[127:32]
+ SSE
+ xmmintrin.h
+ Arithmetic

+ Add packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst".
+ FOR j := 0 to 3
+     i := j*32
+     dst[i+31:i] := a[i+31:i] + b[i+31:i]
+ ENDFOR
+ SSE
+ xmmintrin.h
+ Arithmetic

+ Subtract the lower single-precision (32-bit) floating-point element in "b" from the lower single-precision (32-bit) floating-point element in "a", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ dst[31:0] := a[31:0] - b[31:0]
+ dst[127:32] := a[127:32]
+ SSE
+ xmmintrin.h
+ Arithmetic

+ Subtract packed single-precision (32-bit) floating-point elements in "b" from packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".
+ FOR j := 0 to 3
+     i := j*32
+     dst[i+31:i] := a[i+31:i] - b[i+31:i]
+ ENDFOR
+ SSE
+ xmmintrin.h
+ Arithmetic

+ Multiply the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ dst[31:0] := a[31:0] * b[31:0]
+ dst[127:32] := a[127:32]
+ SSE
+ xmmintrin.h
+ Arithmetic

+ Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst".
+ FOR j := 0 to 3
+     i := j*32
+     dst[i+31:i] := a[i+31:i] * b[i+31:i]
+ ENDFOR
+ SSE
+ xmmintrin.h
+ Arithmetic

+ Divide the lower single-precision (32-bit) floating-point element in "a" by the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ dst[31:0] := a[31:0] / b[31:0]
+ dst[127:32] := a[127:32]
+ SSE
+ xmmintrin.h
+ Arithmetic

+ Divide packed single-precision (32-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst".
+ FOR j := 0 to 3
+     i := 32*j
+     dst[i+31:i] := a[i+31:i] / b[i+31:i]
+ ENDFOR
+ SSE
+ xmmintrin.h
+ Arithmetic
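The packed and scalar arithmetic forms map one-to-one onto core::arch; the `_ss` variants write only lane 0 and pass lanes 1-3 of the first operand through. A combined sketch:

    use core::arch::x86_64::*;

    // (a+b)*(a-b)/b per lane, plus a scalar add that keeps lanes 1..3 of `a`.
    #[target_feature(enable = "sse")]
    unsafe fn arith(a: __m128, b: __m128) -> (__m128, __m128) {
        let packed = _mm_div_ps(_mm_mul_ps(_mm_add_ps(a, b), _mm_sub_ps(a, b)), b);
        let scalar = _mm_add_ss(a, b); // lane 0 = a0 + b0
        (packed, scalar)
    }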
+ Average packed unsigned 8-bit integers in "a" and "b", and store the results in "dst".
+ FOR j := 0 to 7
+     i := j*8
+     dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) >> 1
+ ENDFOR
+ SSE
+ xmmintrin.h
+ Probability/Statistics

+ Average packed unsigned 8-bit integers in "a" and "b", and store the results in "dst".
+ FOR j := 0 to 7
+     i := j*8
+     dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) >> 1
+ ENDFOR
+ SSE
+ xmmintrin.h
+ Probability/Statistics

+ Average packed unsigned 16-bit integers in "a" and "b", and store the results in "dst".
+ FOR j := 0 to 3
+     i := j*16
+     dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) >> 1
+ ENDFOR
+ SSE
+ xmmintrin.h
+ Probability/Statistics

+ Average packed unsigned 16-bit integers in "a" and "b", and store the results in "dst".
+ FOR j := 0 to 3
+     i := j*16
+     dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) >> 1
+ ENDFOR
+ SSE
+ xmmintrin.h
+ Probability/Statistics
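The rounding-average entries ((a + b + 1) >> 1) are MMX forms; the SSE2 equivalents in Rust are `_mm_avg_epu8` and `_mm_avg_epu16`:

    use core::arch::x86_64::*;

    // Per-lane unsigned rounding averages on 16 bytes / 8 words.
    #[target_feature(enable = "sse2")]
    unsafe fn averages(a: __m128i, b: __m128i) -> (__m128i, __m128i) {
        (_mm_avg_epu8(a, b), _mm_avg_epu16(a, b))
    }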
+ Convert the signed 32-bit integer "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ dst[31:0] := Convert_Int32_To_FP32(b[31:0])
+ dst[127:32] := a[127:32]
+ SSE
+ xmmintrin.h
+ Convert

+ Convert the signed 32-bit integer "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ dst[31:0] := Convert_Int32_To_FP32(b[31:0])
+ dst[127:32] := a[127:32]
+ SSE
+ xmmintrin.h
+ Convert

+ Convert the signed 64-bit integer "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ dst[31:0] := Convert_Int64_To_FP32(b[63:0])
+ dst[127:32] := a[127:32]
+ dst[MAX:128] := 0
+ SSE
+ xmmintrin.h
+ Convert

+ Convert packed 32-bit integers in "b" to packed single-precision (32-bit) floating-point elements, store the results in the lower 2 elements of "dst", and copy the upper 2 packed elements from "a" to the upper elements of "dst".
+ dst[31:0] := Convert_Int32_To_FP32(b[31:0])
+ dst[63:32] := Convert_Int32_To_FP32(b[63:32])
+ dst[95:64] := a[95:64]
+ dst[127:96] := a[127:96]
+ SSE
+ xmmintrin.h
+ Convert

+ Convert packed signed 32-bit integers in "b" to packed single-precision (32-bit) floating-point elements, store the results in the lower 2 elements of "dst", and copy the upper 2 packed elements from "a" to the upper elements of "dst".
+ dst[31:0] := Convert_Int32_To_FP32(b[31:0])
+ dst[63:32] := Convert_Int32_To_FP32(b[63:32])
+ dst[95:64] := a[95:64]
+ dst[127:96] := a[127:96]
+ SSE
+ xmmintrin.h
+ Convert

+ Convert packed 16-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".
+ FOR j := 0 to 3
+     i := j*16
+     m := j*32
+     dst[m+31:m] := Convert_Int16_To_FP32(a[i+15:i])
+ ENDFOR
+ SSE
+ xmmintrin.h
+ Convert

+ Convert packed unsigned 16-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".
+ FOR j := 0 to 3
+     i := j*16
+     m := j*32
+     dst[m+31:m] := Convert_UInt16_To_FP32(a[i+15:i])
+ ENDFOR
+ SSE
+ xmmintrin.h
+ Convert

+ Convert the lower packed 8-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".
+ FOR j := 0 to 3
+     i := j*8
+     m := j*32
+     dst[m+31:m] := Convert_Int8_To_FP32(a[i+7:i])
+ ENDFOR
+ SSE
+ xmmintrin.h
+ Convert

+ Convert the lower packed unsigned 8-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".
+ FOR j := 0 to 3
+     i := j*8
+     m := j*32
+     dst[m+31:m] := Convert_UInt8_To_FP32(a[i+7:i])
+ ENDFOR
+ SSE
+ xmmintrin.h
+ Convert

+ Convert packed signed 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, store the results in the lower 2 elements of "dst", then convert the packed signed 32-bit integers in "b" to single-precision (32-bit) floating-point elements, and store the results in the upper 2 elements of "dst".
+ dst[31:0] := Convert_Int32_To_FP32(a[31:0])
+ dst[63:32] := Convert_Int32_To_FP32(a[63:32])
+ dst[95:64] := Convert_Int32_To_FP32(b[31:0])
+ dst[127:96] := Convert_Int32_To_FP32(b[63:32])
+ SSE
+ xmmintrin.h
+ Convert
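Of the integer-to-float conversions above, only the `__m128`-based ones survive in Rust (`_mm_cvtsi32_ss`, and `_mm_cvtsi64_ss` on x86_64); the `__m64`-based `cvtpi*` forms do not. A sketch:

    use core::arch::x86_64::*;

    // Convert an i32 into lane 0 as f32, keeping lanes 1..3 of `a`.
    #[target_feature(enable = "sse")]
    unsafe fn int_into_lane0(a: __m128, v: i32) -> __m128 {
        _mm_cvtsi32_ss(a, v)
    }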
+ Convert the lower single-precision (32-bit) floating-point element in "a" to a 32-bit integer, and store the result in "dst".
+ dst[31:0] := Convert_FP32_To_Int32(a[31:0])
+ SSE
+ xmmintrin.h
+ Convert

+ Convert the lower single-precision (32-bit) floating-point element in "a" to a 32-bit integer, and store the result in "dst".
+ dst[31:0] := Convert_FP32_To_Int32(a[31:0])
+ SSE
+ xmmintrin.h
+ Convert

+ Convert the lower single-precision (32-bit) floating-point element in "a" to a 64-bit integer, and store the result in "dst".
+ dst[63:0] := Convert_FP32_To_Int64(a[31:0])
+ SSE
+ xmmintrin.h
+ Convert

+ Copy the lower single-precision (32-bit) floating-point element of "a" to "dst".
+ dst[31:0] := a[31:0]
+ SSE
+ xmmintrin.h
+ Convert

+ Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst".
+ FOR j := 0 to 1
+     i := 32*j
+     dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i])
+ ENDFOR
+ SSE
+ xmmintrin.h
+ Convert

+ Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst".
+ FOR j := 0 to 1
+     i := 32*j
+     dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i])
+ ENDFOR
+ SSE
+ xmmintrin.h
+ Convert

+ Convert the lower single-precision (32-bit) floating-point element in "a" to a 32-bit integer with truncation, and store the result in "dst".
+ dst[31:0] := Convert_FP32_To_Int32_Truncate(a[31:0])
+ SSE
+ xmmintrin.h
+ Convert

+ Convert the lower single-precision (32-bit) floating-point element in "a" to a 32-bit integer with truncation, and store the result in "dst".
+ dst[31:0] := Convert_FP32_To_Int32_Truncate(a[31:0])
+ SSE
+ xmmintrin.h
+ Convert

+ Convert the lower single-precision (32-bit) floating-point element in "a" to a 64-bit integer with truncation, and store the result in "dst".
+ dst[63:0] := Convert_FP32_To_Int64_Truncate(a[31:0])
+ SSE
+ xmmintrin.h
+ Convert

+ Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst".
+ FOR j := 0 to 1
+     i := 32*j
+     dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i])
+ ENDFOR
+ SSE
+ xmmintrin.h
+ Convert

+ Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst".
+ FOR j := 0 to 1
+     i := 32*j
+     dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i])
+ ENDFOR
+ SSE
+ xmmintrin.h
+ Convert

+ Convert packed single-precision (32-bit) floating-point elements in "a" to packed 16-bit integers, and store the results in "dst". Note: this intrinsic will generate 0x7FFF, rather than 0x8000, for input values between 0x7FFF and 0x7FFFFFFF.
+ FOR j := 0 to 3
+     i := 16*j
+     k := 32*j
+     IF a[k+31:k] >= FP32(0x7FFF) && a[k+31:k] <= FP32(0x7FFFFFFF)
+         dst[i+15:i] := 0x7FFF
+     ELSE
+         dst[i+15:i] := Convert_FP32_To_Int16(a[k+31:k])
+     FI
+ ENDFOR
+ SSE
+ xmmintrin.h
+ Convert

+ Convert packed single-precision (32-bit) floating-point elements in "a" to packed 8-bit integers, and store the results in the lower 4 elements of "dst". Note: this intrinsic will generate 0x7F, rather than 0x80, for input values between 0x7F and 0x7FFFFFFF.
+ FOR j := 0 to 3
+     i := 8*j
+     k := 32*j
+     IF a[k+31:k] >= FP32(0x7F) && a[k+31:k] <= FP32(0x7FFFFFFF)
+         dst[i+7:i] := 0x7F
+     ELSE
+         dst[i+7:i] := Convert_FP32_To_Int8(a[k+31:k])
+     FI
+ ENDFOR
+ SSE
+ xmmintrin.h
+ Convert
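For the float-to-int direction, Rust keeps the scalar forms; the difference between the CVTSS2SI and CVTTSS2SI entries above is rounding under the current MXCSR mode versus truncation toward zero:

    use core::arch::x86_64::*;

    // Lane 0 as i32: current-rounding-mode conversion vs. truncation.
    #[target_feature(enable = "sse")]
    unsafe fn lane0_to_i32(a: __m128) -> (i32, i32) {
        (_mm_cvtss_si32(a), _mm_cvttss_si32(a))
    }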
+ Store 64-bits of integer data from "a" into memory using a non-temporal memory hint.
+ MEM[mem_addr+63:mem_addr] := a[63:0]
+ SSE
+ immintrin.h
+ Store

+ Conditionally store 8-bit integer elements from "a" into memory using "mask" (elements are not stored when the highest bit is not set in the corresponding element) and a non-temporal memory hint.
+ FOR j := 0 to 7
+     i := j*8
+     IF mask[i+7]
+         MEM[mem_addr+i+7:mem_addr+i] := a[i+7:i]
+     FI
+ ENDFOR
+ SSE
+ immintrin.h
+ Store

+ Conditionally store 8-bit integer elements from "a" into memory using "mask" (elements are not stored when the highest bit is not set in the corresponding element).
+ FOR j := 0 to 7
+     i := j*8
+     IF mask[i+7]
+         MEM[mem_addr+i+7:mem_addr+i] := a[i+7:i]
+     FI
+ ENDFOR
+ SSE
+ immintrin.h
+ Store

+ Store 128-bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "a" into memory using a non-temporal memory hint. "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.
+ MEM[mem_addr+127:mem_addr] := a[127:0]
+ SSE
+ immintrin.h
+ Store
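A non-temporal store bypasses the cache, so it is normally paired with the SFENCE entry earlier in this section before the data is handed to another agent; in Rust:

    use core::arch::x86_64::*;

    // MOVNTPS to a 16-byte-aligned destination, then fence the stream store.
    #[target_feature(enable = "sse")]
    unsafe fn stream_out(dst: *mut f32, v: __m128) {
        _mm_stream_ps(dst, v); // `dst` must be 16-byte aligned
        _mm_sfence();
    }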
+ Store the upper 2 single-precision (32-bit) floating-point elements from "a" into memory.
+ MEM[mem_addr+31:mem_addr] := a[95:64]
+ MEM[mem_addr+63:mem_addr+32] := a[127:96]
+ SSE
+ immintrin.h
+ Store

+ Store the lower 2 single-precision (32-bit) floating-point elements from "a" into memory.
+ MEM[mem_addr+31:mem_addr] := a[31:0]
+ MEM[mem_addr+63:mem_addr+32] := a[63:32]
+ SSE
+ immintrin.h
+ Store

+ Store the lower single-precision (32-bit) floating-point element from "a" into memory. "mem_addr" does not need to be aligned on any particular boundary.
+ MEM[mem_addr+31:mem_addr] := a[31:0]
+ SSE
+ immintrin.h
+ Store

+ Store the lower single-precision (32-bit) floating-point element from "a" into 4 contiguous elements in memory. "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.
+ MEM[mem_addr+31:mem_addr] := a[31:0]
+ MEM[mem_addr+63:mem_addr+32] := a[31:0]
+ MEM[mem_addr+95:mem_addr+64] := a[31:0]
+ MEM[mem_addr+127:mem_addr+96] := a[31:0]
+ SSE
+ immintrin.h
+ Store

+ Store the lower single-precision (32-bit) floating-point element from "a" into 4 contiguous elements in memory. "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.
+ MEM[mem_addr+31:mem_addr] := a[31:0]
+ MEM[mem_addr+63:mem_addr+32] := a[31:0]
+ MEM[mem_addr+95:mem_addr+64] := a[31:0]
+ MEM[mem_addr+127:mem_addr+96] := a[31:0]
+ SSE
+ immintrin.h
+ Store

+ Store 128-bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "a" into memory. "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.
+ MEM[mem_addr+127:mem_addr] := a[127:0]
+ SSE
+ immintrin.h
+ Store

+ Store 128-bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "a" into memory. "mem_addr" does not need to be aligned on any particular boundary.
+ MEM[mem_addr+127:mem_addr] := a[127:0]
+ SSE
+ immintrin.h
+ Store
- - - - Store 4 single-precision (32-bit) floating-point elements from "a" into memory - in reverse order. - "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may - be generated. - - MEM[mem_addr+31:mem_addr] := a[127:96] - MEM[mem_addr+63:mem_addr+32] := a[95:64] - MEM[mem_addr+95:mem_addr+64] := a[63:32] - MEM[mem_addr+127:mem_addr+96] := a[31:0] - - - SSE -
immintrin.h
- Store + + + + Store 4 single-precision (32-bit) floating-point elements from "a" into memory in reverse order. + "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +MEM[mem_addr+31:mem_addr] := a[127:96] +MEM[mem_addr+63:mem_addr+32] := a[95:64] +MEM[mem_addr+95:mem_addr+64] := a[63:32] +MEM[mem_addr+127:mem_addr+96] := a[31:0] + + + SSE +
immintrin.h
+ Store
- - - Create mask from the most significant bit of each 8-bit element in "a", and - store the result in "dst". - - FOR j := 0 to 7 - i := j*8 - dst[j] := a[i+7] - ENDFOR - dst[MAX:8] := 0 - - - SSE -
xmmintrin.h
- Miscellaneous + + + Create mask from the most significant bit of each 8-bit element in "a", and store the result in "dst". + +FOR j := 0 to 7 + i := j*8 + dst[j] := a[i+7] +ENDFOR +dst[MAX:8] := 0 + + + SSE +
xmmintrin.h
+ Miscellaneous
- - - Create mask from the most significant bit of each 8-bit element in "a", and - store the result in "dst". - - FOR j := 0 to 7 - i := j*8 - dst[j] := a[i+7] - ENDFOR - dst[MAX:8] := 0 - - - SSE -
xmmintrin.h
- Miscellaneous + + + Create mask from the most significant bit of each 8-bit element in "a", and store the result in "dst". + +FOR j := 0 to 7 + i := j*8 + dst[j] := a[i+7] +ENDFOR +dst[MAX:8] := 0 + + + SSE +
xmmintrin.h
+ Miscellaneous
- - - Set each bit of mask "dst" based on the most significant bit of the - corresponding packed single-precision (32-bit) floating-point element in "a". - - FOR j := 0 to 3 - i := j*32 - IF a[i+31] - dst[j] := 1 - ELSE - dst[j] := 0 - FI - ENDFOR - dst[MAX:4] := 0 - - - SSE -
xmmintrin.h
- Miscellaneous + + + Set each bit of mask "dst" based on the most significant bit of the corresponding packed single-precision (32-bit) floating-point element in "a". + +FOR j := 0 to 3 + i := j*32 + IF a[i+31] + dst[j] := 1 + ELSE + dst[j] := 0 + FI +ENDFOR +dst[MAX:4] := 0 + + + SSE +
xmmintrin.h
+ Miscellaneous
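Illustrative Rust sketch, not part of the data file: the MOVMSKPS entry above maps onto the already-stable `_mm_movemask_ps` binding in `core::arch::x86_64` (assumes an SSE-capable x86_64 target):

use std::arch::x86_64::*;

// Bit j of the result is the sign bit of element j, per the pseudocode above.
#[target_feature(enable = "sse")]
unsafe fn negative_lanes(v: __m128) -> i32 {
    _mm_movemask_ps(v)
}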
- - - Compute the square root of the lower single-precision (32-bit) floating-point - element in "a", store the result in the lower element of "dst", and copy the upper 3 - packed elements from "a" to the upper elements of "dst". - - dst[31:0] := SQRT(a[31:0]) - dst[127:32] := a[127:32] - - - SSE -
xmmintrin.h
- Elementary Math Functions + + + Compute the square root of the lower single-precision (32-bit) floating-point element in "a", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := SQRT(a[31:0]) +dst[127:32] := a[127:32] + + + SSE +
xmmintrin.h
+ Elementary Math Functions
- - - Compute the square root of packed single-precision (32-bit) floating-point - elements in "a", and store the results in "dst". - - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := SQRT(a[i+31:i]) - ENDFOR - - - SSE -
xmmintrin.h
- Elementary Math Functions + + + Compute the square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := SQRT(a[i+31:i]) +ENDFOR + + + SSE +
xmmintrin.h
+ Elementary Math Functions
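A minimal Rust sketch of the per-lane SQRTPS entry above through the existing bindings (`_mm_loadu_ps`/`_mm_storeu_ps` are the unaligned load and store documented later in this file; illustrative only):

use std::arch::x86_64::*;

#[target_feature(enable = "sse")]
unsafe fn sqrt4(a: [f32; 4]) -> [f32; 4] {
    let v = _mm_loadu_ps(a.as_ptr());   // unaligned load of all four lanes
    let r = _mm_sqrt_ps(v);             // dst[i+31:i] := SQRT(a[i+31:i]) for each lane
    let mut out = [0.0f32; 4];
    _mm_storeu_ps(out.as_mut_ptr(), r);
    out
}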
- - - Compute the approximate reciprocal of the lower single-precision (32-bit) - floating-point element in "a", store the result in the lower element of "dst", and copy - the upper 3 packed elements from "a" to the upper elements of "dst". The maximum - relative error for this approximation is less than 1.5*2^-12. - - dst[31:0] := (1.0 / a[31:0]) - dst[127:32] := a[127:32] - - - SSE -
xmmintrin.h
- Elementary Math Functions + + + Compute the approximate reciprocal of the lower single-precision (32-bit) floating-point element in "a", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 1.5*2^-12. + +dst[31:0] := (1.0 / a[31:0]) +dst[127:32] := a[127:32] + + + SSE +
xmmintrin.h
+ Elementary Math Functions
- - - Compute the approximate reciprocal of packed single-precision (32-bit) - floating-point elements in "a", and store the results in "dst". The maximum relative - error for this approximation is less than 1.5*2^-12. - - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := (1.0 / a[i+31:i]) - ENDFOR - - - SSE -
xmmintrin.h
- Elementary Math Functions + + + Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 1.5*2^-12. + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := (1.0 / a[i+31:i]) +ENDFOR + + + SSE +
xmmintrin.h
+ Elementary Math Functions
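The stated 1.5*2^-12 bound is roughly 12 good bits. A standard follow-up (a general numerical technique, not something this file specifies) is one Newton-Raphson step, x1 = x0*(2 - a*x0), which about doubles the accurate bits; a hedged sketch:

use std::arch::x86_64::*;

#[target_feature(enable = "sse")]
unsafe fn recip_refined(a: __m128) -> __m128 {
    let x0 = _mm_rcp_ps(a); // ~12-bit estimate, per the bound above
    // x1 = x0 * (2 - a*x0)
    _mm_mul_ps(x0, _mm_sub_ps(_mm_set1_ps(2.0), _mm_mul_ps(a, x0)))
}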
- - - Compute the approximate reciprocal square root of the lower single-precision - (32-bit) floating-point element in "a", store the result in the lower element of "dst", - and copy the upper 3 packed elements from "a" to the upper elements of "dst". The - maximum relative error for this approximation is less than 1.5*2^-12. - - dst[31:0] := (1.0 / SQRT(a[31:0])) - dst[127:32] := a[127:32] - - - SSE -
xmmintrin.h
- Elementary Math Functions + + + Compute the approximate reciprocal square root of the lower single-precision (32-bit) floating-point element in "a", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 1.5*2^-12. + +dst[31:0] := (1.0 / SQRT(a[31:0])) +dst[127:32] := a[127:32] + + + SSE +
xmmintrin.h
+ Elementary Math Functions
- - - Compute the approximate reciprocal square root of packed single-precision - (32-bit) floating-point elements in "a", and store the results in "dst". The maximum - relative error for this approximation is less than 1.5*2^-12. - - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := (1.0 / SQRT(a[i+31:i])) - ENDFOR - - - SSE -
xmmintrin.h
- Elementary Math Functions + + + Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 1.5*2^-12. + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := (1.0 / SQRT(a[i+31:i])) +ENDFOR + + + SSE +
xmmintrin.h
+ Elementary Math Functions
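Likewise for RSQRTPS, the usual refinement is x1 = x0*(1.5 - 0.5*a*x0*x0); again a general technique, sketched with the existing bindings:

use std::arch::x86_64::*;

#[target_feature(enable = "sse")]
unsafe fn rsqrt_refined(a: __m128) -> __m128 {
    let x0 = _mm_rsqrt_ps(a);                                    // ~12-bit estimate
    let half_a_x0 = _mm_mul_ps(_mm_mul_ps(_mm_set1_ps(0.5), a), x0); // 0.5*a*x0
    _mm_mul_ps(x0, _mm_sub_ps(_mm_set1_ps(1.5), _mm_mul_ps(half_a_x0, x0)))
}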
- - - - Compute the bitwise AND of packed single-precision (32-bit) floating-point - elements in "a" and "b", and store the results in "dst". - - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := (a[i+31:i] AND b[i+31:i]) - ENDFOR - - - SSE -
xmmintrin.h
- Logical + + + + Compute the bitwise AND of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := (a[i+31:i] AND b[i+31:i]) +ENDFOR + + + SSE +
xmmintrin.h
+ Logical
- - - - Compute the bitwise NOT of packed single-precision (32-bit) floating-point - elements in "a" and then AND with "b", and store the results in "dst". - - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i]) - ENDFOR - - - SSE -
xmmintrin.h
- Logical + + + + Compute the bitwise NOT of packed single-precision (32-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i]) +ENDFOR + + + SSE +
xmmintrin.h
+ Logical
- - - - Compute the bitwise OR of packed single-precision (32-bit) floating-point - elements in "a" and "b", and store the results in "dst". - - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := a[i+31:i] OR b[i+31:i] - ENDFOR - - - SSE -
xmmintrin.h
- Logical + + + + Compute the bitwise OR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := a[i+31:i] OR b[i+31:i] +ENDFOR + + + SSE +
xmmintrin.h
+ Logical
- - - - Compute the bitwise XOR of packed single-precision (32-bit) floating-point - elements in "a" and "b", and store the results in "dst". - - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := a[i+31:i] XOR b[i+31:i] - ENDFOR - - - SSE -
xmmintrin.h
- Logical + + + + Compute the bitwise XOR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := a[i+31:i] XOR b[i+31:i] +ENDFOR + + + SSE +
xmmintrin.h
+ Logical
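These four bitwise entries are mostly used for sign-bit manipulation on floats; a small sketch of the standard idiom (illustrative only):

use std::arch::x86_64::*;

#[target_feature(enable = "sse")]
unsafe fn abs_and_neg(v: __m128) -> (__m128, __m128) {
    let sign = _mm_set1_ps(-0.0);       // only the sign bit set in each lane
    let abs = _mm_andnot_ps(sign, v);   // (NOT sign) AND v clears the sign bits: |v|
    let neg = _mm_xor_ps(v, sign);      // XOR flips the sign bits: -v
    (abs, neg)
}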
- - - - Compare the lower single-precision (32-bit) floating-point elements in "a" and - "b" for equality, store the result in the lower element of "dst", and copy the upper 3 - packed elements from "a" to the upper elements of "dst". - - dst[31:0] := ( a[31:0] == b[31:0] ) ? 0xFFFFFFFF : 0 - dst[127:32] := a[127:32] - - - SSE -
xmmintrin.h
- Compare + + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b" for equality, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := ( a[31:0] == b[31:0] ) ? 0xFFFFFFFF : 0 +dst[127:32] := a[127:32] + + + SSE +
xmmintrin.h
+ Compare
- - - - Compare packed single-precision (32-bit) floating-point elements in "a" and "b" - for equality, and store the results in "dst". - - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := ( a[i+31:i] == b[i+31:i] ) ? 0xFFFFFFFF : 0 - ENDFOR - - - SSE -
xmmintrin.h
- Compare + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for equality, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ( a[i+31:i] == b[i+31:i] ) ? 0xFFFFFFFF : 0 +ENDFOR + + + SSE +
xmmintrin.h
+ Compare
- - - - Compare the lower single-precision (32-bit) floating-point elements in "a" and - "b" for less-than, store the result in the lower element of "dst", and copy the upper 3 - packed elements from "a" to the upper elements of "dst". - - dst[31:0] := ( a[31:0] < b[31:0] ) ? 0xFFFFFFFF : 0 - dst[127:32] := a[127:32] - - - SSE -
xmmintrin.h
- Compare + + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b" for less-than, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := ( a[31:0] < b[31:0] ) ? 0xFFFFFFFF : 0 +dst[127:32] := a[127:32] + + + SSE +
xmmintrin.h
+ Compare
- - - - Compare packed single-precision (32-bit) floating-point elements in "a" and "b" - for less-than, and store the results in "dst". - - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := ( a[i+31:i] < b[i+31:i] ) ? 0xFFFFFFFF : 0 - ENDFOR - - - SSE -
xmmintrin.h
- Compare + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for less-than, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ( a[i+31:i] < b[i+31:i] ) ? 0xFFFFFFFF : 0 +ENDFOR + + + SSE +
xmmintrin.h
+ Compare
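The all-ones/all-zeros lanes these comparisons produce are meant to be composed with the logical ops above. A branchless per-lane select, purely to illustrate the mask pattern (a dedicated `_mm_min_ps` exists for this particular case):

use std::arch::x86_64::*;

#[target_feature(enable = "sse")]
unsafe fn select_smaller(a: __m128, b: __m128) -> __m128 {
    let lt = _mm_cmplt_ps(a, b);                       // 0xFFFFFFFF where a < b, else 0
    _mm_or_ps(_mm_and_ps(lt, a), _mm_andnot_ps(lt, b)) // pick a where lt, else b
}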
- - - - Compare the lower single-precision (32-bit) floating-point elements in "a" and - "b" for less-than-or-equal, store the result in the lower element of "dst", and copy the - upper 3 packed elements from "a" to the upper elements of "dst". - - dst[31:0] := ( a[31:0] <= b[31:0] ) ? 0xFFFFFFFF : 0 - dst[127:32] := a[127:32] - - - SSE -
xmmintrin.h
- Compare + + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b" for less-than-or-equal, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := ( a[31:0] <= b[31:0] ) ? 0xFFFFFFFF : 0 +dst[127:32] := a[127:32] + + + SSE +
xmmintrin.h
+ Compare
- - - - Compare packed single-precision (32-bit) floating-point elements in "a" and "b" - for less-than-or-equal, and store the results in "dst". - - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := ( a[i+31:i] <= b[i+31:i] ) ? 0xFFFFFFFF : 0 - ENDFOR - - - SSE -
xmmintrin.h
- Compare + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for less-than-or-equal, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ( a[i+31:i] <= b[i+31:i] ) ? 0xFFFFFFFF : 0 +ENDFOR + + + SSE +
xmmintrin.h
+ Compare
- - - - Compare the lower single-precision (32-bit) floating-point elements in "a" and - "b" for greater-than, store the result in the lower element of "dst", and copy the upper - 3 packed elements from "a" to the upper elements of "dst". - - dst[31:0] := ( a[31:0] > b[31:0] ) ? 0xFFFFFFFF : 0 - dst[127:32] := a[127:32] - - - SSE -
xmmintrin.h
- Compare + + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b" for greater-than, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := ( a[31:0] > b[31:0] ) ? 0xFFFFFFFF : 0 +dst[127:32] := a[127:32] + + + SSE +
xmmintrin.h
+ Compare
- - - - Compare packed single-precision (32-bit) floating-point elements in "a" and "b" - for greater-than, and store the results in "dst". - - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := ( a[i+31:i] > b[i+31:i] ) ? 0xFFFFFFFF : 0 - ENDFOR - - - SSE -
xmmintrin.h
- Compare + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for greater-than, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ( a[i+31:i] > b[i+31:i] ) ? 0xFFFFFFFF : 0 +ENDFOR + + + SSE +
xmmintrin.h
+ Compare
- - - - Compare the lower single-precision (32-bit) floating-point elements in "a" and - "b" for greater-than-or-equal, store the result in the lower element of "dst", and copy - the upper 3 packed elements from "a" to the upper elements of "dst". - - dst[31:0] := ( a[31:0] >= b[31:0] ) ? 0xFFFFFFFF : 0 - dst[127:32] := a[127:32] - - - SSE -
xmmintrin.h
- Compare + + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b" for greater-than-or-equal, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := ( a[31:0] >= b[31:0] ) ? 0xFFFFFFFF : 0 +dst[127:32] := a[127:32] + + + SSE +
xmmintrin.h
+ Compare
- - - - Compare packed single-precision (32-bit) floating-point elements in "a" and "b" - for greater-than-or-equal, and store the results in "dst". - - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := ( a[i+31:i] >= b[i+31:i] ) ? 0xFFFFFFFF : 0 - ENDFOR - - - SSE -
xmmintrin.h
- Compare + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for greater-than-or-equal, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ( a[i+31:i] >= b[i+31:i] ) ? 0xFFFFFFFF : 0 +ENDFOR + + + SSE +
xmmintrin.h
+ Compare
- - - - Compare the lower single-precision (32-bit) floating-point elements in "a" and - "b" for not-equal, store the result in the lower element of "dst", and copy the upper 3 - packed elements from "a" to the upper elements of "dst". - - dst[31:0] := ( a[31:0] != b[31:0] ) ? 0xFFFFFFFF : 0 - dst[127:32] := a[127:32] - - - SSE -
xmmintrin.h
- Compare + + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b" for not-equal, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := ( a[31:0] != b[31:0] ) ? 0xFFFFFFFF : 0 +dst[127:32] := a[127:32] + + + SSE +
xmmintrin.h
+ Compare
- - - - Compare packed single-precision (32-bit) floating-point elements in "a" and "b" - for not-equal, and store the results in "dst". - - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := ( a[i+31:i] != b[i+31:i] ) ? 0xFFFFFFFF : 0 - ENDFOR - - - SSE -
xmmintrin.h
- Compare + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for not-equal, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ( a[i+31:i] != b[i+31:i] ) ? 0xFFFFFFFF : 0 +ENDFOR + + + SSE +
xmmintrin.h
+ Compare
- - - - Compare the lower single-precision (32-bit) floating-point elements in "a" and - "b" for not-less-than, store the result in the lower element of "dst", and copy the - upper 3 packed elements from "a" to the upper elements of "dst". - - dst[31:0] := (!( a[31:0] < b[31:0] )) ? 0xFFFFFFFF : 0 - dst[127:32] := a[127:32] - - - SSE -
xmmintrin.h
- Compare + + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b" for not-less-than, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := (!( a[31:0] < b[31:0] )) ? 0xFFFFFFFF : 0 +dst[127:32] := a[127:32] + + + SSE +
xmmintrin.h
+ Compare
- - - - Compare packed single-precision (32-bit) floating-point elements in "a" and "b" - for not-less-than, and store the results in "dst". - - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := !( a[i+31:i] < b[i+31:i] ) ? 0xFFFFFFFF : 0 - ENDFOR - - - SSE -
xmmintrin.h
- Compare + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for not-less-than, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := !( a[i+31:i] < b[i+31:i] ) ? 0xFFFFFFFF : 0 +ENDFOR + + + SSE +
xmmintrin.h
+ Compare
- - - - Compare the lower single-precision (32-bit) floating-point elements in "a" and - "b" for not-less-than-or-equal, store the result in the lower element of "dst", and copy - the upper 3 packed elements from "a" to the upper elements of "dst". - - dst[31:0] := (!( a[31:0] <= b[31:0] )) ? 0xFFFFFFFF : 0 - dst[127:32] := a[127:32] - - - SSE -
xmmintrin.h
- Compare + + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b" for not-less-than-or-equal, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := (!( a[31:0] <= b[31:0] )) ? 0xFFFFFFFF : 0 +dst[127:32] := a[127:32] + + + SSE +
xmmintrin.h
+ Compare
- - - - Compare packed single-precision (32-bit) floating-point elements in "a" and "b" - for not-less-than-or-equal, and store the results in "dst". - - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := (!( a[i+31:i] <= b[i+31:i] )) ? 0xFFFFFFFF : 0 - ENDFOR - - - SSE -
xmmintrin.h
- Compare + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for not-less-than-or-equal, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := (!( a[i+31:i] <= b[i+31:i] )) ? 0xFFFFFFFF : 0 +ENDFOR + + + SSE +
xmmintrin.h
+ Compare
- - - - Compare the lower single-precision (32-bit) floating-point elements in "a" and - "b" for not-greater-than, store the result in the lower element of "dst", and copy the - upper 3 packed elements from "a" to the upper elements of "dst". - - dst[31:0] := (!( a[31:0] > b[31:0] )) ? 0xFFFFFFFF : 0 - dst[127:32] := a[127:32] - - - SSE -
xmmintrin.h
- Compare + + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b" for not-greater-than, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := (!( a[31:0] > b[31:0] )) ? 0xFFFFFFFF : 0 +dst[127:32] := a[127:32] + + + SSE +
xmmintrin.h
+ Compare
- - - - Compare packed single-precision (32-bit) floating-point elements in "a" and "b" - for not-greater-than, and store the results in "dst". - - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := (!( a[i+31:i] > b[i+31:i] )) ? 0xFFFFFFFF : 0 - ENDFOR - - - SSE -
xmmintrin.h
- Compare + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for not-greater-than, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := (!( a[i+31:i] > b[i+31:i] )) ? 0xFFFFFFFF : 0 +ENDFOR + + + SSE +
xmmintrin.h
+ Compare
- - - - Compare the lower single-precision (32-bit) floating-point elements in "a" and - "b" for not-greater-than-or-equal, store the result in the lower element of "dst", and - copy the upper 3 packed elements from "a" to the upper elements of "dst". - - dst[31:0] := (!( a[31:0] >= b[31:0] )) ? 0xFFFFFFFF : 0 - dst[127:32] := a[127:32] - - - SSE -
xmmintrin.h
- Compare + + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b" for not-greater-than-or-equal, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := (!( a[31:0] >= b[31:0] )) ? 0xFFFFFFFF : 0 +dst[127:32] := a[127:32] + + + SSE +
xmmintrin.h
+ Compare
- - - - Compare packed single-precision (32-bit) floating-point elements in "a" and "b" - for not-greater-than-or-equal, and store the results in "dst". - - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := (!( a[i+31:i] >= b[i+31:i] )) ? 0xFFFFFFFF : 0 - ENDFOR - - - SSE -
xmmintrin.h
- Compare + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for not-greater-than-or-equal, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := (!( a[i+31:i] >= b[i+31:i] )) ? 0xFFFFFFFF : 0 +ENDFOR + + + SSE +
xmmintrin.h
+ Compare
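Worth noting for the negated predicates above: !(a < b) is not the same as (a >= b) once NaN is involved, because unordered inputs satisfy the NOT forms. A small demonstration through the existing bindings:

use std::arch::x86_64::*;

#[target_feature(enable = "sse")]
unsafe fn nan_lanes() -> (i32, i32) {
    let a = _mm_set1_ps(f32::NAN);
    let b = _mm_set1_ps(1.0);
    let nlt = _mm_movemask_ps(_mm_cmpnlt_ps(a, b)); // 0b1111: NaN is "not less-than"
    let ge = _mm_movemask_ps(_mm_cmpge_ps(a, b));   // 0b0000: ordered >= fails on NaN
    (nlt, ge)
}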
- - - - Compare the lower single-precision (32-bit) floating-point elements in "a" and - "b" to see if neither is NaN, store the result in the lower element of "dst", and copy - the upper 3 packed elements from "a" to the upper elements of "dst". - dst[31:0] := ( a[31:0] != NaN AND b[31:0] != NaN ) ? 0xFFFFFFFF : 0 - dst[127:32] := a[127:32] - - - SSE -
xmmintrin.h
- Compare + + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b" to see if neither is NaN, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + dst[31:0] := ( a[31:0] != NaN AND b[31:0] != NaN ) ? 0xFFFFFFFF : 0 +dst[127:32] := a[127:32] + + + SSE +
xmmintrin.h
+ Compare
- - - - Compare packed single-precision (32-bit) floating-point elements in "a" and "b" - to see if neither is NaN, and store the results in "dst". - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := ( a[i+31:i] != NaN AND b[i+31:i] != NaN ) ? 0xFFFFFFFF : 0 - ENDFOR - - - SSE -
xmmintrin.h
- Compare + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" to see if neither is NaN, and store the results in "dst". + FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ( a[i+31:i] != NaN AND b[i+31:i] != NaN ) ? 0xFFFFFFFF : 0 +ENDFOR + + + SSE +
xmmintrin.h
+ Compare
- - - - Compare the lower single-precision (32-bit) floating-point elements in "a" and - "b" to see if either is NaN, store the result in the lower element of "dst", and copy - the upper 3 packed elements from "a" to the upper elements of "dst". - dst[31:0] := ( a[31:0] == NaN OR b[31:0] == NaN ) ? 0xFFFFFFFF : 0 - dst[127:32] := a[127:32] - - - SSE -
xmmintrin.h
- Compare + + + + Compare the lower single-precision (32-bit) floating-point elements in "a" and "b" to see if either is NaN, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + dst[31:0] := ( a[31:0] == NaN OR b[31:0] == NaN ) ? 0xFFFFFFFF : 0 +dst[127:32] := a[127:32] + + + SSE +
xmmintrin.h
+ Compare
- - - - Compare packed single-precision (32-bit) floating-point elements in "a" and "b" - to see if either is NaN, and store the results in "dst". - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := ( a[i+31:i] == NaN OR b[i+31:i] == NaN ) ? 0xFFFFFFFF : 0 - ENDFOR - - - SSE -
xmmintrin.h
- Compare + + + + Compare packed single-precision (32-bit) floating-point elements in "a" and "b" to see if either is NaN, and store the results in "dst". + FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ( a[i+31:i] == NaN OR b[i+31:i] == NaN ) ? 0xFFFFFFFF : 0 +ENDFOR + + + SSE +
xmmintrin.h
+ Compare
- - - - Compare the lower single-precision (32-bit) floating-point element in "a" and - "b" for equality, and return the boolean result (0 or 1). - RETURN ( a[31:0] != NaN AND b[31:0] != NaN AND a[31:0] == b[31:0] ) ? 1 : 0 - - - SSE -
xmmintrin.h
- Compare + + + + Compare the lower single-precision (32-bit) floating-point element in "a" and "b" for equality, and return the boolean result (0 or 1). + RETURN ( a[31:0] != NaN AND b[31:0] != NaN AND a[31:0] == b[31:0] ) ? 1 : 0 + + + SSE +
xmmintrin.h
+ Compare
- - - - Compare the lower single-precision (32-bit) floating-point element in "a" and - "b" for less-than, and return the boolean result (0 or 1). - RETURN ( a[31:0] != NaN AND b[31:0] != NaN AND a[31:0] < b[31:0] ) ? 1 : 0 - - - SSE -
xmmintrin.h
- Compare + + + + Compare the lower single-precision (32-bit) floating-point element in "a" and "b" for less-than, and return the boolean result (0 or 1). + RETURN ( a[31:0] != NaN AND b[31:0] != NaN AND a[31:0] < b[31:0] ) ? 1 : 0 + + + SSE +
xmmintrin.h
+ Compare
- - - - Compare the lower single-precision (32-bit) floating-point element in "a" and - "b" for less-than-or-equal, and return the boolean result (0 or 1). - RETURN ( a[31:0] != NaN AND b[31:0] != NaN AND a[31:0] <= b[31:0] ) ? 1 : 0 - - - SSE -
xmmintrin.h
- Compare + + + + Compare the lower single-precision (32-bit) floating-point element in "a" and "b" for less-than-or-equal, and return the boolean result (0 or 1). + RETURN ( a[31:0] != NaN AND b[31:0] != NaN AND a[31:0] <= b[31:0] ) ? 1 : 0 + + + SSE +
xmmintrin.h
+ Compare
- - - - Compare the lower single-precision (32-bit) floating-point element in "a" and - "b" for greater-than, and return the boolean result (0 or 1). - RETURN ( a[31:0] != NaN AND b[31:0] != NaN AND a[31:0] > b[31:0] ) ? 1 : 0 - - - SSE -
xmmintrin.h
- Compare + + + + Compare the lower single-precision (32-bit) floating-point element in "a" and "b" for greater-than, and return the boolean result (0 or 1). + RETURN ( a[31:0] != NaN AND b[31:0] != NaN AND a[31:0] > b[31:0] ) ? 1 : 0 + + + SSE +
xmmintrin.h
+ Compare
- - - - Compare the lower single-precision (32-bit) floating-point element in "a" and - "b" for greater-than-or-equal, and return the boolean result (0 or 1). - RETURN ( a[31:0] != NaN AND b[31:0] != NaN AND a[31:0] >= b[31:0] ) ? 1 : 0 - - - SSE -
xmmintrin.h
- Compare + + + + Compare the lower single-precision (32-bit) floating-point element in "a" and "b" for greater-than-or-equal, and return the boolean result (0 or 1). + RETURN ( a[31:0] != NaN AND b[31:0] != NaN AND a[31:0] >= b[31:0] ) ? 1 : 0 + + + SSE +
xmmintrin.h
+ Compare
- - - - Compare the lower single-precision (32-bit) floating-point element in "a" and - "b" for not-equal, and return the boolean result (0 or 1). - RETURN ( a[31:0] == NaN OR b[31:0] == NaN OR a[31:0] != b[31:0] ) ? 1 : 0 - - - SSE -
xmmintrin.h
- Compare + + + + Compare the lower single-precision (32-bit) floating-point element in "a" and "b" for not-equal, and return the boolean result (0 or 1). + RETURN ( a[31:0] == NaN OR b[31:0] == NaN OR a[31:0] != b[31:0] ) ? 1 : 0 + + + SSE +
xmmintrin.h
+ Compare
- - - - Compare the lower single-precision (32-bit) floating-point element in "a" and - "b" for equality, and return the boolean result (0 or 1). This instruction will not - signal an exception for QNaNs. - RETURN ( a[31:0] != NaN AND b[31:0] != NaN AND a[31:0] == b[31:0] ) ? 1 : 0 - - - SSE -
xmmintrin.h
- Compare + + + + Compare the lower single-precision (32-bit) floating-point element in "a" and "b" for equality, and return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs. + RETURN ( a[31:0] != NaN AND b[31:0] != NaN AND a[31:0] == b[31:0] ) ? 1 : 0 + + + SSE +
xmmintrin.h
+ Compare
- - - - Compare the lower single-precision (32-bit) floating-point element in "a" and - "b" for less-than, and return the boolean result (0 or 1). This instruction will not - signal an exception for QNaNs. - RETURN ( a[31:0] != NaN AND b[31:0] != NaN AND a[31:0] < b[31:0] ) ? 1 : 0 - - - SSE -
xmmintrin.h
- Compare + + + + Compare the lower single-precision (32-bit) floating-point element in "a" and "b" for less-than, and return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs. + RETURN ( a[31:0] != NaN AND b[31:0] != NaN AND a[31:0] < b[31:0] ) ? 1 : 0 + + + SSE +
xmmintrin.h
+ Compare
- - - - Compare the lower single-precision (32-bit) floating-point element in "a" and - "b" for less-than-or-equal, and return the boolean result (0 or 1). This instruction - will not signal an exception for QNaNs. - RETURN ( a[31:0] != NaN AND b[31:0] != NaN AND a[31:0] <= b[31:0] ) ? 1 : 0 - - - SSE -
xmmintrin.h
- Compare + + + + Compare the lower single-precision (32-bit) floating-point element in "a" and "b" for less-than-or-equal, and return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs. + RETURN ( a[31:0] != NaN AND b[31:0] != NaN AND a[31:0] <= b[31:0] ) ? 1 : 0 + + + SSE +
xmmintrin.h
+ Compare
- - - - Compare the lower single-precision (32-bit) floating-point element in "a" and - "b" for greater-than, and return the boolean result (0 or 1). This instruction will not - signal an exception for QNaNs. - RETURN ( a[31:0] != NaN AND b[31:0] != NaN AND a[31:0] > b[31:0] ) ? 1 : 0 - - - SSE -
xmmintrin.h
- Compare + + + + Compare the lower single-precision (32-bit) floating-point element in "a" and "b" for greater-than, and return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs. + RETURN ( a[31:0] != NaN AND b[31:0] != NaN AND a[31:0] > b[31:0] ) ? 1 : 0 + + + SSE +
xmmintrin.h
+ Compare
- - - - Compare the lower single-precision (32-bit) floating-point element in "a" and - "b" for greater-than-or-equal, and return the boolean result (0 or 1). This instruction - will not signal an exception for QNaNs. - RETURN ( a[31:0] != NaN AND b[31:0] != NaN AND a[31:0] >= b[31:0] ) ? 1 : 0 - - - SSE -
xmmintrin.h
- Compare + + + + Compare the lower single-precision (32-bit) floating-point element in "a" and "b" for greater-than-or-equal, and return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs. + RETURN ( a[31:0] != NaN AND b[31:0] != NaN AND a[31:0] >= b[31:0] ) ? 1 : 0 + + + SSE +
xmmintrin.h
+ Compare
- - - - Compare the lower single-precision (32-bit) floating-point element in "a" and - "b" for not-equal, and return the boolean result (0 or 1). This instruction will not - signal an exception for QNaNs. - RETURN ( a[31:0] == NaN OR b[31:0] == NaN OR a[31:0] != b[31:0] ) ? 1 : 0 - - - SSE -
xmmintrin.h
- Compare + + + + Compare the lower single-precision (32-bit) floating-point element in "a" and "b" for not-equal, and return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs. + RETURN ( a[31:0] == NaN OR b[31:0] == NaN OR a[31:0] != b[31:0] ) ? 1 : 0 + + + SSE +
xmmintrin.h
+ Compare
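The COMI and UCOMI families above compute the same booleans; they differ only in whether a quiet NaN raises the invalid-operation exception (COMI does, UCOMI does not), which is why only the latter entries carry the QNaN remark. Typical use through the existing bindings:

use std::arch::x86_64::*;

#[target_feature(enable = "sse")]
unsafe fn low_lane_eq(a: __m128, b: __m128) -> bool {
    _mm_ucomieq_ss(a, b) != 0   // quiet comparison of the low lanes only
}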
- - - Copy single-precision (32-bit) floating-point element "a" to the lower element - of "dst", and zero the upper 3 elements. - - dst[31:0] := a[31:0] - dst[127:32] := 0 - - SSE -
xmmintrin.h
- Set + + + Copy single-precision (32-bit) floating-point element "a" to the lower element of "dst", and zero the upper 3 elements. + +dst[31:0] := a[31:0] +dst[127:32] := 0 + + SSE +
xmmintrin.h
+ Set
- - - Broadcast single-precision (32-bit) floating-point value "a" to all elements of - "dst". - - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := a[31:0] - ENDFOR - - SSE -
xmmintrin.h
- Set + + + Broadcast single-precision (32-bit) floating-point value "a" to all elements of "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := a[31:0] +ENDFOR + + SSE +
xmmintrin.h
+ Set
- - - Broadcast single-precision (32-bit) floating-point value "a" to all elements of - "dst". - - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := a[31:0] - ENDFOR - - SSE -
xmmintrin.h
- Set + + + Broadcast single-precision (32-bit) floating-point value "a" to all elements of "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := a[31:0] +ENDFOR + + SSE +
xmmintrin.h
+ Set
- - - - - - Set packed single-precision (32-bit) floating-point elements in "dst" with the - supplied values. - - dst[31:0] := e0 - dst[63:32] := e1 - dst[95:64] := e2 - dst[127:96] := e3 - - SSE -
xmmintrin.h
- Set + + + + + + Set packed single-precision (32-bit) floating-point elements in "dst" with the supplied values. + +dst[31:0] := e0 +dst[63:32] := e1 +dst[95:64] := e2 +dst[127:96] := e3 + + SSE +
xmmintrin.h
+ Set
- - - - - - Set packed single-precision (32-bit) floating-point elements in "dst" with the - supplied values in reverse order. - - dst[31:0] := e3 - dst[63:32] := e2 - dst[95:64] := e1 - dst[127:96] := e0 - - SSE -
xmmintrin.h
- Set + + + + + + Set packed single-precision (32-bit) floating-point elements in "dst" with the supplied values in reverse order. + +dst[31:0] := e3 +dst[63:32] := e2 +dst[95:64] := e1 +dst[127:96] := e0 + + SSE +
xmmintrin.h
+ Set
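The argument order in these two set entries is a common stumbling block: set takes arguments high-to-low while setr ("reverse") takes them low-to-high, so the two calls below build the same register (illustrative sketch):

use std::arch::x86_64::*;

#[target_feature(enable = "sse")]
unsafe fn same_vector() {
    let a = _mm_set_ps(3.0, 2.0, 1.0, 0.0);   // e3..e0: element 0 is 0.0
    let b = _mm_setr_ps(0.0, 1.0, 2.0, 3.0);  // e0..e3: element 0 is 0.0
    debug_assert_eq!(_mm_movemask_ps(_mm_cmpeq_ps(a, b)), 0b1111);
}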
- - - Return vector of type __m128 with all elements set to zero. - - dst[MAX:0] := 0 - - - SSE -
xmmintrin.h
- Set + + + Return vector of type __m128 with all elements set to zero. + +dst[MAX:0] := 0 + + + SSE +
xmmintrin.h
+ Set
- - - - Load 2 single-precision (32-bit) floating-point elements from memory into the - upper 2 elements of "dst", and copy the lower 2 elements from "a" to "dst". "mem_addr" - does not need to be aligned on any particular boundary. - - dst[31:0] := a[31:0] - dst[63:32] := a[63:32] - dst[95:64] := MEM[mem_addr+31:mem_addr] - dst[127:96] := MEM[mem_addr+63:mem_addr+32] - - - SSE -
immintrin.h
- Load + + + + Load 2 single-precision (32-bit) floating-point elements from memory into the upper 2 elements of "dst", and copy the lower 2 elements from "a" to "dst". "mem_addr" does not need to be aligned on any particular boundary. + +dst[31:0] := a[31:0] +dst[63:32] := a[63:32] +dst[95:64] := MEM[mem_addr+31:mem_addr] +dst[127:96] := MEM[mem_addr+63:mem_addr+32] + + + SSE +
immintrin.h
+ Load
- - - - Load 2 single-precision (32-bit) floating-point elements from memory into the - lower 2 elements of "dst", and copy the upper 2 elements from "a" to "dst". "mem_addr" - does not need to be aligned on any particular boundary. - - dst[31:0] := MEM[mem_addr+31:mem_addr] - dst[63:32] := MEM[mem_addr+63:mem_addr+32] - dst[95:64] := a[95:64] - dst[127:96] := a[127:96] - - - SSE -
immintrin.h
- Load + + + + Load 2 single-precision (32-bit) floating-point elements from memory into the lower 2 elements of "dst", and copy the upper 2 elements from "a" to "dst". "mem_addr" does not need to be aligned on any particular boundary. + +dst[31:0] := MEM[mem_addr+31:mem_addr] +dst[63:32] := MEM[mem_addr+63:mem_addr+32] +dst[95:64] := a[95:64] +dst[127:96] := a[127:96] + + + SSE +
immintrin.h
+ Load
- - - Load a single-precision (32-bit) floating-point element from memory into the - lower of "dst", and zero the upper 3 elements. "mem_addr" does not need to be aligned on - any particular boundary. - - dst[31:0] := MEM[mem_addr+31:mem_addr] - dst[127:32] := 0 - - - SSE -
immintrin.h
- Load + + + Load a single-precision (32-bit) floating-point element from memory into the lower element of "dst", and zero the upper 3 elements. "mem_addr" does not need to be aligned on any particular boundary. +
+dst[31:0] := MEM[mem_addr+31:mem_addr]
+dst[127:32] := 0
 +
 +
 SSE
 +
immintrin.h
+ Load
- - - Load a single-precision (32-bit) floating-point element from memory into all - elements of "dst". - - dst[31:0] := MEM[mem_addr+31:mem_addr] - dst[63:32] := MEM[mem_addr+31:mem_addr] - dst[95:64] := MEM[mem_addr+31:mem_addr] - dst[127:96] := MEM[mem_addr+31:mem_addr] - - SSE -
immintrin.h
- Load + + + Load a single-precision (32-bit) floating-point element from memory into all elements of "dst". + +dst[31:0] := MEM[mem_addr+31:mem_addr] +dst[63:32] := MEM[mem_addr+31:mem_addr] +dst[95:64] := MEM[mem_addr+31:mem_addr] +dst[127:96] := MEM[mem_addr+31:mem_addr] + + SSE +
immintrin.h
+ Load
- - - - Load a single-precision (32-bit) floating-point element from memory into all - elements of "dst". - - dst[31:0] := MEM[mem_addr+31:mem_addr] - dst[63:32] := MEM[mem_addr+31:mem_addr] - dst[95:64] := MEM[mem_addr+31:mem_addr] - dst[127:96] := MEM[mem_addr+31:mem_addr] - - SSE -
immintrin.h
- Load + + + + Load a single-precision (32-bit) floating-point element from memory into all elements of "dst". + +dst[31:0] := MEM[mem_addr+31:mem_addr] +dst[63:32] := MEM[mem_addr+31:mem_addr] +dst[95:64] := MEM[mem_addr+31:mem_addr] +dst[127:96] := MEM[mem_addr+31:mem_addr] + + SSE +
immintrin.h
+ Load
- - - Load 128-bits (composed of 4 packed single-precision (32-bit) floating-point - elements) from memory into "dst". - "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may - be generated. - - dst[127:0] := MEM[mem_addr+127:mem_addr] - - - SSE -
immintrin.h
- Load + + + Load 128-bits (composed of 4 packed single-precision (32-bit) floating-point elements) from memory into "dst". + "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +dst[127:0] := MEM[mem_addr+127:mem_addr] + + + SSE +
immintrin.h
+ Load
- - - Load 128-bits (composed of 4 packed single-precision (32-bit) floating-point - elements) from memory into "dst". - "mem_addr" does not need to be aligned on any particular boundary. - - dst[127:0] := MEM[mem_addr+127:mem_addr] - - - SSE -
immintrin.h
- Load + + + Load 128-bits (composed of 4 packed single-precision (32-bit) floating-point elements) from memory into "dst". + "mem_addr" does not need to be aligned on any particular boundary. + +dst[127:0] := MEM[mem_addr+127:mem_addr] + + + SSE +
immintrin.h
+ Load
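The aligned/unaligned load pair above, in Rust terms (the `Aligned` wrapper is just one illustrative way to guarantee the 16-byte boundary):

use std::arch::x86_64::*;

#[repr(align(16))]
struct Aligned([f32; 4]);

#[target_feature(enable = "sse")]
unsafe fn both_loads(raw: &[f32; 5]) -> (__m128, __m128) {
    let buf = Aligned([1.0, 2.0, 3.0, 4.0]);
    let a = _mm_load_ps(buf.0.as_ptr());        // 16-byte aligned by construction
    let u = _mm_loadu_ps(raw.as_ptr().add(1));  // deliberately misaligned: loadu required
    (a, u)
}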
- - - Load 4 single-precision (32-bit) floating-point elements from memory into "dst" - in reverse order. mem_addr must be aligned on a 16-byte boundary or a general-protection - exception may be generated. - - dst[31:0] := MEM[mem_addr+127:mem_addr+96] - dst[63:32] := MEM[mem_addr+95:mem_addr+64] - dst[95:64] := MEM[mem_addr+63:mem_addr+32] - dst[127:96] := MEM[mem_addr+31:mem_addr] - - SSE -
immintrin.h
- Load + + + Load 4 single-precision (32-bit) floating-point elements from memory into "dst" in reverse order. "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. +
+dst[31:0] := MEM[mem_addr+127:mem_addr+96]
+dst[63:32] := MEM[mem_addr+95:mem_addr+64]
+dst[95:64] := MEM[mem_addr+63:mem_addr+32]
+dst[127:96] := MEM[mem_addr+31:mem_addr]
 +
 SSE
 +
immintrin.h
+ Load
- - - - Move the lower single-precision (32-bit) floating-point element from "b" to the - lower element of "dst", and copy the upper 3 packed elements from "a" to the upper - elements of "dst". - - dst[31:0] := b[31:0] - dst[127:32] := a[127:32] - - - SSE -
xmmintrin.h
- Move + + + + Move the lower single-precision (32-bit) floating-point element from "b" to the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := b[31:0] +dst[127:32] := a[127:32] + + + SSE +
xmmintrin.h
+ Move
- - - - Move the upper 2 single-precision (32-bit) floating-point elements from "b" to - the lower 2 elements of "dst", and copy the upper 2 elements from "a" to the upper 2 - elements of "dst". - - dst[31:0] := b[95:64] - dst[63:32] := b[127:96] - dst[95:64] := a[95:64] - dst[127:96] := a[127:96] - - - SSE -
xmmintrin.h
- Move + + + + Move the upper 2 single-precision (32-bit) floating-point elements from "b" to the lower 2 elements of "dst", and copy the upper 2 elements from "a" to the upper 2 elements of "dst". + +dst[31:0] := b[95:64] +dst[63:32] := b[127:96] +dst[95:64] := a[95:64] +dst[127:96] := a[127:96] + + + SSE +
xmmintrin.h
+ Move
- - - - Move the lower 2 single-precision (32-bit) floating-point elements from "b" to - the upper 2 elements of "dst", and copy the lower 2 elements from "a" to the lower 2 - elements of "dst". - - dst[31:0] := a[31:0] - dst[63:32] := a[63:32] - dst[95:64] := b[31:0] - dst[127:96] := b[63:32] - - - SSE -
xmmintrin.h
- Move -
- - - - - - Return vector of type __m128d with undefined elements. - SSE2 -
emmintrin.h
- General Support + + + + Move the lower 2 single-precision (32-bit) floating-point elements from "b" to the upper 2 elements of "dst", and copy the lower 2 elements from "a" to the lower 2 elements of "dst". + +dst[31:0] := a[31:0] +dst[63:32] := a[63:32] +dst[95:64] := b[31:0] +dst[127:96] := b[63:32] + + + SSE +
xmmintrin.h
+ Move +
+ + + + + + Return vector of type __m128d with undefined elements. + SSE2 +
emmintrin.h
+ General Support
- - - Return vector of type __m128i with undefined elements. - SSE2 -
emmintrin.h
- General Support + + + Return vector of type __m128i with undefined elements. + SSE2 +
emmintrin.h
+ General Support
- - - Provide a hint to the processor that the code sequence is a spin-wait loop. - This can help improve the performance and power consumption of spin-wait loops. - - SSE2 -
emmintrin.h
- General Support + + + Provide a hint to the processor that the code sequence is a spin-wait loop. This can help improve the performance and power consumption of spin-wait loops. + + SSE2 +
emmintrin.h
+ General Support
- - - Invalidate and flush the cache line that contains "p" from all levels of the - cache hierarchy. - - SSE2 -
emmintrin.h
- General Support + + + Invalidate and flush the cache line that contains "p" from all levels of the cache hierarchy. + + SSE2 +
emmintrin.h
+ General Support
- - - Perform a serializing operation on all load-from-memory instructions that were - issued prior to this instruction. Guarantees that every load instruction that precedes, - in program order, is globally visible before any load instruction which follows the - fence in program order. - - SSE2 -
emmintrin.h
- General Support + + + Perform a serializing operation on all load-from-memory instructions that were issued prior to this instruction. Guarantees that every load instruction that precedes, in program order, is globally visible before any load instruction which follows the fence in program order. + + SSE2 +
emmintrin.h
+ General Support
- - - Perform a serializing operation on all load-from-memory and store-to-memory - instructions that were issued prior to this instruction. Guarantees that every memory - access that precedes, in program order, the memory fence instruction is globally visible - before any memory instruction which follows the fence in program order. - - SSE2 -
emmintrin.h
- General Support + + + Perform a serializing operation on all load-from-memory and store-to-memory instructions that were issued prior to this instruction. Guarantees that every memory access that precedes, in program order, the memory fence instruction is globally visible before any memory instruction which follows the fence in program order. + + SSE2 +
emmintrin.h
+ General Support
- - - Load unaligned 64-bit integer from memory into the first element of "dst". - - dst[63:0] := MEM[mem_addr+63:mem_addr] - dst[MAX:64] := 0 - - - SSE2 -
immintrin.h
- Load + + + Load unaligned 64-bit integer from memory into the first element of "dst". + +dst[63:0] := MEM[mem_addr+63:mem_addr] +dst[MAX:64] := 0 + + + SSE2 +
immintrin.h
+ Load
- - - Load unaligned 16-bit integer from memory into the first element of "dst". - - dst[15:0] := MEM[mem_addr+15:mem_addr] - dst[MAX:16] := 0 - - SSE2 -
immintrin.h
- Load + + + Load unaligned 16-bit integer from memory into the first element of "dst". + +dst[15:0] := MEM[mem_addr+15:mem_addr] +dst[MAX:16] := 0 + + SSE2 +
immintrin.h
+ Load
- - - Load unaligned 32-bit integer from memory into the first element of "dst". - - dst[31:0] := MEM[mem_addr+31:mem_addr] - dst[MAX:32] := 0 - - - SSE2 -
emmintrin.h
- Load + + + Load unaligned 32-bit integer from memory into the first element of "dst". + +dst[31:0] := MEM[mem_addr+31:mem_addr] +dst[MAX:32] := 0 + + + SSE2 +
emmintrin.h
+ Load
- - - Load 64-bit integer from memory into the first element of "dst". - - dst[63:0] := MEM[mem_addr+63:mem_addr] - dst[MAX:64] := 0 - - - SSE2 -
emmintrin.h
- Load + + + Load 64-bit integer from memory into the first element of "dst". + +dst[63:0] := MEM[mem_addr+63:mem_addr] +dst[MAX:64] := 0 + + + SSE2 +
emmintrin.h
+ Load
- - - Load 128-bits of integer data from memory into "dst". - "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may - be generated. - - dst[127:0] := MEM[mem_addr+127:mem_addr] - - - SSE2 -
emmintrin.h
- Load + + + Load 128-bits of integer data from memory into "dst". + "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +dst[127:0] := MEM[mem_addr+127:mem_addr] + + + SSE2 +
emmintrin.h
+ Load
- - - Load 128-bits of integer data from memory into "dst". - "mem_addr" does not need to be aligned on any particular boundary. - - dst[127:0] := MEM[mem_addr+127:mem_addr] - - - SSE2 -
emmintrin.h
- Load + + + Load 128-bits of integer data from memory into "dst". + "mem_addr" does not need to be aligned on any particular boundary. + +dst[127:0] := MEM[mem_addr+127:mem_addr] + + + SSE2 +
emmintrin.h
+ Load
- - - Load 128-bits (composed of 2 packed double-precision (64-bit) floating-point - elements) from memory into "dst". - "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may - be generated. - - dst[127:0] := MEM[mem_addr+127:mem_addr] - - - SSE2 -
emmintrin.h
- Load + + + Load 128-bits (composed of 2 packed double-precision (64-bit) floating-point elements) from memory into "dst". + "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +dst[127:0] := MEM[mem_addr+127:mem_addr] + + + SSE2 +
emmintrin.h
+ Load
- - - Load a double-precision (64-bit) floating-point element from memory into both - elements of "dst". - - dst[63:0] := MEM[mem_addr+63:mem_addr] - dst[127:64] := MEM[mem_addr+63:mem_addr] - - - SSE2 -
emmintrin.h
- Load + + + Load a double-precision (64-bit) floating-point element from memory into both elements of "dst". + +dst[63:0] := MEM[mem_addr+63:mem_addr] +dst[127:64] := MEM[mem_addr+63:mem_addr] + + + SSE2 +
emmintrin.h
+ Load
- - - Load a double-precision (64-bit) floating-point element from memory into both - elements of "dst". - - dst[63:0] := MEM[mem_addr+63:mem_addr] - dst[127:64] := MEM[mem_addr+63:mem_addr] - - - SSE2 -
emmintrin.h
- Load + + + Load a double-precision (64-bit) floating-point element from memory into both elements of "dst". + +dst[63:0] := MEM[mem_addr+63:mem_addr] +dst[127:64] := MEM[mem_addr+63:mem_addr] + + + SSE2 +
emmintrin.h
+ Load
- - - Load 2 double-precision (64-bit) floating-point elements from memory into "dst" - in reverse order. mem_addr must be aligned on a 16-byte boundary or a general-protection - exception may be generated. - - dst[63:0] := MEM[mem_addr+127:mem_addr+64] - dst[127:64] := MEM[mem_addr+63:mem_addr] - - - SSE2 -
emmintrin.h
- Load + + + Load 2 double-precision (64-bit) floating-point elements from memory into "dst" in reverse order. "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. +
+dst[63:0] := MEM[mem_addr+127:mem_addr+64]
+dst[127:64] := MEM[mem_addr+63:mem_addr]
 +
 +
 SSE2
 +
emmintrin.h
+ Load
- - - Load 128-bits (composed of 2 packed double-precision (64-bit) floating-point - elements) from memory into "dst". - "mem_addr" does not need to be aligned on any particular boundary. - - dst[127:0] := MEM[mem_addr+127:mem_addr] - - - SSE2 -
emmintrin.h
- Load + + + Load 128-bits (composed of 2 packed double-precision (64-bit) floating-point elements) from memory into "dst". + "mem_addr" does not need to be aligned on any particular boundary. + +dst[127:0] := MEM[mem_addr+127:mem_addr] + + + SSE2 +
emmintrin.h
+ Load
- - - Load a double-precision (64-bit) floating-point element from memory into the - lower of "dst", and zero the upper element. "mem_addr" does not need to be aligned on - any particular boundary. - - dst[63:0] := MEM[mem_addr+63:mem_addr] - dst[127:64] := 0 - - - SSE2 -
emmintrin.h
- Load + + + Load a double-precision (64-bit) floating-point element from memory into the lower element of "dst", and zero the upper element. "mem_addr" does not need to be aligned on any particular boundary. +
+dst[63:0] := MEM[mem_addr+63:mem_addr]
+dst[127:64] := 0
 +
 +
 SSE2
 +
emmintrin.h
+ Load
- - - - Load a double-precision (64-bit) floating-point element from memory into the - upper element of "dst", and copy the lower element from "a" to "dst". "mem_addr" does - not need to be aligned on any particular boundary. - - dst[63:0] := a[63:0] - dst[127:64] := MEM[mem_addr+63:mem_addr] - - - SSE2 -
emmintrin.h
- Load + + + + Load a double-precision (64-bit) floating-point element from memory into the upper element of "dst", and copy the lower element from "a" to "dst". "mem_addr" does not need to be aligned on any particular boundary. + +dst[63:0] := a[63:0] +dst[127:64] := MEM[mem_addr+63:mem_addr] + + + SSE2 +
emmintrin.h
+ Load
- - - - Load a double-precision (64-bit) floating-point element from memory into the - lower element of "dst", and copy the upper element from "a" to "dst". "mem_addr" does - not need to be aligned on any particular boundary. - - dst[63:0] := MEM[mem_addr+63:mem_addr] - dst[127:64] := a[127:64] - - - SSE2 -
emmintrin.h
- Load + + + + Load a double-precision (64-bit) floating-point element from memory into the lower element of "dst", and copy the upper element from "a" to "dst". "mem_addr" does not need to be aligned on any particular boundary. + +dst[63:0] := MEM[mem_addr+63:mem_addr] +dst[127:64] := a[127:64] + + + SSE2 +
emmintrin.h
+ Load
- - - - Store 16-bit integer from the first element of "a" into memory. "mem_addr" does - not need to be aligned on any particular boundary. - - MEM[mem_addr+15:mem_addr] := a[15:0] - - SSE2 -
immintrin.h
- Store + + + + Store 16-bit integer from the first element of "a" into memory. "mem_addr" does not need to be aligned on any particular boundary. + +MEM[mem_addr+15:mem_addr] := a[15:0] + + SSE2 +
immintrin.h
+ Store
- - - - Store 64-bit integer from the first element of "a" into memory. "mem_addr" does - not need to be aligned on any particular boundary. - - MEM[mem_addr+63:mem_addr] := a[63:0] - - - SSE2 -
immintrin.h
- Store + + + + Store 64-bit integer from the first element of "a" into memory. "mem_addr" does not need to be aligned on any particular boundary. + +MEM[mem_addr+63:mem_addr] := a[63:0] + + + SSE2 +
immintrin.h
+ Store
- - - - Store 32-bit integer from the first element of "a" into memory. "mem_addr" does - not need to be aligned on any particular boundary. - - MEM[mem_addr+31:mem_addr] := a[31:0] - - - SSE2 -
emmintrin.h
- Store + + + + Store 32-bit integer from the first element of "a" into memory. "mem_addr" does not need to be aligned on any particular boundary. + +MEM[mem_addr+31:mem_addr] := a[31:0] + + + SSE2 +
emmintrin.h
+ Store
- - - - - Conditionally store 8-bit integer elements from "a" into memory using "mask" - (elements are not stored when the highest bit is not set in the corresponding element) - and a non-temporal memory hint. "mem_addr" does not need to be aligned on any particular - boundary. - - FOR j := 0 to 15 - i := j*8 - IF mask[i+7] - MEM[mem_addr+i+7:mem_addr+i] := a[i+7:i] - FI - ENDFOR - - - SSE2 -
emmintrin.h
- Store + + + + + Conditionally store 8-bit integer elements from "a" into memory using "mask" (elements are not stored when the highest bit is not set in the corresponding element) and a non-temporal memory hint. "mem_addr" does not need to be aligned on any particular boundary. + +FOR j := 0 to 15 + i := j*8 + IF mask[i+7] + MEM[mem_addr+i+7:mem_addr+i] := a[i+7:i] + FI +ENDFOR + + + SSE2 +
emmintrin.h
+ Store
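A sketch of the byte-granular masked store above via `_mm_maskmoveu_si128` (the low-half mask is an arbitrary example chosen for illustration):

use std::arch::x86_64::*;

#[target_feature(enable = "sse2")]
unsafe fn store_low_half(src: __m128i, dst: *mut i8) {
    let mask = _mm_set_epi64x(0, -1);    // top bit set in each of the 8 low bytes only
    _mm_maskmoveu_si128(src, mask, dst); // writes dst[0..8], leaves dst[8..16] untouched
}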
- - - - Store 128-bits of integer data from "a" into memory. - "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may - be generated. - - MEM[mem_addr+127:mem_addr] := a[127:0] - - - SSE2 -
emmintrin.h
- Store + + + + Store 128-bits of integer data from "a" into memory. + "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +MEM[mem_addr+127:mem_addr] := a[127:0] + + + SSE2 +
emmintrin.h
+ Store
- - - - Store 128-bits of integer data from "a" into memory. - "mem_addr" does not need to be aligned on any particular boundary. - - MEM[mem_addr+127:mem_addr] := a[127:0] - - - SSE2 -
emmintrin.h
- Store + + + + Store 128-bits of integer data from "a" into memory. + "mem_addr" does not need to be aligned on any particular boundary. + +MEM[mem_addr+127:mem_addr] := a[127:0] + + + SSE2 +
emmintrin.h
+ Store
- - - - Store 64-bit integer from the first element of "a" into memory. - - MEM[mem_addr+63:mem_addr] := a[63:0] - - - SSE2 -
emmintrin.h
- Store + + + + Store 64-bit integer from the first element of "a" into memory. + +MEM[mem_addr+63:mem_addr] := a[63:0] + + + SSE2 +
emmintrin.h
+ Store
- - - - Store 128-bits of integer data from "a" into memory using a non-temporal memory - hint. - "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may - be generated. - - MEM[mem_addr+127:mem_addr] := a[127:0] - - - SSE2 -
emmintrin.h
- Store + + + + Store 128-bits of integer data from "a" into memory using a non-temporal memory hint. + "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +MEM[mem_addr+127:mem_addr] := a[127:0] + + + SSE2 +
emmintrin.h
+ Store
- - - - Store 32-bit integer "a" into memory using a non-temporal hint to minimize - cache pollution. If the cache line containing address "mem_addr" is already in the - cache, the cache will be updated. - - MEM[mem_addr+31:mem_addr] := a[31:0] - - - SSE2 -
emmintrin.h
- Store + + + + Store 32-bit integer "a" into memory using a non-temporal hint to minimize cache pollution. If the cache line containing address "mem_addr" is already in the cache, the cache will be updated. + +MEM[mem_addr+31:mem_addr] := a[31:0] + + + SSE2 +
emmintrin.h
+ Store
- - - - Store 64-bit integer "a" into memory using a non-temporal hint to minimize - cache pollution. If the cache line containing address "mem_addr" is already in the - cache, the cache will be updated. - - MEM[mem_addr+63:mem_addr] := a[63:0] - - - SSE2 -
emmintrin.h
- Store + + + + Store 64-bit integer "a" into memory using a non-temporal hint to minimize cache pollution. If the cache line containing address "mem_addr" is already in the cache, the cache will be updated. + +MEM[mem_addr+63:mem_addr] := a[63:0] + + + SSE2 +
emmintrin.h
+ Store
- - - - Store 128-bits (composed of 2 packed double-precision (64-bit) floating-point - elements) from "a" into memory using a non-temporal memory hint. - "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may - be generated. - - MEM[mem_addr+127:mem_addr] := a[127:0] - - - SSE2 -
emmintrin.h
- Store + + + + Store 128-bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "a" into memory using a non-temporal memory hint. + "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +MEM[mem_addr+127:mem_addr] := a[127:0] + + + SSE2 +
emmintrin.h
+ Store
- - - - Store the lower double-precision (64-bit) floating-point element from "a" into - memory. "mem_addr" does not need to be aligned on any particular boundary. - - MEM[mem_addr+63:mem_addr] := a[63:0] - - - SSE2 -
emmintrin.h
- Store + + + + Store the lower double-precision (64-bit) floating-point element from "a" into memory. "mem_addr" does not need to be aligned on any particular boundary. + +MEM[mem_addr+63:mem_addr] := a[63:0] + + + SSE2 +
emmintrin.h
+ Store
- - - - Store the lower double-precision (64-bit) floating-point element from "a" into - 2 contiguous elements in memory. "mem_addr" must be aligned on a 16-byte boundary or a - general-protection exception may be generated. - - MEM[mem_addr+63:mem_addr] := a[63:0] - MEM[mem_addr+127:mem_addr+64] := a[63:0] - - SSE2 -
emmintrin.h
- Store + + + + Store the lower double-precision (64-bit) floating-point element from "a" into 2 contiguous elements in memory. "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +MEM[mem_addr+63:mem_addr] := a[63:0] +MEM[mem_addr+127:mem_addr+64] := a[63:0] + + SSE2 +
emmintrin.h
+ Store
- - - - Store the lower double-precision (64-bit) floating-point element from "a" into - 2 contiguous elements in memory. "mem_addr" must be aligned on a 16-byte boundary or a - general-protection exception may be generated. - - MEM[mem_addr+63:mem_addr] := a[63:0] - MEM[mem_addr+127:mem_addr+64] := a[63:0] - - SSE2 -
emmintrin.h
- Store + + + + Store the lower double-precision (64-bit) floating-point element from "a" into 2 contiguous elements in memory. "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +MEM[mem_addr+63:mem_addr] := a[63:0] +MEM[mem_addr+127:mem_addr+64] := a[63:0] + + SSE2 +
emmintrin.h
+ Store
- - - - Store 128-bits (composed of 2 packed double-precision (64-bit) floating-point - elements) from "a" into memory. - "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may - be generated. - - MEM[mem_addr+127:mem_addr] := a[127:0] - - - SSE2 -
emmintrin.h
- Store + + + + Store 128-bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "a" into memory. + "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +MEM[mem_addr+127:mem_addr] := a[127:0] + + + SSE2 +
emmintrin.h
+ Store
- - - - Store 128-bits (composed of 2 packed double-precision (64-bit) floating-point - elements) from "a" into memory. - "mem_addr" does not need to be aligned on any particular boundary. - - MEM[mem_addr+127:mem_addr] := a[127:0] - - - SSE2 -
emmintrin.h
- Store + + + + Store 128-bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "a" into memory. + "mem_addr" does not need to be aligned on any particular boundary. + +MEM[mem_addr+127:mem_addr] := a[127:0] + + + SSE2 +
emmintrin.h
+ Store
- - - - Store 2 double-precision (64-bit) floating-point elements from "a" into memory - in reverse order. - "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may - be generated. - - MEM[mem_addr+63:mem_addr] := a[127:64] - MEM[mem_addr+127:mem_addr+64] := a[63:0] - - SSE2 -
emmintrin.h
- Store + + + + Store 2 double-precision (64-bit) floating-point elements from "a" into memory in reverse order. + "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +MEM[mem_addr+63:mem_addr] := a[127:64] +MEM[mem_addr+127:mem_addr+64] := a[63:0] + + SSE2 +
emmintrin.h
+ Store
- - - - Store the upper double-precision (64-bit) floating-point element from "a" into - memory. - - MEM[mem_addr+63:mem_addr] := a[127:64] - - - SSE2 -
emmintrin.h
- Store + + + + Store the upper double-precision (64-bit) floating-point element from "a" into memory. + +MEM[mem_addr+63:mem_addr] := a[127:64] + + + SSE2 +
emmintrin.h
+ Store
- - - - Store the lower double-precision (64-bit) floating-point element from "a" into - memory. - - MEM[mem_addr+63:mem_addr] := a[63:0] - - - SSE2 -
emmintrin.h
- Store + + + + Store the lower double-precision (64-bit) floating-point element from "a" into memory. + +MEM[mem_addr+63:mem_addr] := a[63:0] + + + SSE2 +
emmintrin.h
+ Store
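Each of these entries is just a MEM[...] assignment, so the quickest sanity check is to run the matching stdarch intrinsic against a plain array. A minimal sketch, assuming the unaligned 128-bit store and the upper-element store above correspond to _mm_storeu_si128 and _mm_storeh_pd in std::arch::x86_64 (the XML name attributes are not visible in this hunk):

    use std::arch::x86_64::*;

    fn main() {
        unsafe {
            // MEM[mem_addr+127:mem_addr] := a[127:0], no alignment required
            let a = _mm_set1_epi8(7);
            let mut buf = [0i8; 16];
            _mm_storeu_si128(buf.as_mut_ptr() as *mut __m128i, a);
            assert!(buf.iter().all(|&b| b == 7));

            // MEM[mem_addr+63:mem_addr] := a[127:64] (the upper-element store)
            let v = _mm_set_pd(2.0, 1.0); // a[127:64] = 2.0, a[63:0] = 1.0
            let mut hi = 0.0f64;
            _mm_storeh_pd(&mut hi, v);
            assert_eq!(hi, 2.0);
        }
    }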
+ Arithmetic (SSE2, emmintrin.h): Add packed 8-bit integers in "a" and "b", and store the results in "dst".
+   FOR j := 0 to 15
+     i := j*8
+     dst[i+7:i] := a[i+7:i] + b[i+7:i]
+   ENDFOR
+ Arithmetic (SSE2, emmintrin.h): Add packed 16-bit integers in "a" and "b", and store the results in "dst".
+   FOR j := 0 to 7
+     i := j*16
+     dst[i+15:i] := a[i+15:i] + b[i+15:i]
+   ENDFOR
+ Arithmetic (SSE2, emmintrin.h): Add packed 32-bit integers in "a" and "b", and store the results in "dst".
+   FOR j := 0 to 3
+     i := j*32
+     dst[i+31:i] := a[i+31:i] + b[i+31:i]
+   ENDFOR
+ Arithmetic (SSE2, emmintrin.h): Add 64-bit integers "a" and "b", and store the result in "dst".
+   dst[63:0] := a[63:0] + b[63:0]
+ Arithmetic (SSE2, emmintrin.h): Add packed 64-bit integers in "a" and "b", and store the results in "dst".
+   FOR j := 0 to 1
+     i := j*64
+     dst[i+63:i] := a[i+63:i] + b[i+63:i]
+   ENDFOR
+ Arithmetic (SSE2, emmintrin.h): Add packed signed 8-bit integers in "a" and "b" using saturation, and store the results in "dst".
+   FOR j := 0 to 15
+     i := j*8
+     dst[i+7:i] := Saturate8( a[i+7:i] + b[i+7:i] )
+   ENDFOR
+ Arithmetic (SSE2, emmintrin.h): Add packed signed 16-bit integers in "a" and "b" using saturation, and store the results in "dst".
+   FOR j := 0 to 7
+     i := j*16
+     dst[i+15:i] := Saturate16( a[i+15:i] + b[i+15:i] )
+   ENDFOR
+ Arithmetic (SSE2, emmintrin.h): Add packed unsigned 8-bit integers in "a" and "b" using saturation, and store the results in "dst".
+   FOR j := 0 to 15
+     i := j*8
+     dst[i+7:i] := SaturateU8( a[i+7:i] + b[i+7:i] )
+   ENDFOR
+ Arithmetic (SSE2, emmintrin.h): Add packed unsigned 16-bit integers in "a" and "b" using saturation, and store the results in "dst".
+   FOR j := 0 to 7
+     i := j*16
+     dst[i+15:i] := SaturateU16( a[i+15:i] + b[i+15:i] )
+   ENDFOR
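The plain and saturating forms differ only in the Saturate*() wrapper, which is easiest to see at the boundaries. A hedged sketch, assuming these entries are _mm_add_epi8 and _mm_adds_epi8 from std::arch::x86_64:

    use std::arch::x86_64::*;

    fn main() {
        unsafe {
            let a = _mm_set1_epi8(120);
            let b = _mm_set1_epi8(20);
            // dst[i+7:i] := a[i+7:i] + b[i+7:i]  (wraps: 120 + 20 = -116 as i8)
            let wrapped = _mm_add_epi8(a, b);
            // dst[i+7:i] := Saturate8( a[i+7:i] + b[i+7:i] )  (clamps at 127)
            let saturated = _mm_adds_epi8(a, b);
            let (mut w, mut s) = ([0i8; 16], [0i8; 16]);
            _mm_storeu_si128(w.as_mut_ptr() as *mut __m128i, wrapped);
            _mm_storeu_si128(s.as_mut_ptr() as *mut __m128i, saturated);
            assert_eq!(w[0], (120i8).wrapping_add(20));
            assert_eq!(s[0], (120i8).saturating_add(20));
        }
    }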
+ Arithmetic (SSE2, emmintrin.h): Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, and pack the results in "dst".
+   FOR j := 0 to 3
+     i := j*32
+     dst[i+31:i] := SignExtend32(a[i+31:i+16]*b[i+31:i+16]) + SignExtend32(a[i+15:i]*b[i+15:i])
+   ENDFOR
+ Arithmetic (SSE2, emmintrin.h): Multiply the packed signed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst".
+   FOR j := 0 to 7
+     i := j*16
+     tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])
+     dst[i+15:i] := tmp[31:16]
+   ENDFOR
+ Arithmetic (SSE2, emmintrin.h): Multiply the packed unsigned 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst".
+   FOR j := 0 to 7
+     i := j*16
+     tmp[31:0] := a[i+15:i] * b[i+15:i]
+     dst[i+15:i] := tmp[31:16]
+   ENDFOR
+ Arithmetic (SSE2, emmintrin.h): Multiply the packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in "dst".
+   FOR j := 0 to 7
+     i := j*16
+     tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])
+     dst[i+15:i] := tmp[15:0]
+   ENDFOR
+ Arithmetic (SSE2, emmintrin.h): Multiply the low unsigned 32-bit integers from "a" and "b", and store the unsigned 64-bit result in "dst".
+   dst[63:0] := a[31:0] * b[31:0]
+ Arithmetic (SSE2, emmintrin.h): Multiply the low unsigned 32-bit integers from each packed 64-bit element in "a" and "b", and store the unsigned 64-bit results in "dst".
+   FOR j := 0 to 1
+     i := j*64
+     dst[i+63:i] := a[i+31:i] * b[i+31:i]
+   ENDFOR
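The high/low pair above describes the two halves of one 32-bit product, so recombining them should reproduce a full multiply. A sketch under the assumption that these entries are _mm_mulhi_epi16 and _mm_mullo_epi16:

    use std::arch::x86_64::*;

    fn main() {
        unsafe {
            let a = _mm_set1_epi16(1234);
            let b = _mm_set1_epi16(5678);
            // tmp[31:0] := SignExtend32(a) * SignExtend32(b); hi and lo halves of tmp
            let hi = _mm_mulhi_epi16(a, b);
            let lo = _mm_mullo_epi16(a, b);
            // _mm_extract_epi16 zero-extends the lane into an i32
            let full = (_mm_extract_epi16::<0>(hi) << 16) | (_mm_extract_epi16::<0>(lo) & 0xFFFF);
            assert_eq!(full, 1234 * 5678);
        }
    }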
+ Miscellaneous (SSE2, emmintrin.h): Compute the absolute differences of packed unsigned 8-bit integers in "a" and "b", then horizontally sum each consecutive 8 differences to produce two unsigned 16-bit integers, and pack these unsigned 16-bit integers in the low 16 bits of 64-bit elements in "dst".
+   FOR j := 0 to 15
+     i := j*8
+     tmp[i+7:i] := ABS(a[i+7:i] - b[i+7:i])
+   ENDFOR
+   FOR j := 0 to 1
+     i := j*64
+     dst[i+15:i] := tmp[i+7:i] + tmp[i+15:i+8] + tmp[i+23:i+16] + tmp[i+31:i+24] +
+                    tmp[i+39:i+32] + tmp[i+47:i+40] + tmp[i+55:i+48] + tmp[i+63:i+56]
+     dst[i+63:i+16] := 0
+   ENDFOR
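The two loops reduce sixteen absolute byte differences into two 16-bit sums, one per 64-bit lane, and the second loop zeroes dst[63:16], so the low lane can be read back as a plain 32-bit value. A sketch assuming this entry is _mm_sad_epu8:

    use std::arch::x86_64::*;

    fn main() {
        unsafe {
            let a = _mm_set1_epi8(10);
            let b = _mm_set1_epi8(3);
            // Each 64-bit lane holds the sum of eight |a - b| byte differences:
            // 8 * |10 - 3| = 56 in dst[15:0] and dst[79:64].
            let sad = _mm_sad_epu8(a, b);
            assert_eq!(_mm_cvtsi128_si32(sad), 56);
        }
    }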
+ Arithmetic (SSE2, emmintrin.h): Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a", and store the results in "dst".
+   FOR j := 0 to 15
+     i := j*8
+     dst[i+7:i] := a[i+7:i] - b[i+7:i]
+   ENDFOR
+ Arithmetic (SSE2, emmintrin.h): Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a", and store the results in "dst".
+   FOR j := 0 to 7
+     i := j*16
+     dst[i+15:i] := a[i+15:i] - b[i+15:i]
+   ENDFOR
+ Arithmetic (SSE2, emmintrin.h): Subtract packed 32-bit integers in "b" from packed 32-bit integers in "a", and store the results in "dst".
+   FOR j := 0 to 3
+     i := j*32
+     dst[i+31:i] := a[i+31:i] - b[i+31:i]
+   ENDFOR
+ Arithmetic (SSE2, emmintrin.h): Subtract 64-bit integer "b" from 64-bit integer "a", and store the result in "dst".
+   dst[63:0] := a[63:0] - b[63:0]
+ Arithmetic (SSE2, emmintrin.h): Subtract packed 64-bit integers in "b" from packed 64-bit integers in "a", and store the results in "dst".
+   FOR j := 0 to 1
+     i := j*64
+     dst[i+63:i] := a[i+63:i] - b[i+63:i]
+   ENDFOR
+ Arithmetic (SSE2, emmintrin.h): Subtract packed signed 8-bit integers in "b" from packed 8-bit integers in "a" using saturation, and store the results in "dst".
+   FOR j := 0 to 15
+     i := j*8
+     dst[i+7:i] := Saturate8(a[i+7:i] - b[i+7:i])
+   ENDFOR
+ Arithmetic (SSE2, emmintrin.h): Subtract packed signed 16-bit integers in "b" from packed 16-bit integers in "a" using saturation, and store the results in "dst".
+   FOR j := 0 to 7
+     i := j*16
+     dst[i+15:i] := Saturate16(a[i+15:i] - b[i+15:i])
+   ENDFOR
+ Arithmetic (SSE2, emmintrin.h): Subtract packed unsigned 8-bit integers in "b" from packed unsigned 8-bit integers in "a" using saturation, and store the results in "dst".
+   FOR j := 0 to 15
+     i := j*8
+     dst[i+7:i] := SaturateU8(a[i+7:i] - b[i+7:i])
+   ENDFOR
+ Arithmetic (SSE2, emmintrin.h): Subtract packed unsigned 16-bit integers in "b" from packed unsigned 16-bit integers in "a" using saturation, and store the results in "dst".
+   FOR j := 0 to 7
+     i := j*16
+     dst[i+15:i] := SaturateU16(a[i+15:i] - b[i+15:i])
+   ENDFOR
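As with the additions, the only difference between the wrapping and saturating subtractions is the clamp, and the unsigned form floors at zero. A sketch assuming _mm_sub_epi16 and _mm_subs_epu16:

    use std::arch::x86_64::*;

    fn main() {
        unsafe {
            let a = _mm_set1_epi16(5);
            let b = _mm_set1_epi16(9);
            // SaturateU16(5 - 9) clamps to 0 instead of wrapping to 65532
            let floored = _mm_subs_epu16(a, b);
            let wrapped = _mm_sub_epi16(a, b);
            assert_eq!(_mm_extract_epi16::<0>(floored), 0);
            assert_eq!(_mm_extract_epi16::<0>(wrapped), 0xFFFC); // -4 as u16
        }
    }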
+ Arithmetic (SSE2, emmintrin.h): Add the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".
+   dst[63:0] := a[63:0] + b[63:0]
+   dst[127:64] := a[127:64]
+ Arithmetic (SSE2, emmintrin.h): Add packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst".
+   FOR j := 0 to 1
+     i := j*64
+     dst[i+63:i] := a[i+63:i] + b[i+63:i]
+   ENDFOR
+ Arithmetic (SSE2, emmintrin.h): Divide the lower double-precision (64-bit) floating-point element in "a" by the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".
+   dst[63:0] := a[63:0] / b[63:0]
+   dst[127:64] := a[127:64]
+ Arithmetic (SSE2, emmintrin.h): Divide packed double-precision (64-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst".
+   FOR j := 0 to 1
+     i := 64*j
+     dst[i+63:i] := a[i+63:i] / b[i+63:i]
+   ENDFOR
+ Arithmetic (SSE2, emmintrin.h): Multiply the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".
+   dst[63:0] := a[63:0] * b[63:0]
+   dst[127:64] := a[127:64]
+ Arithmetic (SSE2, emmintrin.h): Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst".
+   FOR j := 0 to 1
+     i := j*64
+     dst[i+63:i] := a[i+63:i] * b[i+63:i]
+   ENDFOR
+ Arithmetic (SSE2, emmintrin.h): Subtract the lower double-precision (64-bit) floating-point element in "b" from the lower double-precision (64-bit) floating-point element in "a", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".
+   dst[63:0] := a[63:0] - b[63:0]
+   dst[127:64] := a[127:64]
+ Arithmetic (SSE2, emmintrin.h): Subtract packed double-precision (64-bit) floating-point elements in "b" from packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".
+   FOR j := 0 to 1
+     i := j*64
+     dst[i+63:i] := a[i+63:i] - b[i+63:i]
+   ENDFOR
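The _sd entries compute in the low lane only and pass the upper lane of "a" through unchanged, while the _pd entries operate on both lanes; the dst[127:64] := a[127:64] line is the part worth testing. A sketch assuming _mm_add_sd and _mm_add_pd:

    use std::arch::x86_64::*;

    fn main() {
        unsafe {
            let a = _mm_setr_pd(2.0, 10.0); // a[63:0] = 2.0, a[127:64] = 10.0
            let b = _mm_setr_pd(4.0, 100.0);
            let s = _mm_add_sd(a, b); // low lane added, upper copied from "a"
            let p = _mm_add_pd(a, b); // both lanes added
            let (mut so, mut po) = ([0.0f64; 2], [0.0f64; 2]);
            _mm_storeu_pd(so.as_mut_ptr(), s);
            _mm_storeu_pd(po.as_mut_ptr(), p);
            assert_eq!(so, [6.0, 10.0]);
            assert_eq!(po, [6.0, 110.0]);
        }
    }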
+ Probability/Statistics (SSE2, emmintrin.h): Average packed unsigned 8-bit integers in "a" and "b", and store the results in "dst".
+   FOR j := 0 to 15
+     i := j*8
+     dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) >> 1
+   ENDFOR
+ Probability/Statistics (SSE2, emmintrin.h): Average packed unsigned 16-bit integers in "a" and "b", and store the results in "dst".
+   FOR j := 0 to 7
+     i := j*16
+     dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) >> 1
+   ENDFOR
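The +1 before the shift makes this a round-half-up average rather than a truncating one. A one-assertion sketch, assuming the 8-bit entry is _mm_avg_epu8:

    use std::arch::x86_64::*;

    fn main() {
        unsafe {
            // (1 + 2 + 1) >> 1 = 2, where a truncating average would give 1
            let r = _mm_avg_epu8(_mm_set1_epi8(1), _mm_set1_epi8(2));
            assert_eq!(_mm_extract_epi16::<0>(r) & 0xFF, 2);
        }
    }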
+ Special Math Functions (SSE2, emmintrin.h): Compare packed signed 16-bit integers in "a" and "b", and store packed maximum values in "dst".
+   FOR j := 0 to 7
+     i := j*16
+     dst[i+15:i] := MAX(a[i+15:i], b[i+15:i])
+   ENDFOR
+ Special Math Functions (SSE2, emmintrin.h): Compare packed unsigned 8-bit integers in "a" and "b", and store packed maximum values in "dst".
+   FOR j := 0 to 15
+     i := j*8
+     dst[i+7:i] := MAX(a[i+7:i], b[i+7:i])
+   ENDFOR
+ Special Math Functions (SSE2, emmintrin.h): Compare packed signed 16-bit integers in "a" and "b", and store packed minimum values in "dst".
+   FOR j := 0 to 7
+     i := j*16
+     dst[i+15:i] := MIN(a[i+15:i], b[i+15:i])
+   ENDFOR
+ Special Math Functions (SSE2, emmintrin.h): Compare packed unsigned 8-bit integers in "a" and "b", and store packed minimum values in "dst".
+   FOR j := 0 to 15
+     i := j*8
+     dst[i+7:i] := MIN(a[i+7:i], b[i+7:i])
+   ENDFOR
+ Special Math Functions (SSE2, emmintrin.h): Compare the lower double-precision (64-bit) floating-point elements in "a" and "b", store the maximum value in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". [max_float_note]
+   dst[63:0] := MAX(a[63:0], b[63:0])
+   dst[127:64] := a[127:64]
+ Special Math Functions (SSE2, emmintrin.h): Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst". [max_float_note]
+   FOR j := 0 to 1
+     i := j*64
+     dst[i+63:i] := MAX(a[i+63:i], b[i+63:i])
+   ENDFOR
+ Special Math Functions (SSE2, emmintrin.h): Compare the lower double-precision (64-bit) floating-point elements in "a" and "b", store the minimum value in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". [min_float_note]
+   dst[63:0] := MIN(a[63:0], b[63:0])
+   dst[127:64] := a[127:64]
+ Special Math Functions (SSE2, emmintrin.h): Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst". [min_float_note]
+   FOR j := 0 to 1
+     i := j*64
+     dst[i+63:i] := MIN(a[i+63:i], b[i+63:i])
+   ENDFOR
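Signedness here comes from the element type, not a flag: the 16-bit min/max are signed and the 8-bit ones unsigned, so the same bit pattern can order differently. A sketch assuming _mm_max_epi16 and _mm_max_epu8:

    use std::arch::x86_64::*;

    fn main() {
        unsafe {
            // max_epi16 is signed: MAX(-1, 1) = 1
            let m = _mm_max_epi16(_mm_set1_epi16(-1), _mm_set1_epi16(1));
            assert_eq!(_mm_extract_epi16::<0>(m), 1);
            // max_epu8 is unsigned: the -1 byte is 0xFF, so MAX(0xFF, 1) = 0xFF
            let u = _mm_max_epu8(_mm_set1_epi8(-1), _mm_set1_epi8(1));
            assert_eq!(_mm_extract_epi16::<0>(u) & 0xFF, 0xFF);
        }
    }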
- - - - Shift "a" left by "imm8" bytes while shifting in zeros, and store the results - in "dst". - - tmp := imm8[7:0] - IF tmp > 15 - tmp := 16 - FI - dst[127:0] := a[127:0] << (tmp*8) - - - SSE2 -
emmintrin.h
- Shift + + + + Shift "a" left by "imm8" bytes while shifting in zeros, and store the results in "dst". + +tmp := imm8[7:0] +IF tmp > 15 + tmp := 16 +FI +dst[127:0] := a[127:0] << (tmp*8) + + + SSE2 +
emmintrin.h
+ Shift
- - - - Shift "a" left by "imm8" bytes while shifting in zeros, and store the results - in "dst". - - tmp := imm8[7:0] - IF tmp > 15 - tmp := 16 - FI - dst[127:0] := a[127:0] << (tmp*8) - - - SSE2 -
emmintrin.h
- Shift + + + + Shift "a" left by "imm8" bytes while shifting in zeros, and store the results in "dst". + +tmp := imm8[7:0] +IF tmp > 15 + tmp := 16 +FI +dst[127:0] := a[127:0] << (tmp*8) + + + SSE2 +
emmintrin.h
+ Shift
- - - - Shift "a" right by "imm8" bytes while shifting in zeros, and store the results - in "dst". - - tmp := imm8[7:0] - IF tmp > 15 - tmp := 16 - FI - dst[127:0] := a[127:0] >> (tmp*8) - - - SSE2 -
emmintrin.h
- Shift + + + + Shift "a" right by "imm8" bytes while shifting in zeros, and store the results in "dst". + +tmp := imm8[7:0] +IF tmp > 15 + tmp := 16 +FI +dst[127:0] := a[127:0] >> (tmp*8) + + + SSE2 +
emmintrin.h
+ Shift
- - - - Shift packed 16-bit integers in "a" left by "imm8" while shifting in zeros, and - store the results in "dst". - - FOR j := 0 to 7 - i := j*16 - IF imm8[7:0] > 15 - dst[i+15:i] := 0 - ELSE - dst[i+15:i] := ZeroExtend16(a[i+15:i] << imm8[7:0]) - FI - ENDFOR - - - SSE2 -
emmintrin.h
- Shift + + + + Shift packed 16-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 7 + i := j*16 + IF imm8[7:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend16(a[i+15:i] << imm8[7:0]) + FI +ENDFOR + + + SSE2 +
emmintrin.h
+ Shift
- - - - Shift packed 16-bit integers in "a" left by "count" while shifting in zeros, - and store the results in "dst". - - FOR j := 0 to 7 - i := j*16 - IF count[63:0] > 15 - dst[i+15:i] := 0 - ELSE - dst[i+15:i] := ZeroExtend16(a[i+15:i] << count[63:0]) - FI - ENDFOR - - - SSE2 -
emmintrin.h
- Shift + + + + Shift packed 16-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 7 + i := j*16 + IF count[63:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend16(a[i+15:i] << count[63:0]) + FI +ENDFOR + + + SSE2 +
emmintrin.h
+ Shift
- - - - Shift packed 32-bit integers in "a" left by "imm8" while shifting in zeros, and - store the results in "dst". - - FOR j := 0 to 3 - i := j*32 - IF imm8[7:0] > 31 - dst[i+31:i] := 0 - ELSE - dst[i+31:i] := ZeroExtend32(a[i+31:i] << imm8[7:0]) - FI - ENDFOR - - - SSE2 -
emmintrin.h
- Shift + + + + Shift packed 32-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + IF imm8[7:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend32(a[i+31:i] << imm8[7:0]) + FI +ENDFOR + + + SSE2 +
emmintrin.h
+ Shift
- - - - Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, - and store the results in "dst". - - FOR j := 0 to 3 - i := j*32 - IF count[63:0] > 31 - dst[i+31:i] := 0 - ELSE - dst[i+31:i] := ZeroExtend32(a[i+31:i] << count[63:0]) - FI - ENDFOR - - - SSE2 -
emmintrin.h
- Shift + + + + Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + IF count[63:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend32(a[i+31:i] << count[63:0]) + FI +ENDFOR + + + SSE2 +
emmintrin.h
+ Shift
- - - - Shift packed 64-bit integers in "a" left by "imm8" while shifting in zeros, and - store the results in "dst". - - FOR j := 0 to 1 - i := j*64 - IF imm8[7:0] > 63 - dst[i+63:i] := 0 - ELSE - dst[i+63:i] := ZeroExtend64(a[i+63:i] << imm8[7:0]) - FI - ENDFOR - - - SSE2 -
emmintrin.h
- Shift + + + + Shift packed 64-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + IF imm8[7:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend64(a[i+63:i] << imm8[7:0]) + FI +ENDFOR + + + SSE2 +
emmintrin.h
+ Shift
- - - - Shift packed 64-bit integers in "a" left by "count" while shifting in zeros, - and store the results in "dst". - - FOR j := 0 to 1 - i := j*64 - IF count[63:0] > 63 - dst[i+63:i] := 0 - ELSE - dst[i+63:i] := ZeroExtend64(a[i+63:i] << count[63:0]) - FI - ENDFOR - - - SSE2 -
emmintrin.h
- Shift + + + + Shift packed 64-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + IF count[63:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend64(a[i+63:i] << count[63:0]) + FI +ENDFOR + + + SSE2 +
emmintrin.h
+ Shift
- - - - Shift packed 16-bit integers in "a" right by "imm8" while shifting in sign - bits, and store the results in "dst". - - FOR j := 0 to 7 - i := j*16 - IF imm8[7:0] > 15 - dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0) - ELSE - dst[i+15:i] := SignExtend16(a[i+15:i] >> imm8[7:0]) - FI - ENDFOR - - - SSE2 -
emmintrin.h
- Shift + + + + Shift packed 16-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 7 + i := j*16 + IF imm8[7:0] > 15 + dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0) + ELSE + dst[i+15:i] := SignExtend16(a[i+15:i] >> imm8[7:0]) + FI +ENDFOR + + + SSE2 +
emmintrin.h
+ Shift
- - - - Shift packed 16-bit integers in "a" right by "count" while shifting in sign - bits, and store the results in "dst". - - FOR j := 0 to 7 - i := j*16 - IF count[63:0] > 15 - dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0) - ELSE - dst[i+15:i] := SignExtend16(a[i+15:i] >> count[63:0]) - FI - ENDFOR - - - SSE2 -
emmintrin.h
- Shift -
- - - - - Shift packed 32-bit integers in "a" right by "imm8" while shifting in sign - bits, and store the results in "dst". - - FOR j := 0 to 3 - i := j*32 - IF imm8[7:0] > 31 - dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0) - ELSE - dst[i+31:i] := SignExtend32(a[i+31:i] >> imm8[7:0]) - FI - ENDFOR - - - SSE2 -
emmintrin.h
- Shift + + + + Shift packed 16-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 7 + i := j*16 + IF count[63:0] > 15 + dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0) + ELSE + dst[i+15:i] := SignExtend16(a[i+15:i] >> count[63:0]) + FI +ENDFOR + + + SSE2 +
emmintrin.h
+ Shift
- - - - - Shift packed 32-bit integers in "a" right by "count" while shifting in sign - bits, and store the results in "dst". - - FOR j := 0 to 3 - i := j*32 - IF count[63:0] > 31 - dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0) - ELSE - dst[i+31:i] := SignExtend32(a[i+31:i] >> count[63:0]) - FI - ENDFOR - - - SSE2 -
emmintrin.h
- Shift + + + + + Shift packed 32-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + IF imm8[7:0] > 31 + dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0) + ELSE + dst[i+31:i] := SignExtend32(a[i+31:i] >> imm8[7:0]) + FI +ENDFOR + + + SSE2 +
emmintrin.h
+ Shift +
+ + + + + Shift packed 32-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + IF count[63:0] > 31 + dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0) + ELSE + dst[i+31:i] := SignExtend32(a[i+31:i] >> count[63:0]) + FI +ENDFOR + + + SSE2 +
emmintrin.h
+ Shift
- - - - Shift "a" right by "imm8" bytes while shifting in zeros, and store the results - in "dst". - - tmp := imm8[7:0] - IF tmp > 15 - tmp := 16 - FI - dst[127:0] := a[127:0] >> (tmp*8) - - - SSE2 -
emmintrin.h
- Shift + + + + Shift "a" right by "imm8" bytes while shifting in zeros, and store the results in "dst". + +tmp := imm8[7:0] +IF tmp > 15 + tmp := 16 +FI +dst[127:0] := a[127:0] >> (tmp*8) + + + SSE2 +
emmintrin.h
+ Shift
- - - - Shift packed 16-bit integers in "a" right by "imm8" while shifting in zeros, - and store the results in "dst". - - FOR j := 0 to 7 - i := j*16 - IF imm8[7:0] > 15 - dst[i+15:i] := 0 - ELSE - dst[i+15:i] := ZeroExtend16(a[i+15:i] >> imm8[7:0]) - FI - ENDFOR - - - SSE2 -
emmintrin.h
- Shift + + + + Shift packed 16-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 7 + i := j*16 + IF imm8[7:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend16(a[i+15:i] >> imm8[7:0]) + FI +ENDFOR + + + SSE2 +
emmintrin.h
+ Shift
- - - - Shift packed 16-bit integers in "a" right by "count" while shifting in zeros, - and store the results in "dst". - - FOR j := 0 to 7 - i := j*16 - IF count[63:0] > 15 - dst[i+15:i] := 0 - ELSE - dst[i+15:i] := ZeroExtend16(a[i+15:i] >> count[63:0]) - FI - ENDFOR - - - SSE2 -
emmintrin.h
- Shift + + + + Shift packed 16-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 7 + i := j*16 + IF count[63:0] > 15 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := ZeroExtend16(a[i+15:i] >> count[63:0]) + FI +ENDFOR + + + SSE2 +
emmintrin.h
+ Shift
- - - - Shift packed 32-bit integers in "a" right by "imm8" while shifting in zeros, - and store the results in "dst". - - FOR j := 0 to 3 - i := j*32 - IF imm8[7:0] > 31 - dst[i+31:i] := 0 - ELSE - dst[i+31:i] := ZeroExtend32(a[i+31:i] >> imm8[7:0]) - FI - ENDFOR - - - SSE2 -
emmintrin.h
- Shift + + + + Shift packed 32-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + IF imm8[7:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend32(a[i+31:i] >> imm8[7:0]) + FI +ENDFOR + + + SSE2 +
emmintrin.h
+ Shift
- - - - Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, - and store the results in "dst". - - FOR j := 0 to 3 - i := j*32 - IF count[63:0] > 31 - dst[i+31:i] := 0 - ELSE - dst[i+31:i] := ZeroExtend32(a[i+31:i] >> count[63:0]) - FI - ENDFOR - - - SSE2 -
emmintrin.h
- Shift + + + + Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + IF count[63:0] > 31 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := ZeroExtend32(a[i+31:i] >> count[63:0]) + FI +ENDFOR + + + SSE2 +
emmintrin.h
+ Shift
- - - - Shift packed 64-bit integers in "a" right by "imm8" while shifting in zeros, - and store the results in "dst". - - FOR j := 0 to 1 - i := j*64 - IF imm8[7:0] > 63 - dst[i+63:i] := 0 - ELSE - dst[i+63:i] := ZeroExtend64(a[i+63:i] >> imm8[7:0]) - FI - ENDFOR - - - SSE2 -
emmintrin.h
- Shift + + + + Shift packed 64-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + IF imm8[7:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend64(a[i+63:i] >> imm8[7:0]) + FI +ENDFOR + + + SSE2 +
emmintrin.h
+ Shift
- - - - Shift packed 64-bit integers in "a" right by "count" while shifting in zeros, - and store the results in "dst". - - FOR j := 0 to 1 - i := j*64 - IF count[63:0] > 63 - dst[i+63:i] := 0 - ELSE - dst[i+63:i] := ZeroExtend64(a[i+63:i] >> count[63:0]) - FI - ENDFOR - - - SSE2 -
emmintrin.h
- Shift + + + + Shift packed 64-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + IF count[63:0] > 63 + dst[i+63:i] := 0 + ELSE + dst[i+63:i] := ZeroExtend64(a[i+63:i] >> count[63:0]) + FI +ENDFOR + + + SSE2 +
emmintrin.h
+ Shift
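Two families are mixed here: whole-register byte shifts (the tmp*8 forms, clamped at 16 bytes) and per-element bit shifts that zero a lane once the count exceeds the element width. A sketch assuming _mm_slli_epi32 and _mm_slli_si128:

    use std::arch::x86_64::*;

    fn main() {
        unsafe {
            let a = _mm_set1_epi32(1);
            // Per-element bit shift: each 32-bit lane becomes 1 << 4 = 16
            let e = _mm_slli_epi32::<4>(a);
            assert_eq!(_mm_cvtsi128_si32(e), 16);
            // Whole-register byte shift: lane 0 moves up into lane 1
            let b = _mm_slli_si128::<4>(a);
            assert_eq!(_mm_cvtsi128_si32(b), 0); // lane 0 is now zero
            // A count past the element width (imm8 > 31) zeroes the lanes
            let z = _mm_slli_epi32::<32>(a);
            assert_eq!(_mm_cvtsi128_si32(z), 0);
        }
    }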
+ Logical (SSE2, emmintrin.h): Compute the bitwise AND of 128 bits (representing integer data) in "a" and "b", and store the result in "dst".
+   dst[127:0] := (a[127:0] AND b[127:0])
+ Logical (SSE2, emmintrin.h): Compute the bitwise NOT of 128 bits (representing integer data) in "a" and then AND with "b", and store the result in "dst".
+   dst[127:0] := ((NOT a[127:0]) AND b[127:0])
+ Logical (SSE2, emmintrin.h): Compute the bitwise OR of 128 bits (representing integer data) in "a" and "b", and store the result in "dst".
+   dst[127:0] := (a[127:0] OR b[127:0])
+ Logical (SSE2, emmintrin.h): Compute the bitwise XOR of 128 bits (representing integer data) in "a" and "b", and store the result in "dst".
+   dst[127:0] := (a[127:0] XOR b[127:0])
+ Logical (SSE2, emmintrin.h): Compute the bitwise AND of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst".
+   FOR j := 0 to 1
+     i := j*64
+     dst[i+63:i] := (a[i+63:i] AND b[i+63:i])
+   ENDFOR
+ Logical (SSE2, emmintrin.h): Compute the bitwise NOT of packed double-precision (64-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst".
+   FOR j := 0 to 1
+     i := j*64
+     dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i])
+   ENDFOR
+ Logical (SSE2, emmintrin.h): Compute the bitwise OR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst".
+   FOR j := 0 to 1
+     i := j*64
+     dst[i+63:i] := a[i+63:i] OR b[i+63:i]
+   ENDFOR
+ Logical (SSE2, emmintrin.h): Compute the bitwise XOR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst".
+   FOR j := 0 to 1
+     i := j*64
+     dst[i+63:i] := a[i+63:i] XOR b[i+63:i]
+   ENDFOR
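The NOT-then-AND form is the usual clear-these-bits primitive; note that it negates the first operand, not the second. A sketch assuming _mm_andnot_si128:

    use std::arch::x86_64::*;

    fn main() {
        unsafe {
            let mask = _mm_set1_epi8(0x0F);
            let data = _mm_set1_epi8(0x5A);
            // (NOT 0x0F) AND 0x5A keeps only the bits outside the mask: 0x50
            let cleared = _mm_andnot_si128(mask, data);
            assert_eq!(_mm_extract_epi16::<0>(cleared) & 0xFF, 0x50);
        }
    }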
+ Compare (SSE2, emmintrin.h): Compare packed 8-bit integers in "a" and "b" for equality, and store the results in "dst".
+   FOR j := 0 to 15
+     i := j*8
+     dst[i+7:i] := ( a[i+7:i] == b[i+7:i] ) ? 0xFF : 0
+   ENDFOR
+ Compare (SSE2, emmintrin.h): Compare packed 16-bit integers in "a" and "b" for equality, and store the results in "dst".
+   FOR j := 0 to 7
+     i := j*16
+     dst[i+15:i] := ( a[i+15:i] == b[i+15:i] ) ? 0xFFFF : 0
+   ENDFOR
+ Compare (SSE2, emmintrin.h): Compare packed 32-bit integers in "a" and "b" for equality, and store the results in "dst".
+   FOR j := 0 to 3
+     i := j*32
+     dst[i+31:i] := ( a[i+31:i] == b[i+31:i] ) ? 0xFFFFFFFF : 0
+   ENDFOR
+ Compare (SSE2, emmintrin.h): Compare packed signed 8-bit integers in "a" and "b" for greater-than, and store the results in "dst".
+   FOR j := 0 to 15
+     i := j*8
+     dst[i+7:i] := ( a[i+7:i] > b[i+7:i] ) ? 0xFF : 0
+   ENDFOR
+ Compare (SSE2, emmintrin.h): Compare packed signed 16-bit integers in "a" and "b" for greater-than, and store the results in "dst".
+   FOR j := 0 to 7
+     i := j*16
+     dst[i+15:i] := ( a[i+15:i] > b[i+15:i] ) ? 0xFFFF : 0
+   ENDFOR
+ Compare (SSE2, emmintrin.h): Compare packed signed 32-bit integers in "a" and "b" for greater-than, and store the results in "dst".
+   FOR j := 0 to 3
+     i := j*32
+     dst[i+31:i] := ( a[i+31:i] > b[i+31:i] ) ? 0xFFFFFFFF : 0
+   ENDFOR
+ Compare (SSE2, emmintrin.h): Compare packed signed 8-bit integers in "a" and "b" for less-than, and store the results in "dst". Note: This intrinsic emits the pcmpgtb instruction with the order of the operands switched.
+   FOR j := 0 to 15
+     i := j*8
+     dst[i+7:i] := ( a[i+7:i] < b[i+7:i] ) ? 0xFF : 0
+   ENDFOR
+ Compare (SSE2, emmintrin.h): Compare packed signed 16-bit integers in "a" and "b" for less-than, and store the results in "dst". Note: This intrinsic emits the pcmpgtw instruction with the order of the operands switched.
+   FOR j := 0 to 7
+     i := j*16
+     dst[i+15:i] := ( a[i+15:i] < b[i+15:i] ) ? 0xFFFF : 0
+   ENDFOR
+ Compare (SSE2, emmintrin.h): Compare packed signed 32-bit integers in "a" and "b" for less-than, and store the results in "dst". Note: This intrinsic emits the pcmpgtd instruction with the order of the operands switched.
+   FOR j := 0 to 3
+     i := j*32
+     dst[i+31:i] := ( a[i+31:i] < b[i+31:i] ) ? 0xFFFFFFFF : 0
+   ENDFOR
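All of these produce all-ones or all-zeros lanes, which movemask collapses into a bitmask, and the less-than note means cmplt must agree with cmpgt on swapped operands. A sketch assuming _mm_cmplt_epi32 and _mm_cmpgt_epi32:

    use std::arch::x86_64::*;

    fn main() {
        unsafe {
            let a = _mm_setr_epi32(1, 5, 3, 7);
            let b = _mm_setr_epi32(2, 5, 9, 0);
            // Lanes become 0xFFFFFFFF where a < b, else 0
            let lt = _mm_cmplt_epi32(a, b);
            // Documented as pcmpgtd with the operand order switched
            let gt_swapped = _mm_cmpgt_epi32(b, a);
            assert_eq!(_mm_movemask_epi8(lt), _mm_movemask_epi8(gt_swapped));
            // Lanes 0 (1 < 2) and 2 (3 < 9) are set, i.e. bytes 0-3 and 8-11
            assert_eq!(_mm_movemask_epi8(lt), 0b0000_1111_0000_1111);
        }
    }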
- - - - Compare the lower double-precision (64-bit) floating-point elements in "a" and - "b" for equality, store the result in the lower element of "dst", and copy the upper - element from "a" to the upper element of "dst". - - dst[63:0] := (a[63:0] == b[63:0]) ? 0xFFFFFFFFFFFFFFFF : 0 - dst[127:64] := a[127:64] - - - SSE2 -
emmintrin.h
- Compare + + + + Compare the lower double-precision (64-bit) floating-point elements in "a" and "b" for equality, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := (a[63:0] == b[63:0]) ? 0xFFFFFFFFFFFFFFFF : 0 +dst[127:64] := a[127:64] + + + SSE2 +
emmintrin.h
+ Compare
- - - - Compare the lower double-precision (64-bit) floating-point elements in "a" and - "b" for less-than, store the result in the lower element of "dst", and copy the upper - element from "a" to the upper element of "dst". - - dst[63:0] := (a[63:0] < b[63:0]) ? 0xFFFFFFFFFFFFFFFF : 0 - dst[127:64] := a[127:64] - - - SSE2 -
emmintrin.h
- Compare + + + + Compare the lower double-precision (64-bit) floating-point elements in "a" and "b" for less-than, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := (a[63:0] < b[63:0]) ? 0xFFFFFFFFFFFFFFFF : 0 +dst[127:64] := a[127:64] + + + SSE2 +
emmintrin.h
+ Compare
- - - - Compare the lower double-precision (64-bit) floating-point elements in "a" and - "b" for less-than-or-equal, store the result in the lower element of "dst", and copy the - upper element from "a" to the upper element of "dst". - - dst[63:0] := (a[63:0] <= b[63:0]) ? 0xFFFFFFFFFFFFFFFF : 0 - dst[127:64] := a[127:64] - - - SSE2 -
emmintrin.h
- Compare + + + + Compare the lower double-precision (64-bit) floating-point elements in "a" and "b" for less-than-or-equal, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := (a[63:0] <= b[63:0]) ? 0xFFFFFFFFFFFFFFFF : 0 +dst[127:64] := a[127:64] + + + SSE2 +
emmintrin.h
+ Compare
- - - - Compare the lower double-precision (64-bit) floating-point elements in "a" and - "b" for greater-than, store the result in the lower element of "dst", and copy the upper - element from "a" to the upper element of "dst". - - dst[63:0] := (a[63:0] > b[63:0]) ? 0xFFFFFFFFFFFFFFFF : 0 - dst[127:64] := a[127:64] - - - SSE2 -
emmintrin.h
- Compare + + + + Compare the lower double-precision (64-bit) floating-point elements in "a" and "b" for greater-than, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := (a[63:0] > b[63:0]) ? 0xFFFFFFFFFFFFFFFF : 0 +dst[127:64] := a[127:64] + + + SSE2 +
emmintrin.h
+ Compare
- - - - Compare the lower double-precision (64-bit) floating-point elements in "a" and - "b" for greater-than-or-equal, store the result in the lower element of "dst", and copy - the upper element from "a" to the upper element of "dst". - - dst[63:0] := (a[63:0] >= b[63:0]) ? 0xFFFFFFFFFFFFFFFF : 0 - dst[127:64] := a[127:64] - - - SSE2 -
emmintrin.h
- Compare + + + + Compare the lower double-precision (64-bit) floating-point elements in "a" and "b" for greater-than-or-equal, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := (a[63:0] >= b[63:0]) ? 0xFFFFFFFFFFFFFFFF : 0 +dst[127:64] := a[127:64] + + + SSE2 +
emmintrin.h
+ Compare
- - - - Compare the lower double-precision (64-bit) floating-point elements in "a" and - "b" to see if neither is NaN, store the result in the lower element of "dst", and copy - the upper element from "a" to the upper element of "dst". - dst[63:0] := (a[63:0] != NaN AND b[63:0] != NaN) ? 0xFFFFFFFFFFFFFFFF : 0 - dst[127:64] := a[127:64] - - - SSE2 -
emmintrin.h
- Compare + + + + Compare the lower double-precision (64-bit) floating-point elements in "a" and "b" to see if neither is NaN, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + dst[63:0] := (a[63:0] != NaN AND b[63:0] != NaN) ? 0xFFFFFFFFFFFFFFFF : 0 +dst[127:64] := a[127:64] + + + SSE2 +
emmintrin.h
+ Compare
+ Compare the lower double-precision (64-bit) floating-point elements in "a" and "b" to see if either is NaN, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".
+ dst[63:0] := (a[63:0] == NaN OR b[63:0] == NaN) ? 0xFFFFFFFFFFFFFFFF : 0
+ dst[127:64] := a[127:64]
+ SSE2
+ emmintrin.h
+ Compare
+
+ Compare the lower double-precision (64-bit) floating-point elements in "a" and "b" for not-equal, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".
+ dst[63:0] := (a[63:0] != b[63:0]) ? 0xFFFFFFFFFFFFFFFF : 0
+ dst[127:64] := a[127:64]
+ SSE2
+ emmintrin.h
+ Compare
+
+ Compare the lower double-precision (64-bit) floating-point elements in "a" and "b" for not-less-than, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".
+ dst[63:0] := (!(a[63:0] < b[63:0])) ? 0xFFFFFFFFFFFFFFFF : 0
+ dst[127:64] := a[127:64]
+ SSE2
+ emmintrin.h
+ Compare
+
+ Compare the lower double-precision (64-bit) floating-point elements in "a" and "b" for not-less-than-or-equal, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".
+ dst[63:0] := (!(a[63:0] <= b[63:0])) ? 0xFFFFFFFFFFFFFFFF : 0
+ dst[127:64] := a[127:64]
+ SSE2
+ emmintrin.h
+ Compare
+
+ Compare the lower double-precision (64-bit) floating-point elements in "a" and "b" for not-greater-than, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".
+ dst[63:0] := (!(a[63:0] > b[63:0])) ? 0xFFFFFFFFFFFFFFFF : 0
+ dst[127:64] := a[127:64]
+ SSE2
+ emmintrin.h
+ Compare
+
+ Compare the lower double-precision (64-bit) floating-point elements in "a" and "b" for not-greater-than-or-equal, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".
+ dst[63:0] := (!(a[63:0] >= b[63:0])) ? 0xFFFFFFFFFFFFFFFF : 0
+ dst[127:64] := a[127:64]
+ SSE2
+ emmintrin.h
+ Compare
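
These scalar-compare records lose their intrinsic names in this data dump, but they line up with the `_mm_cmp*_sd` family that `core::arch::x86_64` already exposes. A minimal sketch of the lane behaviour described above, assuming the first record corresponds to `_mm_cmpunord_sd` (the function name below is illustrative):

    use core::arch::x86_64::*;

    #[target_feature(enable = "sse2")]
    unsafe fn scalar_compare_sketch() {
        let a = _mm_set_pd(2.0, f64::NAN); // upper lane 2.0, lower lane NaN
        let b = _mm_set_pd(9.0, 1.0);
        // Lower lane: either operand is NaN, so the mask is all ones;
        // the upper lane is copied straight from `a`, as the pseudocode says.
        let unord = _mm_cmpunord_sd(a, b);
        let lo_bits = _mm_cvtsi128_si64(_mm_castpd_si128(unord));
        assert_eq!(lo_bits, -1); // 0xFFFFFFFFFFFFFFFF
    }

Callers would gate this behind is_x86_feature_detected!("sse2") before invoking it.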
+ Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for equality, and store the results in "dst".
+ FOR j := 0 to 1
+   i := j*64
+   dst[i+63:i] := (a[i+63:i] == b[i+63:i]) ? 0xFFFFFFFFFFFFFFFF : 0
+ ENDFOR
+ SSE2
+ emmintrin.h
+ Compare
+
+ Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for less-than, and store the results in "dst".
+ FOR j := 0 to 1
+   i := j*64
+   dst[i+63:i] := (a[i+63:i] < b[i+63:i]) ? 0xFFFFFFFFFFFFFFFF : 0
+ ENDFOR
+ SSE2
+ emmintrin.h
+ Compare
+
+ Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for less-than-or-equal, and store the results in "dst".
+ FOR j := 0 to 1
+   i := j*64
+   dst[i+63:i] := (a[i+63:i] <= b[i+63:i]) ? 0xFFFFFFFFFFFFFFFF : 0
+ ENDFOR
+ SSE2
+ emmintrin.h
+ Compare
+
+ Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for greater-than, and store the results in "dst".
+ FOR j := 0 to 1
+   i := j*64
+   dst[i+63:i] := (a[i+63:i] > b[i+63:i]) ? 0xFFFFFFFFFFFFFFFF : 0
+ ENDFOR
+ SSE2
+ emmintrin.h
+ Compare
+
+ Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for greater-than-or-equal, and store the results in "dst".
+ FOR j := 0 to 1
+   i := j*64
+   dst[i+63:i] := (a[i+63:i] >= b[i+63:i]) ? 0xFFFFFFFFFFFFFFFF : 0
+ ENDFOR
+ SSE2
+ emmintrin.h
+ Compare
+
+ Compare packed double-precision (64-bit) floating-point elements in "a" and "b" to see if neither is NaN, and store the results in "dst".
+ FOR j := 0 to 1
+   i := j*64
+   dst[i+63:i] := (a[i+63:i] != NaN AND b[i+63:i] != NaN) ? 0xFFFFFFFFFFFFFFFF : 0
+ ENDFOR
+ SSE2
+ emmintrin.h
+ Compare
+
+ Compare packed double-precision (64-bit) floating-point elements in "a" and "b" to see if either is NaN, and store the results in "dst".
+ FOR j := 0 to 1
+   i := j*64
+   dst[i+63:i] := (a[i+63:i] == NaN OR b[i+63:i] == NaN) ? 0xFFFFFFFFFFFFFFFF : 0
+ ENDFOR
+ SSE2
+ emmintrin.h
+ Compare
+
+ Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for not-equal, and store the results in "dst".
+ FOR j := 0 to 1
+   i := j*64
+   dst[i+63:i] := (a[i+63:i] != b[i+63:i]) ? 0xFFFFFFFFFFFFFFFF : 0
+ ENDFOR
+ SSE2
+ emmintrin.h
+ Compare
+
+ Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for not-less-than, and store the results in "dst".
+ FOR j := 0 to 1
+   i := j*64
+   dst[i+63:i] := (!(a[i+63:i] < b[i+63:i])) ? 0xFFFFFFFFFFFFFFFF : 0
+ ENDFOR
+ SSE2
+ emmintrin.h
+ Compare
+
+ Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for not-less-than-or-equal, and store the results in "dst".
+ FOR j := 0 to 1
+   i := j*64
+   dst[i+63:i] := (!(a[i+63:i] <= b[i+63:i])) ? 0xFFFFFFFFFFFFFFFF : 0
+ ENDFOR
+ SSE2
+ emmintrin.h
+ Compare
+
+ Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for not-greater-than, and store the results in "dst".
+ FOR j := 0 to 1
+   i := j*64
+   dst[i+63:i] := (!(a[i+63:i] > b[i+63:i])) ? 0xFFFFFFFFFFFFFFFF : 0
+ ENDFOR
+ SSE2
+ emmintrin.h
+ Compare
+
+ Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for not-greater-than-or-equal, and store the results in "dst".
+ FOR j := 0 to 1
+   i := j*64
+   dst[i+63:i] := (!(a[i+63:i] >= b[i+63:i])) ? 0xFFFFFFFFFFFFFFFF : 0
+ ENDFOR
+ SSE2
+ emmintrin.h
+ Compare
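
For the packed forms, each 64-bit lane independently becomes an all-ones or all-zeros mask. A sketch of that, assuming the less-than record corresponds to `_mm_cmplt_pd` (function name illustrative):

    use core::arch::x86_64::*;

    #[target_feature(enable = "sse2")]
    unsafe fn packed_compare_sketch() {
        let a = _mm_set_pd(4.0, 1.0);
        let b = _mm_set_pd(3.0, 2.0);
        // Lane 0 (1.0 < 2.0) becomes all ones, lane 1 (4.0 < 3.0) all zeros.
        let lt = _mm_cmplt_pd(a, b);
        // Collapse the sign bit of each lane into an integer bitmask.
        assert_eq!(_mm_movemask_pd(lt), 0b01);
    }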
+ Compare the lower double-precision (64-bit) floating-point element in "a" and "b" for equality, and return the boolean result (0 or 1).
+ RETURN ( a[63:0] != NaN AND b[63:0] != NaN AND a[63:0] == b[63:0] ) ? 1 : 0
+ SSE2
+ emmintrin.h
+ Compare
+
+ Compare the lower double-precision (64-bit) floating-point element in "a" and "b" for less-than, and return the boolean result (0 or 1).
+ RETURN ( a[63:0] != NaN AND b[63:0] != NaN AND a[63:0] < b[63:0] ) ? 1 : 0
+ SSE2
+ emmintrin.h
+ Compare
+
+ Compare the lower double-precision (64-bit) floating-point element in "a" and "b" for less-than-or-equal, and return the boolean result (0 or 1).
+ RETURN ( a[63:0] != NaN AND b[63:0] != NaN AND a[63:0] <= b[63:0] ) ? 1 : 0
+ SSE2
+ emmintrin.h
+ Compare
+
+ Compare the lower double-precision (64-bit) floating-point element in "a" and "b" for greater-than, and return the boolean result (0 or 1).
+ RETURN ( a[63:0] != NaN AND b[63:0] != NaN AND a[63:0] > b[63:0] ) ? 1 : 0
+ SSE2
+ emmintrin.h
+ Compare
+
+ Compare the lower double-precision (64-bit) floating-point element in "a" and "b" for greater-than-or-equal, and return the boolean result (0 or 1).
+ RETURN ( a[63:0] != NaN AND b[63:0] != NaN AND a[63:0] >= b[63:0] ) ? 1 : 0
+ SSE2
+ emmintrin.h
+ Compare
+
+ Compare the lower double-precision (64-bit) floating-point element in "a" and "b" for not-equal, and return the boolean result (0 or 1).
+ RETURN ( a[63:0] == NaN OR b[63:0] == NaN OR a[63:0] != b[63:0] ) ? 1 : 0
+ SSE2
+ emmintrin.h
+ Compare
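
Unlike the mask-producing compares above, these records describe compares that return a plain 0-or-1 integer (the COMISD pattern). A sketch, assuming they correspond to the `_mm_comi*_sd` bindings:

    use core::arch::x86_64::*;

    #[target_feature(enable = "sse2")]
    unsafe fn comi_sketch() {
        let a = _mm_set_sd(1.0);
        let b = _mm_set_sd(2.0);
        // A boolean result rather than a per-lane mask.
        assert_eq!(_mm_comilt_sd(a, b), 1);
        assert_eq!(_mm_comigt_sd(a, b), 0);
    }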
+ Compare the lower double-precision (64-bit) floating-point element in "a" and "b" for equality, and return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs.
+ RETURN ( a[63:0] != NaN AND b[63:0] != NaN AND a[63:0] == b[63:0] ) ? 1 : 0
+ SSE2
+ emmintrin.h
+ Compare
+
+ Compare the lower double-precision (64-bit) floating-point element in "a" and "b" for less-than, and return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs.
+ RETURN ( a[63:0] != NaN AND b[63:0] != NaN AND a[63:0] < b[63:0] ) ? 1 : 0
+ SSE2
+ emmintrin.h
+ Compare
+
+ Compare the lower double-precision (64-bit) floating-point element in "a" and "b" for less-than-or-equal, and return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs.
+ RETURN ( a[63:0] != NaN AND b[63:0] != NaN AND a[63:0] <= b[63:0] ) ? 1 : 0
+ SSE2
+ emmintrin.h
+ Compare
+
+ Compare the lower double-precision (64-bit) floating-point element in "a" and "b" for greater-than, and return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs.
+ RETURN ( a[63:0] != NaN AND b[63:0] != NaN AND a[63:0] > b[63:0] ) ? 1 : 0
+ SSE2
+ emmintrin.h
+ Compare
+
+ Compare the lower double-precision (64-bit) floating-point element in "a" and "b" for greater-than-or-equal, and return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs.
+ RETURN ( a[63:0] != NaN AND b[63:0] != NaN AND a[63:0] >= b[63:0] ) ? 1 : 0
+ SSE2
+ emmintrin.h
+ Compare
+
+ Compare the lower double-precision (64-bit) floating-point element in "a" and "b" for not-equal, and return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs.
+ RETURN ( a[63:0] == NaN OR b[63:0] == NaN OR a[63:0] != b[63:0] ) ? 1 : 0
+ SSE2
+ emmintrin.h
+ Compare
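
The quiet-NaN wording is the distinguishing feature of these records (the UCOMISD pattern). A sketch of the unordered case, assuming the equality and not-equal records correspond to `_mm_ucomieq_sd` and `_mm_ucomineq_sd`:

    use core::arch::x86_64::*;

    #[target_feature(enable = "sse2")]
    unsafe fn ucomi_sketch() {
        let a = _mm_set_sd(f64::NAN);
        let b = _mm_set_sd(f64::NAN);
        // Unordered operands: equality is false and not-equal is true, and a
        // quiet NaN does not raise an invalid-operation exception here.
        assert_eq!(_mm_ucomieq_sd(a, b), 0);
        assert_eq!(_mm_ucomineq_sd(a, b), 1);
    }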
+ Convert packed signed 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst".
+ FOR j := 0 to 1
+   i := j*32
+   m := j*64
+   dst[m+63:m] := Convert_Int32_To_FP64(a[i+31:i])
+ ENDFOR
+ SSE2
+ emmintrin.h
+ Convert
+
+ Convert the signed 32-bit integer "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".
+ dst[63:0] := Convert_Int32_To_FP64(b[31:0])
+ dst[127:64] := a[127:64]
+ dst[MAX:128] := 0
+ SSE2
+ emmintrin.h
+ Convert
+
+ Convert the signed 64-bit integer "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".
+ dst[63:0] := Convert_Int64_To_FP64(b[63:0])
+ dst[127:64] := a[127:64]
+ dst[MAX:128] := 0
+ SSE2
+ emmintrin.h
+ Convert
+
+ Convert the signed 64-bit integer "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".
+ dst[63:0] := Convert_Int64_To_FP64(b[63:0])
+ dst[127:64] := a[127:64]
+ dst[MAX:128] := 0
+ SSE2
+ emmintrin.h
+ Convert
+
+ Convert packed signed 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".
+ FOR j := 0 to 3
+   i := 32*j
+   dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i])
+ ENDFOR
+ SSE2
+ emmintrin.h
+ Convert
+
+ Convert packed signed 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst".
+ FOR j := 0 to 1
+   i := j*32
+   m := j*64
+   dst[m+63:m] := Convert_Int32_To_FP64(a[i+31:i])
+ ENDFOR
+ SSE2
+ emmintrin.h
+ Convert
+
+ Copy 32-bit integer "a" to the lower elements of "dst", and zero the upper elements of "dst".
+ dst[31:0] := a[31:0]
+ dst[127:32] := 0
+ SSE2
+ emmintrin.h
+ Convert
+
+ Copy 64-bit integer "a" to the lower element of "dst", and zero the upper element.
+ dst[63:0] := a[63:0]
+ dst[127:64] := 0
+ SSE2
+ emmintrin.h
+ Convert
+
+ Copy 64-bit integer "a" to the lower element of "dst", and zero the upper element.
+ dst[63:0] := a[63:0]
+ dst[127:64] := 0
+ SSE2
+ emmintrin.h
+ Convert
+
+ Copy the lower 32-bit integer in "a" to "dst".
+ dst[31:0] := a[31:0]
+ SSE2
+ emmintrin.h
+ Convert
+
+ Copy the lower 64-bit integer in "a" to "dst".
+ dst[63:0] := a[63:0]
+ SSE2
+ emmintrin.h
+ Convert
+
+ Copy the lower 64-bit integer in "a" to "dst".
+ dst[63:0] := a[63:0]
+ SSE2
+ emmintrin.h
+ Convert
+
+ Convert packed double-precision (64-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".
+ FOR j := 0 to 1
+   i := 32*j
+   k := 64*j
+   dst[i+31:i] := Convert_FP64_To_FP32(a[k+63:k])
+ ENDFOR
+ dst[127:64] := 0
+ SSE2
+ emmintrin.h
+ Convert
+
+ Convert packed single-precision (32-bit) floating-point elements in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst".
+ FOR j := 0 to 1
+   i := 64*j
+   k := 32*j
+   dst[i+63:i] := Convert_FP32_To_FP64(a[k+31:k])
+ ENDFOR
+ SSE2
+ emmintrin.h
+ Convert
+
+ Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst".
+ FOR j := 0 to 1
+   i := 32*j
+   k := 64*j
+   dst[i+31:i] := Convert_FP64_To_Int32(a[k+63:k])
+ ENDFOR
+ SSE2
+ emmintrin.h
+ Convert
+
+ Convert the lower double-precision (64-bit) floating-point element in "a" to a 32-bit integer, and store the result in "dst".
+ dst[31:0] := Convert_FP64_To_Int32(a[63:0])
+ SSE2
+ emmintrin.h
+ Convert
+
+ Convert the lower double-precision (64-bit) floating-point element in "a" to a 64-bit integer, and store the result in "dst".
+ dst[63:0] := Convert_FP64_To_Int64(a[63:0])
+ SSE2
+ emmintrin.h
+ Convert
+
+ Convert the lower double-precision (64-bit) floating-point element in "a" to a 64-bit integer, and store the result in "dst".
+ dst[63:0] := Convert_FP64_To_Int64(a[63:0])
+ SSE2
+ emmintrin.h
+ Convert
+
+ Convert the lower double-precision (64-bit) floating-point element in "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ dst[31:0] := Convert_FP64_To_FP32(b[63:0])
+ dst[127:32] := a[127:32]
+ dst[MAX:128] := 0
+ SSE2
+ emmintrin.h
+ Convert
+
+ Copy the lower double-precision (64-bit) floating-point element of "a" to "dst".
+ dst[63:0] := a[63:0]
+ SSE2
+ emmintrin.h
+ Convert
+
+ Convert the lower single-precision (32-bit) floating-point element in "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".
+ dst[63:0] := Convert_FP32_To_FP64(b[31:0])
+ dst[127:64] := a[127:64]
+ dst[MAX:128] := 0
+ SSE2
+ emmintrin.h
+ Convert
+
+ Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst".
+ FOR j := 0 to 1
+   i := 32*j
+   k := 64*j
+   dst[i+31:i] := Convert_FP64_To_Int32_Truncate(a[k+63:k])
+ ENDFOR
+ SSE2
+ emmintrin.h
+ Convert
+
+ Convert the lower double-precision (64-bit) floating-point element in "a" to a 32-bit integer with truncation, and store the result in "dst".
+ dst[31:0] := Convert_FP64_To_Int32_Truncate(a[63:0])
+ SSE2
+ emmintrin.h
+ Convert
+
+ Convert the lower double-precision (64-bit) floating-point element in "a" to a 64-bit integer with truncation, and store the result in "dst".
+ dst[63:0] := Convert_FP64_To_Int64_Truncate(a[63:0])
+ SSE2
+ emmintrin.h
+ Convert
+
+ Convert the lower double-precision (64-bit) floating-point element in "a" to a 64-bit integer with truncation, and store the result in "dst".
+ dst[63:0] := Convert_FP64_To_Int64_Truncate(a[63:0])
+ SSE2
+ emmintrin.h
+ Convert
+
+ Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst".
+ FOR j := 0 to 3
+   i := 32*j
+   dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i])
+ ENDFOR
+ SSE2
+ emmintrin.h
+ Convert
+
+ Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst".
+ FOR j := 0 to 3
+   i := 32*j
+   dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i])
+ ENDFOR
+ SSE2
+ emmintrin.h
+ Convert
+
+ Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst".
+ FOR j := 0 to 1
+   i := 32*j
+   k := 64*j
+   dst[i+31:i] := Convert_FP64_To_Int32(a[k+63:k])
+ ENDFOR
+ SSE2
+ emmintrin.h
+ Convert
+
+ Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst".
+ FOR j := 0 to 1
+   i := 32*j
+   k := 64*j
+   dst[i+31:i] := Convert_FP64_To_Int32_Truncate(a[k+63:k])
+ ENDFOR
+ SSE2
+ emmintrin.h
+ Convert
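
The convert records split into widening copies, rounding conversions, and truncating ("_Truncate") conversions. A sketch of the difference, assuming the records correspond to `_mm_cvtepi32_pd`, `_mm_cvtsd_si32`, and `_mm_cvttsd_si32`, and assuming the default MXCSR round-to-nearest mode:

    use core::arch::x86_64::*;

    #[target_feature(enable = "sse2")]
    unsafe fn convert_sketch() {
        // Widen the two low 32-bit lanes to f64.
        let v = _mm_setr_epi32(-7, 3, 0, 0);
        let d = _mm_cvtepi32_pd(v);
        assert_eq!(_mm_cvtsd_f64(d), -7.0);
        // Rounding conversion vs. truncating conversion of the lower element.
        let x = _mm_set_sd(2.75);
        assert_eq!(_mm_cvtsd_si32(x), 3);  // rounds to nearest by default
        assert_eq!(_mm_cvttsd_si32(x), 2); // truncates toward zero
    }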
+ Set packed 64-bit integers in "dst" with the supplied values.
+ dst[63:0] := e0
+ dst[127:64] := e1
+ SSE2
+ emmintrin.h
+ Set
+
+ Set packed 64-bit integers in "dst" with the supplied values.
+ dst[63:0] := e0
+ dst[127:64] := e1
+ SSE2
+ emmintrin.h
+ Set
+
+ Set packed 32-bit integers in "dst" with the supplied values.
+ dst[31:0] := e0
+ dst[63:32] := e1
+ dst[95:64] := e2
+ dst[127:96] := e3
+ SSE2
+ emmintrin.h
+ Set
+
+ Set packed 16-bit integers in "dst" with the supplied values.
+ dst[15:0] := e0
+ dst[31:16] := e1
+ dst[47:32] := e2
+ dst[63:48] := e3
+ dst[79:64] := e4
+ dst[95:80] := e5
+ dst[111:96] := e6
+ dst[127:112] := e7
+ SSE2
+ emmintrin.h
+ Set
+
+ Set packed 8-bit integers in "dst" with the supplied values.
+ dst[7:0] := e0
+ dst[15:8] := e1
+ dst[23:16] := e2
+ dst[31:24] := e3
+ dst[39:32] := e4
+ dst[47:40] := e5
+ dst[55:48] := e6
+ dst[63:56] := e7
+ dst[71:64] := e8
+ dst[79:72] := e9
+ dst[87:80] := e10
+ dst[95:88] := e11
+ dst[103:96] := e12
+ dst[111:104] := e13
+ dst[119:112] := e14
+ dst[127:120] := e15
+ SSE2
+ emmintrin.h
+ Set
+
+ Broadcast 64-bit integer "a" to all elements of "dst".
+ FOR j := 0 to 1
+   i := j*64
+   dst[i+63:i] := a[63:0]
+ ENDFOR
+ SSE2
+ emmintrin.h
+ Set
+
+ Broadcast 64-bit integer "a" to all elements of "dst". This intrinsic may generate "vpbroadcastq".
+ FOR j := 0 to 1
+   i := j*64
+   dst[i+63:i] := a[63:0]
+ ENDFOR
+ SSE2
+ emmintrin.h
+ Set
+
+ Broadcast 32-bit integer "a" to all elements of "dst". This intrinsic may generate "vpbroadcastd".
+ FOR j := 0 to 3
+   i := j*32
+   dst[i+31:i] := a[31:0]
+ ENDFOR
+ SSE2
+ emmintrin.h
+ Set
+
+ Broadcast 16-bit integer "a" to all elements of "dst". This intrinsic may generate "vpbroadcastw".
+ FOR j := 0 to 7
+   i := j*16
+   dst[i+15:i] := a[15:0]
+ ENDFOR
+ SSE2
+ emmintrin.h
+ Set
+
+ Broadcast 8-bit integer "a" to all elements of "dst". This intrinsic may generate "vpbroadcastb".
+ FOR j := 0 to 15
+   i := j*8
+   dst[i+7:i] := a[7:0]
+ ENDFOR
+ SSE2
+ emmintrin.h
+ Set
+
+ Set packed 64-bit integers in "dst" with the supplied values in reverse order.
+ dst[63:0] := e1
+ dst[127:64] := e0
+ SSE2
+ emmintrin.h
+ Set
+
+ Set packed 32-bit integers in "dst" with the supplied values in reverse order.
+ dst[31:0] := e3
+ dst[63:32] := e2
+ dst[95:64] := e1
+ dst[127:96] := e0
+ SSE2
+ emmintrin.h
+ Set
+
+ Set packed 16-bit integers in "dst" with the supplied values in reverse order.
+ dst[15:0] := e7
+ dst[31:16] := e6
+ dst[47:32] := e5
+ dst[63:48] := e4
+ dst[79:64] := e3
+ dst[95:80] := e2
+ dst[111:96] := e1
+ dst[127:112] := e0
+ SSE2
+ emmintrin.h
+ Set
+
+ Set packed 8-bit integers in "dst" with the supplied values in reverse order.
+ dst[7:0] := e15
+ dst[15:8] := e14
+ dst[23:16] := e13
+ dst[31:24] := e12
+ dst[39:32] := e11
+ dst[47:40] := e10
+ dst[55:48] := e9
+ dst[63:56] := e8
+ dst[71:64] := e7
+ dst[79:72] := e6
+ dst[87:80] := e5
+ dst[95:88] := e4
+ dst[103:96] := e3
+ dst[111:104] := e2
+ dst[119:112] := e1
+ dst[127:120] := e0
+ SSE2
+ emmintrin.h
+ Set
+
+ Return vector of type __m128i with all elements set to zero.
+ dst[MAX:0] := 0
+ SSE2
+ emmintrin.h
+ Set
+
+ Copy double-precision (64-bit) floating-point element "a" to the lower element of "dst", and zero the upper element.
+ dst[63:0] := a[63:0]
+ dst[127:64] := 0
+ SSE2
+ emmintrin.h
+ Set
+
+ Broadcast double-precision (64-bit) floating-point value "a" to all elements of "dst".
+ FOR j := 0 to 1
+   i := j*64
+   dst[i+63:i] := a[63:0]
+ ENDFOR
+ SSE2
+ emmintrin.h
+ Set
+
+ Broadcast double-precision (64-bit) floating-point value "a" to all elements of "dst".
+ FOR j := 0 to 1
+   i := j*64
+   dst[i+63:i] := a[63:0]
+ ENDFOR
+ SSE2
+ emmintrin.h
+ Set
+
+ Set packed double-precision (64-bit) floating-point elements in "dst" with the supplied values.
+ dst[63:0] := e0
+ dst[127:64] := e1
+ SSE2
+ emmintrin.h
+ Set
+
+ Set packed double-precision (64-bit) floating-point elements in "dst" with the supplied values in reverse order.
+ dst[63:0] := e1
+ dst[127:64] := e0
+ SSE2
+ emmintrin.h
+ Set
+
+ Return vector of type __m128d with all elements set to zero.
+ dst[MAX:0] := 0
+ SSE2
+ emmintrin.h
+ Set
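
The only trap in the set family is argument order: the plain form takes arguments from the highest element down, while the "reverse order" form takes them in memory order. A sketch, assuming the 32-bit records correspond to `_mm_set_epi32` and `_mm_setr_epi32`:

    use core::arch::x86_64::*;

    #[target_feature(enable = "sse2")]
    unsafe fn set_order_sketch() {
        // These two calls build the identical vector [0, 1, 2, 3].
        let a = _mm_set_epi32(3, 2, 1, 0);
        let b = _mm_setr_epi32(0, 1, 2, 3);
        let eq = _mm_cmpeq_epi32(a, b);
        assert_eq!(_mm_movemask_epi8(eq), 0xFFFF); // every byte matched
    }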
+ Copy the lower 64-bit integer in "a" to "dst".
+ dst[63:0] := a[63:0]
+ SSE2
+ emmintrin.h
+ Miscellaneous
+
+ Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using signed saturation, and store the results in "dst".
+ dst[7:0] := Saturate8(a[15:0])
+ dst[15:8] := Saturate8(a[31:16])
+ dst[23:16] := Saturate8(a[47:32])
+ dst[31:24] := Saturate8(a[63:48])
+ dst[39:32] := Saturate8(a[79:64])
+ dst[47:40] := Saturate8(a[95:80])
+ dst[55:48] := Saturate8(a[111:96])
+ dst[63:56] := Saturate8(a[127:112])
+ dst[71:64] := Saturate8(b[15:0])
+ dst[79:72] := Saturate8(b[31:16])
+ dst[87:80] := Saturate8(b[47:32])
+ dst[95:88] := Saturate8(b[63:48])
+ dst[103:96] := Saturate8(b[79:64])
+ dst[111:104] := Saturate8(b[95:80])
+ dst[119:112] := Saturate8(b[111:96])
+ dst[127:120] := Saturate8(b[127:112])
+ SSE2
+ emmintrin.h
+ Miscellaneous
+
+ Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit integers using signed saturation, and store the results in "dst".
+ dst[15:0] := Saturate16(a[31:0])
+ dst[31:16] := Saturate16(a[63:32])
+ dst[47:32] := Saturate16(a[95:64])
+ dst[63:48] := Saturate16(a[127:96])
+ dst[79:64] := Saturate16(b[31:0])
+ dst[95:80] := Saturate16(b[63:32])
+ dst[111:96] := Saturate16(b[95:64])
+ dst[127:112] := Saturate16(b[127:96])
+ SSE2
+ emmintrin.h
+ Miscellaneous
+
+ Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using unsigned saturation, and store the results in "dst".
+ dst[7:0] := SaturateU8(a[15:0])
+ dst[15:8] := SaturateU8(a[31:16])
+ dst[23:16] := SaturateU8(a[47:32])
+ dst[31:24] := SaturateU8(a[63:48])
+ dst[39:32] := SaturateU8(a[79:64])
+ dst[47:40] := SaturateU8(a[95:80])
+ dst[55:48] := SaturateU8(a[111:96])
+ dst[63:56] := SaturateU8(a[127:112])
+ dst[71:64] := SaturateU8(b[15:0])
+ dst[79:72] := SaturateU8(b[31:16])
+ dst[87:80] := SaturateU8(b[47:32])
+ dst[95:88] := SaturateU8(b[63:48])
+ dst[103:96] := SaturateU8(b[79:64])
+ dst[111:104] := SaturateU8(b[95:80])
+ dst[119:112] := SaturateU8(b[111:96])
+ dst[127:120] := SaturateU8(b[127:112])
+ SSE2
+ emmintrin.h
+ Miscellaneous
+
+ Create mask from the most significant bit of each 8-bit element in "a", and store the result in "dst".
+ FOR j := 0 to 15
+   i := j*8
+   dst[j] := a[i+7]
+ ENDFOR
+ dst[MAX:16] := 0
+ SSE2
+ emmintrin.h
+ Miscellaneous
+
+ Set each bit of mask "dst" based on the most significant bit of the corresponding packed double-precision (64-bit) floating-point element in "a".
+ FOR j := 0 to 1
+   i := j*64
+   IF a[i+63]
+     dst[j] := 1
+   ELSE
+     dst[j] := 0
+   FI
+ ENDFOR
+ dst[MAX:2] := 0
+ SSE2
+ emmintrin.h
+ Miscellaneous
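
A sketch of the saturating pack plus byte movemask described above, assuming the records correspond to `_mm_packs_epi16` and `_mm_movemask_epi8` (values and the function name are illustrative):

    use core::arch::x86_64::*;

    #[target_feature(enable = "sse2")]
    unsafe fn pack_movemask_sketch() {
        // Signed saturation: 300 clamps to 127, -300 clamps to -128.
        let w = _mm_setr_epi16(300, -300, 5, 0, 0, 0, 0, 0);
        let packed = _mm_packs_epi16(w, w);
        let mut out = [0i8; 16];
        _mm_storeu_si128(out.as_mut_ptr().cast(), packed);
        assert_eq!(&out[..3], &[127, -128, 5]);
        // Gather the sign bit of each byte into a 16-bit mask; byte 1 is negative.
        assert_eq!(_mm_movemask_epi8(packed) & 0b10, 0b10);
    }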
+ Copy the 64-bit integer "a" to the lower element of "dst", and zero the upper element.
+ dst[63:0] := a[63:0]
+ dst[127:64] := 0
+ SSE2
+ emmintrin.h
+ Move
+
+ Copy the lower 64-bit integer in "a" to the lower element of "dst", and zero the upper element.
+ dst[63:0] := a[63:0]
+ dst[127:64] := 0
+ SSE2
+ emmintrin.h
+ Move
+
+ Move the lower double-precision (64-bit) floating-point element from "b" to the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".
+ dst[63:0] := b[63:0]
+ dst[127:64] := a[127:64]
+ SSE2
+ emmintrin.h
+ Move
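
A sketch of the lower-lane merge, assuming the last record corresponds to `_mm_move_sd`:

    use core::arch::x86_64::*;

    #[target_feature(enable = "sse2")]
    unsafe fn move_sd_sketch() {
        let a = _mm_set_pd(10.0, 11.0); // lanes [11.0, 10.0]
        let b = _mm_set_pd(20.0, 21.0); // lanes [21.0, 20.0]
        // Lower lane from `b`, upper lane from `a`.
        let r = _mm_move_sd(a, b);
        let mut out = [0.0f64; 2];
        _mm_storeu_pd(out.as_mut_ptr(), r);
        assert_eq!(out, [21.0, 10.0]);
    }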
- - - - Extract a 16-bit integer from "a", selected with "imm8", and store the result - in the lower element of "dst". - - dst[15:0] := (a[127:0] >> (imm8[2:0] * 16))[15:0] - dst[31:16] := 0 - - - SSE2 -
emmintrin.h
- Swizzle + + + + Extract a 16-bit integer from "a", selected with "imm8", and store the result in the lower element of "dst". + +dst[15:0] := (a[127:0] >> (imm8[2:0] * 16))[15:0] +dst[31:16] := 0 + + + SSE2 +
emmintrin.h
+ Swizzle
- - - - - Copy "a" to "dst", and insert the 16-bit integer "i" into "dst" at the location - specified by "imm8". - - dst[127:0] := a[127:0] - sel := imm8[2:0]*16 - dst[sel+15:sel] := i[15:0] - - - SSE2 -
emmintrin.h
- Swizzle + + + + + Copy "a" to "dst", and insert the 16-bit integer "i" into "dst" at the location specified by "imm8". + +dst[127:0] := a[127:0] +sel := imm8[2:0]*16 +dst[sel+15:sel] := i[15:0] + + + SSE2 +
emmintrin.h
+ Swizzle
- - - - Shuffle 32-bit integers in "a" using the control in "imm8", and store the - results in "dst". - - DEFINE SELECT4(src, control) { - CASE(control[1:0]) OF - 0: tmp[31:0] := src[31:0] - 1: tmp[31:0] := src[63:32] - 2: tmp[31:0] := src[95:64] - 3: tmp[31:0] := src[127:96] - ESAC - RETURN tmp[31:0] - } - dst[31:0] := SELECT4(a[127:0], imm8[1:0]) - dst[63:32] := SELECT4(a[127:0], imm8[3:2]) - dst[95:64] := SELECT4(a[127:0], imm8[5:4]) - dst[127:96] := SELECT4(a[127:0], imm8[7:6]) - - - SSE2 -
emmintrin.h
- Swizzle + + + + Shuffle 32-bit integers in "a" using the control in "imm8", and store the results in "dst". + +DEFINE SELECT4(src, control) { + CASE(control[1:0]) OF + 0: tmp[31:0] := src[31:0] + 1: tmp[31:0] := src[63:32] + 2: tmp[31:0] := src[95:64] + 3: tmp[31:0] := src[127:96] + ESAC + RETURN tmp[31:0] +} +dst[31:0] := SELECT4(a[127:0], imm8[1:0]) +dst[63:32] := SELECT4(a[127:0], imm8[3:2]) +dst[95:64] := SELECT4(a[127:0], imm8[5:4]) +dst[127:96] := SELECT4(a[127:0], imm8[7:6]) + + + SSE2 +
emmintrin.h
+ Swizzle
- - - - - Shuffle 16-bit integers in the high 64 bits of "a" using the control in "imm8". - Store the results in the high 64 bits of "dst", with the low 64 bits being copied from - from "a" to "dst". - - dst[63:0] := a[63:0] - dst[79:64] := (a >> (imm8[1:0] * 16))[79:64] - dst[95:80] := (a >> (imm8[3:2] * 16))[79:64] - dst[111:96] := (a >> (imm8[5:4] * 16))[79:64] - dst[127:112] := (a >> (imm8[7:6] * 16))[79:64] - - - SSE2 -
emmintrin.h
- Swizzle + + + + + Shuffle 16-bit integers in the high 64 bits of "a" using the control in "imm8". Store the results in the high 64 bits of "dst", with the low 64 bits being copied from from "a" to "dst". + +dst[63:0] := a[63:0] +dst[79:64] := (a >> (imm8[1:0] * 16))[79:64] +dst[95:80] := (a >> (imm8[3:2] * 16))[79:64] +dst[111:96] := (a >> (imm8[5:4] * 16))[79:64] +dst[127:112] := (a >> (imm8[7:6] * 16))[79:64] + + + SSE2 +
emmintrin.h
+ Swizzle
- - - - Shuffle 16-bit integers in the low 64 bits of "a" using the control in "imm8". - Store the results in the low 64 bits of "dst", with the high 64 bits being copied from - from "a" to "dst". - - dst[15:0] := (a >> (imm8[1:0] * 16))[15:0] - dst[31:16] := (a >> (imm8[3:2] * 16))[15:0] - dst[47:32] := (a >> (imm8[5:4] * 16))[15:0] - dst[63:48] := (a >> (imm8[7:6] * 16))[15:0] - dst[127:64] := a[127:64] - - - SSE2 -
emmintrin.h
- Swizzle + + + + Shuffle 16-bit integers in the low 64 bits of "a" using the control in "imm8". Store the results in the low 64 bits of "dst", with the high 64 bits being copied from from "a" to "dst". + +dst[15:0] := (a >> (imm8[1:0] * 16))[15:0] +dst[31:16] := (a >> (imm8[3:2] * 16))[15:0] +dst[47:32] := (a >> (imm8[5:4] * 16))[15:0] +dst[63:48] := (a >> (imm8[7:6] * 16))[15:0] +dst[127:64] := a[127:64] + + + SSE2 +
emmintrin.h
+ Swizzle
- - - - Unpack and interleave 8-bit integers from the high half of "a" and "b", and - store the results in "dst". - - DEFINE INTERLEAVE_HIGH_BYTES(src1[127:0], src2[127:0]) { - dst[7:0] := src1[71:64] - dst[15:8] := src2[71:64] - dst[23:16] := src1[79:72] - dst[31:24] := src2[79:72] - dst[39:32] := src1[87:80] - dst[47:40] := src2[87:80] - dst[55:48] := src1[95:88] - dst[63:56] := src2[95:88] - dst[71:64] := src1[103:96] - dst[79:72] := src2[103:96] - dst[87:80] := src1[111:104] - dst[95:88] := src2[111:104] - dst[103:96] := src1[119:112] - dst[111:104] := src2[119:112] - dst[119:112] := src1[127:120] - dst[127:120] := src2[127:120] - RETURN dst[127:0] - } - dst[127:0] := INTERLEAVE_HIGH_BYTES(a[127:0], b[127:0]) - - - SSE2 -
emmintrin.h
- Swizzle + + + + Unpack and interleave 8-bit integers from the high half of "a" and "b", and store the results in "dst". + +DEFINE INTERLEAVE_HIGH_BYTES(src1[127:0], src2[127:0]) { + dst[7:0] := src1[71:64] + dst[15:8] := src2[71:64] + dst[23:16] := src1[79:72] + dst[31:24] := src2[79:72] + dst[39:32] := src1[87:80] + dst[47:40] := src2[87:80] + dst[55:48] := src1[95:88] + dst[63:56] := src2[95:88] + dst[71:64] := src1[103:96] + dst[79:72] := src2[103:96] + dst[87:80] := src1[111:104] + dst[95:88] := src2[111:104] + dst[103:96] := src1[119:112] + dst[111:104] := src2[119:112] + dst[119:112] := src1[127:120] + dst[127:120] := src2[127:120] + RETURN dst[127:0] +} +dst[127:0] := INTERLEAVE_HIGH_BYTES(a[127:0], b[127:0]) + + + SSE2 +
emmintrin.h
+ Swizzle
- - - - Unpack and interleave 16-bit integers from the high half of "a" and "b", and - store the results in "dst". - - DEFINE INTERLEAVE_HIGH_WORDS(src1[127:0], src2[127:0]) { - dst[15:0] := src1[79:64] - dst[31:16] := src2[79:64] - dst[47:32] := src1[95:80] - dst[63:48] := src2[95:80] - dst[79:64] := src1[111:96] - dst[95:80] := src2[111:96] - dst[111:96] := src1[127:112] - dst[127:112] := src2[127:112] - RETURN dst[127:0] - } - dst[127:0] := INTERLEAVE_HIGH_WORDS(a[127:0], b[127:0]) - - - SSE2 -
emmintrin.h
- Swizzle + + + + Unpack and interleave 16-bit integers from the high half of "a" and "b", and store the results in "dst". + +DEFINE INTERLEAVE_HIGH_WORDS(src1[127:0], src2[127:0]) { + dst[15:0] := src1[79:64] + dst[31:16] := src2[79:64] + dst[47:32] := src1[95:80] + dst[63:48] := src2[95:80] + dst[79:64] := src1[111:96] + dst[95:80] := src2[111:96] + dst[111:96] := src1[127:112] + dst[127:112] := src2[127:112] + RETURN dst[127:0] +} +dst[127:0] := INTERLEAVE_HIGH_WORDS(a[127:0], b[127:0]) + + + SSE2 +
emmintrin.h
+ Swizzle
- - - - Unpack and interleave 32-bit integers from the high half of "a" and "b", and - store the results in "dst". - - DEFINE INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]) { - dst[31:0] := src1[95:64] - dst[63:32] := src2[95:64] - dst[95:64] := src1[127:96] - dst[127:96] := src2[127:96] - RETURN dst[127:0] - } - dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0]) - - - SSE2 -
emmintrin.h
- Swizzle + + + + Unpack and interleave 32-bit integers from the high half of "a" and "b", and store the results in "dst". + +DEFINE INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]) { + dst[31:0] := src1[95:64] + dst[63:32] := src2[95:64] + dst[95:64] := src1[127:96] + dst[127:96] := src2[127:96] + RETURN dst[127:0] +} +dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0]) + + + SSE2 +
emmintrin.h
+ Swizzle
- - - - Unpack and interleave 64-bit integers from the high half of "a" and "b", and - store the results in "dst". - - DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) { - dst[63:0] := src1[127:64] - dst[127:64] := src2[127:64] - RETURN dst[127:0] - } - dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0]) - - - SSE2 -
emmintrin.h
- Swizzle + + + + Unpack and interleave 64-bit integers from the high half of "a" and "b", and store the results in "dst". + +DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) { + dst[63:0] := src1[127:64] + dst[127:64] := src2[127:64] + RETURN dst[127:0] +} +dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0]) + + + SSE2 +
emmintrin.h
+ Swizzle
- - - - Unpack and interleave 8-bit integers from the low half of "a" and "b", and - store the results in "dst". - - DEFINE INTERLEAVE_BYTES(src1[127:0], src2[127:0]) { - dst[7:0] := src1[7:0] - dst[15:8] := src2[7:0] - dst[23:16] := src1[15:8] - dst[31:24] := src2[15:8] - dst[39:32] := src1[23:16] - dst[47:40] := src2[23:16] - dst[55:48] := src1[31:24] - dst[63:56] := src2[31:24] - dst[71:64] := src1[39:32] - dst[79:72] := src2[39:32] - dst[87:80] := src1[47:40] - dst[95:88] := src2[47:40] - dst[103:96] := src1[55:48] - dst[111:104] := src2[55:48] - dst[119:112] := src1[63:56] - dst[127:120] := src2[63:56] - RETURN dst[127:0] - } - dst[127:0] := INTERLEAVE_BYTES(a[127:0], b[127:0]) - - - SSE2 -
emmintrin.h
- Swizzle + + + + Unpack and interleave 8-bit integers from the low half of "a" and "b", and store the results in "dst". + +DEFINE INTERLEAVE_BYTES(src1[127:0], src2[127:0]) { + dst[7:0] := src1[7:0] + dst[15:8] := src2[7:0] + dst[23:16] := src1[15:8] + dst[31:24] := src2[15:8] + dst[39:32] := src1[23:16] + dst[47:40] := src2[23:16] + dst[55:48] := src1[31:24] + dst[63:56] := src2[31:24] + dst[71:64] := src1[39:32] + dst[79:72] := src2[39:32] + dst[87:80] := src1[47:40] + dst[95:88] := src2[47:40] + dst[103:96] := src1[55:48] + dst[111:104] := src2[55:48] + dst[119:112] := src1[63:56] + dst[127:120] := src2[63:56] + RETURN dst[127:0] +} +dst[127:0] := INTERLEAVE_BYTES(a[127:0], b[127:0]) + + + SSE2 +
emmintrin.h
+ Swizzle
- - - - Unpack and interleave 16-bit integers from the low half of "a" and "b", and - store the results in "dst". - - DEFINE INTERLEAVE_WORDS(src1[127:0], src2[127:0]) { - dst[15:0] := src1[15:0] - dst[31:16] := src2[15:0] - dst[47:32] := src1[31:16] - dst[63:48] := src2[31:16] - dst[79:64] := src1[47:32] - dst[95:80] := src2[47:32] - dst[111:96] := src1[63:48] - dst[127:112] := src2[63:48] - RETURN dst[127:0] - } - dst[127:0] := INTERLEAVE_WORDS(a[127:0], b[127:0]) - - - SSE2 -
emmintrin.h
- Swizzle + + + + Unpack and interleave 16-bit integers from the low half of "a" and "b", and store the results in "dst". + +DEFINE INTERLEAVE_WORDS(src1[127:0], src2[127:0]) { + dst[15:0] := src1[15:0] + dst[31:16] := src2[15:0] + dst[47:32] := src1[31:16] + dst[63:48] := src2[31:16] + dst[79:64] := src1[47:32] + dst[95:80] := src2[47:32] + dst[111:96] := src1[63:48] + dst[127:112] := src2[63:48] + RETURN dst[127:0] +} +dst[127:0] := INTERLEAVE_WORDS(a[127:0], b[127:0]) + + + SSE2 +
emmintrin.h
+ Swizzle
- - - - Unpack and interleave 32-bit integers from the low half of "a" and "b", and - store the results in "dst". - - DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) { - dst[31:0] := src1[31:0] - dst[63:32] := src2[31:0] - dst[95:64] := src1[63:32] - dst[127:96] := src2[63:32] - RETURN dst[127:0] - } - dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0]) - - - SSE2 -
emmintrin.h
- Swizzle + + + + Unpack and interleave 32-bit integers from the low half of "a" and "b", and store the results in "dst". + +DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) { + dst[31:0] := src1[31:0] + dst[63:32] := src2[31:0] + dst[95:64] := src1[63:32] + dst[127:96] := src2[63:32] + RETURN dst[127:0] +} +dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0]) + + + SSE2 +
emmintrin.h
+ Swizzle
- - - - Unpack and interleave 64-bit integers from the low half of "a" and "b", and - store the results in "dst". - - DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) { - dst[63:0] := src1[63:0] - dst[127:64] := src2[63:0] - RETURN dst[127:0] - } - dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0]) - - - SSE2 -
emmintrin.h
- Swizzle + + + + Unpack and interleave 64-bit integers from the low half of "a" and "b", and store the results in "dst". + +DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) { + dst[63:0] := src1[63:0] + dst[127:64] := src2[63:0] + RETURN dst[127:0] +} +dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0]) + + + SSE2 +
emmintrin.h
+ Swizzle
- - - - Unpack and interleave double-precision (64-bit) floating-point elements from - the high half of "a" and "b", and store the results in "dst". - - DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) { - dst[63:0] := src1[127:64] - dst[127:64] := src2[127:64] - RETURN dst[127:0] - } - dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0]) - - - SSE2 -
emmintrin.h
- Swizzle + + + + Unpack and interleave double-precision (64-bit) floating-point elements from the high half of "a" and "b", and store the results in "dst". + +DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) { + dst[63:0] := src1[127:64] + dst[127:64] := src2[127:64] + RETURN dst[127:0] +} +dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0]) + + + SSE2 +
emmintrin.h
+ Swizzle
- - - - Unpack and interleave double-precision (64-bit) floating-point elements from - the low half of "a" and "b", and store the results in "dst". - - DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) { - dst[63:0] := src1[63:0] - dst[127:64] := src2[63:0] - RETURN dst[127:0] - } - dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0]) - - - SSE2 -
emmintrin.h
- Swizzle + + + + Unpack and interleave double-precision (64-bit) floating-point elements from the low half of "a" and "b", and store the results in "dst". + +DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) { + dst[63:0] := src1[63:0] + dst[127:64] := src2[63:0] + RETURN dst[127:0] +} +dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0]) + + + SSE2 +
emmintrin.h
+ Swizzle
- - - - - Shuffle double-precision (64-bit) floating-point elements using the control in - "imm8", and store the results in "dst". - - dst[63:0] := (imm8[0] == 0) ? a[63:0] : a[127:64] - dst[127:64] := (imm8[1] == 0) ? b[63:0] : b[127:64] - - - SSE2 -
emmintrin.h
- Swizzle + + + + + Shuffle double-precision (64-bit) floating-point elements using the control in "imm8", and store the results in "dst". + +dst[63:0] := (imm8[0] == 0) ? a[63:0] : a[127:64] +dst[127:64] := (imm8[1] == 0) ? b[63:0] : b[127:64] + + + SSE2 +
emmintrin.h
+ Swizzle
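This is `_mm_shuffle_pd`. A sketch of the immediate's two control bits, assuming the const-generic `imm8` form exposed by current `core::arch` (illustrative values only):

    #[target_feature(enable = "sse2")]
    unsafe fn shuffle_pd_demo() {
        use core::arch::x86_64::*;
        let a = _mm_setr_pd(1.0, 2.0);
        let b = _mm_setr_pd(3.0, 4.0);
        // imm8 = 0b01: bit 0 set picks a's upper element, bit 1 clear picks b's lower element.
        let r: [f64; 2] = core::mem::transmute(_mm_shuffle_pd::<0b01>(a, b));
        assert_eq!(r, [2.0, 3.0]);
    }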
- - - - Compute the square root of the lower double-precision (64-bit) floating-point - element in "b", store the result in the lower element of "dst", and copy the upper - element from "a" to the upper element of "dst". - - dst[63:0] := SQRT(b[63:0]) - dst[127:64] := a[127:64] - - - SSE2 -
emmintrin.h
- Elementary Math Functions + + + + Compute the square root of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := SQRT(b[63:0]) +dst[127:64] := a[127:64] + + + SSE2 +
emmintrin.h
+ Elementary Math Functions
- - - Compute the square root of packed double-precision (64-bit) floating-point - elements in "a", and store the results in "dst". - - FOR j := 0 to 1 - i := j*64 - dst[i+63:i] := SQRT(a[i+63:i]) - ENDFOR - - - SSE2 -
emmintrin.h
- Elementary Math Functions + + + Compute the square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := SQRT(a[i+63:i]) +ENDFOR + + + SSE2 +
emmintrin.h
+ Elementary Math Functions
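For reference, a minimal sketch of the packed form (`_mm_sqrt_pd`) in Rust; values are illustrative:

    #[target_feature(enable = "sse2")]
    unsafe fn sqrt_pd_demo() {
        use core::arch::x86_64::*;
        let a = _mm_setr_pd(4.0, 9.0);
        // SQRT is applied to each 64-bit lane independently.
        let r: [f64; 2] = core::mem::transmute(_mm_sqrt_pd(a));
        assert_eq!(r, [2.0, 3.0]);
    }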
- - - Cast vector of type __m128d to type __m128. This intrinsic is only used for - compilation and does not generate any instructions, thus it has zero latency. - SSE2 -
emmintrin.h
- Cast + + + Cast vector of type __m128d to type __m128. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + SSE2 +
emmintrin.h
+ Cast
- - - Cast vector of type __m128d to type __m128i. This intrinsic is only used for - compilation and does not generate any instructions, thus it has zero latency. - SSE2 -
emmintrin.h
- Cast + + + Cast vector of type __m128d to type __m128i. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + SSE2 +
emmintrin.h
+ Cast
- - - Cast vector of type __m128 to type __m128d. This intrinsic is only used for - compilation and does not generate any instructions, thus it has zero latency. - SSE2 -
emmintrin.h
- Cast + + + Cast vector of type __m128 to type __m128d. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + SSE2 +
emmintrin.h
+ Cast
- - - Cast vector of type __m128 to type __m128i. This intrinsic is only used for - compilation and does not generate any instructions, thus it has zero latency. - SSE2 -
emmintrin.h
- Cast + + + Cast vector of type __m128 to type __m128i. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + SSE2 +
emmintrin.h
+ Cast
- - - Cast vector of type __m128i to type __m128d. This intrinsic is only used for - compilation and does not generate any instructions, thus it has zero latency. - SSE2 -
emmintrin.h
- Cast + + + Cast vector of type __m128i to type __m128d. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + SSE2 +
emmintrin.h
+ Cast
- - - Cast vector of type __m128i to type __m128. This intrinsic is only used for - compilation and does not generate any instructions, thus it has zero latency. - SSE2 -
emmintrin.h
- Cast -
- - - - - - - Alternatively add and subtract packed single-precision (32-bit) floating-point - elements in "a" to/from packed elements in "b", and store the results in "dst". - - FOR j := 0 to 3 - i := j*32 - IF ((j & 1) == 0) - dst[i+31:i] := a[i+31:i] - b[i+31:i] - ELSE - dst[i+31:i] := a[i+31:i] + b[i+31:i] - FI - ENDFOR - - - SSE3 -
pmmintrin.h
- Arithmetic + + + Cast vector of type __m128i to type __m128. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency. + SSE2 +
emmintrin.h
+ Cast +
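The cast family above is a pure bit-level reinterpretation between vector types. A small Rust sketch of a round trip (assumed demo function, not from the patch):

    #[target_feature(enable = "sse2")]
    unsafe fn cast_roundtrip_demo() {
        use core::arch::x86_64::*;
        let f = _mm_set1_ps(1.0);
        // Reinterprets the register bits; no conversion instruction is generated.
        let as_int = _mm_castps_si128(f);
        let back = _mm_castsi128_ps(as_int);
        assert_eq!(_mm_cvtss_f32(back), 1.0);
    }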
+ + + + + + + Alternatively add and subtract packed single-precision (32-bit) floating-point elements in "a" to/from packed elements in "b", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + IF ((j & 1) == 0) + dst[i+31:i] := a[i+31:i] - b[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + b[i+31:i] + FI +ENDFOR + + + SSE3 +
pmmintrin.h
+ Arithmetic
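A hedged sketch of `_mm_addsub_ps` showing the even-subtract/odd-add lane pattern described above (values chosen for illustration):

    #[target_feature(enable = "sse3")]
    unsafe fn addsub_ps_demo() {
        use core::arch::x86_64::*;
        let a = _mm_setr_ps(10.0, 20.0, 30.0, 40.0);
        let b = _mm_setr_ps(1.0, 2.0, 3.0, 4.0);
        // Even lanes subtract, odd lanes add.
        let r: [f32; 4] = core::mem::transmute(_mm_addsub_ps(a, b));
        assert_eq!(r, [9.0, 22.0, 27.0, 44.0]);
    }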
- - - - Alternatively add and subtract packed double-precision (64-bit) floating-point - elements in "a" to/from packed elements in "b", and store the results in "dst". - - FOR j := 0 to 1 - i := j*64 - IF ((j & 1) == 0) - dst[i+63:i] := a[i+63:i] - b[i+63:i] - ELSE - dst[i+63:i] := a[i+63:i] + b[i+63:i] - FI - ENDFOR - - - SSE3 -
pmmintrin.h
- Arithmetic + + + + Alternatively add and subtract packed double-precision (64-bit) floating-point elements in "a" to/from packed elements in "b", and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + IF ((j & 1) == 0) + dst[i+63:i] := a[i+63:i] - b[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + b[i+63:i] + FI +ENDFOR + + + SSE3 +
pmmintrin.h
+ Arithmetic
- - - - Horizontally add adjacent pairs of double-precision (64-bit) floating-point - elements in "a" and "b", and pack the results in "dst". - - dst[63:0] := a[127:64] + a[63:0] - dst[127:64] := b[127:64] + b[63:0] - - - SSE3 -
pmmintrin.h
- Arithmetic + + + + Horizontally add adjacent pairs of double-precision (64-bit) floating-point elements in "a" and "b", and pack the results in "dst". + +dst[63:0] := a[127:64] + a[63:0] +dst[127:64] := b[127:64] + b[63:0] + + + SSE3 +
pmmintrin.h
+ Arithmetic
- - - - Horizontally add adjacent pairs of single-precision (32-bit) floating-point - elements in "a" and "b", and pack the results in "dst". - - dst[31:0] := a[63:32] + a[31:0] - dst[63:32] := a[127:96] + a[95:64] - dst[95:64] := b[63:32] + b[31:0] - dst[127:96] := b[127:96] + b[95:64] - - - SSE3 -
pmmintrin.h
- Arithmetic + + + + Horizontally add adjacent pairs of single-precision (32-bit) floating-point elements in "a" and "b", and pack the results in "dst". + +dst[31:0] := a[63:32] + a[31:0] +dst[63:32] := a[127:96] + a[95:64] +dst[95:64] := b[63:32] + b[31:0] +dst[127:96] := b[127:96] + b[95:64] + + + SSE3 +
pmmintrin.h
+ Arithmetic
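To make the pairing order concrete, a minimal `_mm_hadd_ps` sketch in Rust (illustrative values; not part of the data file):

    #[target_feature(enable = "sse3")]
    unsafe fn hadd_ps_demo() {
        use core::arch::x86_64::*;
        let a = _mm_setr_ps(1.0, 2.0, 3.0, 4.0);
        let b = _mm_setr_ps(10.0, 20.0, 30.0, 40.0);
        // Adjacent pairs from "a" fill the low half, pairs from "b" the high half.
        let r: [f32; 4] = core::mem::transmute(_mm_hadd_ps(a, b));
        assert_eq!(r, [3.0, 7.0, 30.0, 70.0]);
    }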
- - - - Horizontally subtract adjacent pairs of double-precision (64-bit) - floating-point elements in "a" and "b", and pack the results in "dst". - - dst[63:0] := a[63:0] - a[127:64] - dst[127:64] := b[63:0] - b[127:64] - - - SSE3 -
pmmintrin.h
- Arithmetic + + + + Horizontally subtract adjacent pairs of double-precision (64-bit) floating-point elements in "a" and "b", and pack the results in "dst". + +dst[63:0] := a[63:0] - a[127:64] +dst[127:64] := b[63:0] - b[127:64] + + + SSE3 +
pmmintrin.h
+ Arithmetic
- - - - Horizontally add adjacent pairs of single-precision (32-bit) floating-point - elements in "a" and "b", and pack the results in "dst". - - dst[31:0] := a[31:0] - a[63:32] - dst[63:32] := a[95:64] - a[127:96] - dst[95:64] := b[31:0] - b[63:32] - dst[127:96] := b[95:64] - b[127:96] - - - SSE3 -
pmmintrin.h
- Arithmetic + + + + Horizontally subtract adjacent pairs of single-precision (32-bit) floating-point elements in "a" and "b", and pack the results in "dst". +
+dst[31:0] := a[31:0] - a[63:32]
+dst[63:32] := a[95:64] - a[127:96]
+dst[95:64] := b[31:0] - b[63:32]
+dst[127:96] := b[95:64] - b[127:96]
+
+
+ SSE3
+
pmmintrin.h
+ Arithmetic
- - - Load 128-bits of integer data from unaligned memory into "dst". This intrinsic - may perform better than "_mm_loadu_si128" when the data crosses a cache line boundary. - - dst[127:0] := MEM[mem_addr+127:mem_addr] - - - SSE3 -
pmmintrin.h
- Load + + + Load 128-bits of integer data from unaligned memory into "dst". This intrinsic may perform better than "_mm_loadu_si128" when the data crosses a cache line boundary. + +dst[127:0] := MEM[mem_addr+127:mem_addr] + + + SSE3 +
pmmintrin.h
+ Load
- - - Load a double-precision (64-bit) floating-point element from memory into both - elements of "dst". - - dst[63:0] := MEM[mem_addr+63:mem_addr] - dst[127:64] := MEM[mem_addr+63:mem_addr] - - - SSE3 -
pmmintrin.h
- Load + + + Load a double-precision (64-bit) floating-point element from memory into both elements of "dst". + +dst[63:0] := MEM[mem_addr+63:mem_addr] +dst[127:64] := MEM[mem_addr+63:mem_addr] + + + SSE3 +
pmmintrin.h
+ Load
- - - Duplicate the low double-precision (64-bit) floating-point element from "a", - and store the results in "dst". - - dst[63:0] := a[63:0] - dst[127:64] := a[63:0] - - - SSE3 -
pmmintrin.h
- Move + + + Duplicate the low double-precision (64-bit) floating-point element from "a", and store the results in "dst". + +dst[63:0] := a[63:0] +dst[127:64] := a[63:0] + + + SSE3 +
pmmintrin.h
+ Move
- - - Duplicate odd-indexed single-precision (32-bit) floating-point elements from - "a", and store the results in "dst". - - dst[31:0] := a[63:32] - dst[63:32] := a[63:32] - dst[95:64] := a[127:96] - dst[127:96] := a[127:96] - - - SSE3 -
pmmintrin.h
- Move + + + Duplicate odd-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst". + +dst[31:0] := a[63:32] +dst[63:32] := a[63:32] +dst[95:64] := a[127:96] +dst[127:96] := a[127:96] + + + SSE3 +
pmmintrin.h
+ Move
- - - Duplicate even-indexed single-precision (32-bit) floating-point elements from - "a", and store the results in "dst". - - dst[31:0] := a[31:0] - dst[63:32] := a[31:0] - dst[95:64] := a[95:64] - dst[127:96] := a[95:64] - - - SSE3 -
pmmintrin.h
- Move -
- - - - - - - - Blend packed double-precision (64-bit) floating-point elements from "a" and "b" - using control mask "imm8", and store the results in "dst". - - FOR j := 0 to 1 - i := j*64 - IF imm8[j] - dst[i+63:i] := b[i+63:i] - ELSE - dst[i+63:i] := a[i+63:i] - FI - ENDFOR - - - SSE4.1 -
smmintrin.h
- Swizzle + + + Duplicate even-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst". + +dst[31:0] := a[31:0] +dst[63:32] := a[31:0] +dst[95:64] := a[95:64] +dst[127:96] := a[95:64] + + + SSE3 +
pmmintrin.h
+ Move +
+ + + + + + + + Blend packed double-precision (64-bit) floating-point elements from "a" and "b" using control mask "imm8", and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + IF imm8[j] + dst[i+63:i] := b[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR + + + SSE4.1 +
smmintrin.h
+ Swizzle
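This is `_mm_blend_pd`. A sketch of the immediate-controlled blend, assuming the const-generic form used by current `core::arch` (values are illustrative):

    #[target_feature(enable = "sse4.1")]
    unsafe fn blend_pd_demo() {
        use core::arch::x86_64::*;
        let a = _mm_setr_pd(1.0, 2.0);
        let b = _mm_setr_pd(10.0, 20.0);
        // Bit j of the immediate selects element j of "b"; 0b10 keeps a[0] and takes b[1].
        let r: [f64; 2] = core::mem::transmute(_mm_blend_pd::<0b10>(a, b));
        assert_eq!(r, [1.0, 20.0]);
    }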
- - - - - Blend packed single-precision (32-bit) floating-point elements from "a" and "b" - using control mask "imm8", and store the results in "dst". - - FOR j := 0 to 3 - i := j*32 - IF imm8[j] - dst[i+31:i] := b[i+31:i] - ELSE - dst[i+31:i] := a[i+31:i] - FI - ENDFOR - - - SSE4.1 -
smmintrin.h
- Swizzle + + + + + Blend packed single-precision (32-bit) floating-point elements from "a" and "b" using control mask "imm8", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + IF imm8[j] + dst[i+31:i] := b[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR + + + SSE4.1 +
smmintrin.h
+ Swizzle
- - - - - Blend packed double-precision (64-bit) floating-point elements from "a" and "b" - using "mask", and store the results in "dst". - - FOR j := 0 to 1 - i := j*64 - IF mask[i+63] - dst[i+63:i] := b[i+63:i] - ELSE - dst[i+63:i] := a[i+63:i] - FI - ENDFOR - - - SSE4.1 -
smmintrin.h
- Swizzle + + + + + Blend packed double-precision (64-bit) floating-point elements from "a" and "b" using "mask", and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + IF mask[i+63] + dst[i+63:i] := b[i+63:i] + ELSE + dst[i+63:i] := a[i+63:i] + FI +ENDFOR + + + SSE4.1 +
smmintrin.h
+ Swizzle
- - - - - Blend packed single-precision (32-bit) floating-point elements from "a" and "b" - using "mask", and store the results in "dst". - - FOR j := 0 to 3 - i := j*32 - IF mask[i+31] - dst[i+31:i] := b[i+31:i] - ELSE - dst[i+31:i] := a[i+31:i] - FI - ENDFOR - - - SSE4.1 -
smmintrin.h
- Swizzle + + + + + Blend packed single-precision (32-bit) floating-point elements from "a" and "b" using "mask", and store the results in "dst". + +FOR j := 0 to 3 + i := j*32 + IF mask[i+31] + dst[i+31:i] := b[i+31:i] + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR + + + SSE4.1 +
smmintrin.h
+ Swizzle
- - - - - Blend packed 8-bit integers from "a" and "b" using "mask", and store the - results in "dst". - - FOR j := 0 to 15 - i := j*8 - IF mask[i+7] - dst[i+7:i] := b[i+7:i] - ELSE - dst[i+7:i] := a[i+7:i] - FI - ENDFOR - - - SSE4.1 -
smmintrin.h
- Swizzle + + + + + Blend packed 8-bit integers from "a" and "b" using "mask", and store the results in "dst". + +FOR j := 0 to 15 + i := j*8 + IF mask[i+7] + dst[i+7:i] := b[i+7:i] + ELSE + dst[i+7:i] := a[i+7:i] + FI +ENDFOR + + + SSE4.1 +
smmintrin.h
+ Swizzle
- - - - - Blend packed 16-bit integers from "a" and "b" using control mask "imm8", and - store the results in "dst". - - FOR j := 0 to 7 - i := j*16 - IF imm8[j] - dst[i+15:i] := b[i+15:i] - ELSE - dst[i+15:i] := a[i+15:i] - FI - ENDFOR - - - SSE4.1 -
smmintrin.h
- Swizzle + + + + + Blend packed 16-bit integers from "a" and "b" using control mask "imm8", and store the results in "dst". + +FOR j := 0 to 7 + i := j*16 + IF imm8[j] + dst[i+15:i] := b[i+15:i] + ELSE + dst[i+15:i] := a[i+15:i] + FI +ENDFOR + + + SSE4.1 +
smmintrin.h
+ Swizzle
- - - - Extract a single-precision (32-bit) floating-point element from "a", selected - with "imm8", and store the result in "dst". - - dst[31:0] := (a[127:0] >> (imm8[1:0] * 32))[31:0] - - - SSE4.1 -
smmintrin.h
- Swizzle + + + + Extract a single-precision (32-bit) floating-point element from "a", selected with "imm8", and store the result in "dst". + +dst[31:0] := (a[127:0] >> (imm8[1:0] * 32))[31:0] + + + SSE4.1 +
smmintrin.h
+ Swizzle
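Note that `_mm_extract_ps` returns the selected lane as a raw bit pattern in an integer register; a hedged Rust sketch (const-generic form assumed):

    #[target_feature(enable = "sse4.1")]
    unsafe fn extract_ps_demo() {
        use core::arch::x86_64::*;
        let a = _mm_setr_ps(1.0, 2.0, 3.0, 4.0);
        // The element comes back as its IEEE-754 bit pattern in an i32.
        let bits = _mm_extract_ps::<2>(a);
        assert_eq!(f32::from_bits(bits as u32), 3.0);
    }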
- - - - Extract an 8-bit integer from "a", selected with "imm8", and store the result - in the lower element of "dst". - - dst[7:0] := (a[127:0] >> (imm8[3:0] * 8))[7:0] - dst[31:8] := 0 - - - SSE4.1 -
smmintrin.h
- Swizzle + + + + Extract an 8-bit integer from "a", selected with "imm8", and store the result in the lower element of "dst". + +dst[7:0] := (a[127:0] >> (imm8[3:0] * 8))[7:0] +dst[31:8] := 0 + + + SSE4.1 +
smmintrin.h
+ Swizzle
- - - - Extract a 32-bit integer from "a", selected with "imm8", and store the result - in "dst". - - dst[31:0] := (a[127:0] >> (imm8[1:0] * 32))[31:0] - - - SSE4.1 -
smmintrin.h
- Swizzle + + + + Extract a 32-bit integer from "a", selected with "imm8", and store the result in "dst". + +dst[31:0] := (a[127:0] >> (imm8[1:0] * 32))[31:0] + + + SSE4.1 +
smmintrin.h
+ Swizzle
- - - - Extract a 64-bit integer from "a", selected with "imm8", and store the result - in "dst". - - dst[63:0] := (a[127:0] >> (imm8[0] * 64))[63:0] - - - SSE4.1 -
smmintrin.h
- Swizzle + + + + Extract a 64-bit integer from "a", selected with "imm8", and store the result in "dst". + +dst[63:0] := (a[127:0] >> (imm8[0] * 64))[63:0] + + + SSE4.1 +
smmintrin.h
+ Swizzle
- - - - - Copy "a" to "tmp", then insert a single-precision (32-bit) floating-point - element from "b" into "tmp" using the control in "imm8". Store "tmp" to "dst" using the - mask in "imm8" (elements are zeroed out when the corresponding bit is set). - - tmp2[127:0] := a[127:0] - CASE (imm8[7:6]) OF - 0: tmp1[31:0] := b[31:0] - 1: tmp1[31:0] := b[63:32] - 2: tmp1[31:0] := b[95:64] - 3: tmp1[31:0] := b[127:96] - ESAC - CASE (imm8[5:4]) OF - 0: tmp2[31:0] := tmp1[31:0] - 1: tmp2[63:32] := tmp1[31:0] - 2: tmp2[95:64] := tmp1[31:0] - 3: tmp2[127:96] := tmp1[31:0] - ESAC - FOR j := 0 to 3 - i := j*32 - IF imm8[j%8] - dst[i+31:i] := 0 - ELSE - dst[i+31:i] := tmp2[i+31:i] - FI - ENDFOR - - - SSE4.1 -
smmintrin.h
- Swizzle + + + + + Copy "a" to "tmp", then insert a single-precision (32-bit) floating-point element from "b" into "tmp" using the control in "imm8". Store "tmp" to "dst" using the mask in "imm8" (elements are zeroed out when the corresponding bit is set). + +tmp2[127:0] := a[127:0] +CASE (imm8[7:6]) OF +0: tmp1[31:0] := b[31:0] +1: tmp1[31:0] := b[63:32] +2: tmp1[31:0] := b[95:64] +3: tmp1[31:0] := b[127:96] +ESAC +CASE (imm8[5:4]) OF +0: tmp2[31:0] := tmp1[31:0] +1: tmp2[63:32] := tmp1[31:0] +2: tmp2[95:64] := tmp1[31:0] +3: tmp2[127:96] := tmp1[31:0] +ESAC +FOR j := 0 to 3 + i := j*32 + IF imm8[j%8] + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := tmp2[i+31:i] + FI +ENDFOR + + + SSE4.1 +
smmintrin.h
+ Swizzle
- - - - - Copy "a" to "dst", and insert the lower 8-bit integer from "i" into "dst" at - the location specified by "imm8". - - dst[127:0] := a[127:0] - sel := imm8[3:0]*8 - dst[sel+7:sel] := i[7:0] - - - SSE4.1 -
smmintrin.h
- Swizzle + + + + + Copy "a" to "dst", and insert the lower 8-bit integer from "i" into "dst" at the location specified by "imm8". + +dst[127:0] := a[127:0] +sel := imm8[3:0]*8 +dst[sel+7:sel] := i[7:0] + + + SSE4.1 +
smmintrin.h
+ Swizzle
- - - - - Copy "a" to "dst", and insert the 32-bit integer "i" into "dst" at the location - specified by "imm8". - - dst[127:0] := a[127:0] - sel := imm8[1:0]*32 - dst[sel+31:sel] := i[31:0] - - - SSE4.1 -
smmintrin.h
- Swizzle + + + + + Copy "a" to "dst", and insert the 32-bit integer "i" into "dst" at the location specified by "imm8". + +dst[127:0] := a[127:0] +sel := imm8[1:0]*32 +dst[sel+31:sel] := i[31:0] + + + SSE4.1 +
smmintrin.h
+ Swizzle
- - - - - Copy "a" to "dst", and insert the 64-bit integer "i" into "dst" at the location - specified by "imm8". - - dst[127:0] := a[127:0] - sel := imm8[0]*64 - dst[sel+63:sel] := i[63:0] - - - SSE4.1 -
smmintrin.h
- Swizzle + + + + + Copy "a" to "dst", and insert the 64-bit integer "i" into "dst" at the location specified by "imm8". + +dst[127:0] := a[127:0] +sel := imm8[0]*64 +dst[sel+63:sel] := i[63:0] + + + SSE4.1 +
smmintrin.h
+ Swizzle
- - - - - Conditionally multiply the packed double-precision (64-bit) floating-point - elements in "a" and "b" using the high 4 bits in "imm8", sum the four products, and - conditionally store the sum in "dst" using the low 4 bits of "imm8". - - DEFINE DP(a[127:0], b[127:0], imm8[7:0]) { - FOR j := 0 to 1 - i := j*64 - IF imm8[(4+j)%8] + + + + + Conditionally multiply the packed double-precision (64-bit) floating-point elements in "a" and "b" using the high 4 bits in "imm8", sum the four products, and conditionally store the sum in "dst" using the low 4 bits of "imm8". + +DEFINE DP(a[127:0], b[127:0], imm8[7:0]) { + FOR j := 0 to 1 + i := j*64 + IF imm8[(4+j)%8] temp[i+63:i] := a[i+63:i] * b[i+63:i] - ELSE + ELSE temp[i+63:i] := 0.0 - FI - ENDFOR - - sum[63:0] := temp[127:64] + temp[63:0] - - FOR j := 0 to 1 - i := j*64 - IF imm8[j%8] + FI + ENDFOR + + sum[63:0] := temp[127:64] + temp[63:0] + + FOR j := 0 to 1 + i := j*64 + IF imm8[j%8] tmpdst[i+63:i] := sum[63:0] - ELSE + ELSE tmpdst[i+63:i] := 0.0 - FI - ENDFOR - RETURN tmpdst[127:0] - } - dst[127:0] := DP(a[127:0], b[127:0], imm8[7:0]) - - - SSE4.1 -
smmintrin.h
- Arithmetic + + + + + Conditionally multiply the packed double-precision (64-bit) floating-point elements in "a" and "b" using the high 4 bits in "imm8", sum the two products, and conditionally store the sum in "dst" using the low 4 bits of "imm8". +
+DEFINE DP(a[127:0], b[127:0], imm8[7:0]) {
+	FOR j := 0 to 1
+		i := j*64
+		IF imm8[(4+j)%8]
			temp[i+63:i] := a[i+63:i] * b[i+63:i]
+		ELSE
			temp[i+63:i] := 0.0
+		FI
+	ENDFOR
+
+	sum[63:0] := temp[127:64] + temp[63:0]
+
+	FOR j := 0 to 1
+		i := j*64
+		IF imm8[j%8]
			tmpdst[i+63:i] := sum[63:0]
+		ELSE
			tmpdst[i+63:i] := 0.0
+		FI
+	ENDFOR
+	RETURN tmpdst[127:0]
+}
+dst[127:0] := DP(a[127:0], b[127:0], imm8[7:0])
+
+
+ SSE4.1
+
smmintrin.h
+ Arithmetic
- - - - - Conditionally multiply the packed single-precision (32-bit) floating-point - elements in "a" and "b" using the high 4 bits in "imm8", sum the four products, and - conditionally store the sum in "dst" using the low 4 bits of "imm8". - - DEFINE DP(a[127:0], b[127:0], imm8[7:0]) { - FOR j := 0 to 3 - i := j*32 - IF imm8[(4+j)%8] + + + + + Conditionally multiply the packed single-precision (32-bit) floating-point elements in "a" and "b" using the high 4 bits in "imm8", sum the four products, and conditionally store the sum in "dst" using the low 4 bits of "imm8". + +DEFINE DP(a[127:0], b[127:0], imm8[7:0]) { + FOR j := 0 to 3 + i := j*32 + IF imm8[(4+j)%8] temp[i+31:i] := a[i+31:i] * b[i+31:i] - ELSE + ELSE temp[i+31:i] := 0 - FI - ENDFOR - - sum[31:0] := (temp[127:96] + temp[95:64]) + (temp[63:32] + temp[31:0]) - - FOR j := 0 to 3 - i := j*32 - IF imm8[j%8] + FI + ENDFOR + + sum[31:0] := (temp[127:96] + temp[95:64]) + (temp[63:32] + temp[31:0]) + + FOR j := 0 to 3 + i := j*32 + IF imm8[j%8] tmpdst[i+31:i] := sum[31:0] - ELSE + ELSE tmpdst[i+31:i] := 0 - FI - ENDFOR - RETURN tmpdst[127:0] - } - dst[127:0] := DP(a[127:0], b[127:0], imm8[7:0]) - - - SSE4.1 -
smmintrin.h
- Arithmetic + + + + + Conditionally multiply the packed single-precision (32-bit) floating-point elements in "a" and "b" using the high 4 bits in "imm8", sum the four products, and conditionally store the sum in "dst" using the low 4 bits of "imm8". +
+DEFINE DP(a[127:0], b[127:0], imm8[7:0]) {
+	FOR j := 0 to 3
+		i := j*32
+		IF imm8[(4+j)%8]
			temp[i+31:i] := a[i+31:i] * b[i+31:i]
+		ELSE
			temp[i+31:i] := 0
+		FI
+	ENDFOR
+
+	sum[31:0] := (temp[127:96] + temp[95:64]) + (temp[63:32] + temp[31:0])
+
+	FOR j := 0 to 3
+		i := j*32
+		IF imm8[j%8]
			tmpdst[i+31:i] := sum[31:0]
+		ELSE
			tmpdst[i+31:i] := 0
+		FI
+	ENDFOR
+	RETURN tmpdst[127:0]
+}
+dst[127:0] := DP(a[127:0], b[127:0], imm8[7:0])
+
+
+ SSE4.1
+
smmintrin.h
+ Arithmetic
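A hedged sketch of the single-precision dot product (`_mm_dp_ps`), assuming the const-generic `imm8` form in current `core::arch`; the immediate's high nibble gates the multiplies and the low nibble gates the stores:

    #[target_feature(enable = "sse4.1")]
    unsafe fn dp_ps_demo() {
        use core::arch::x86_64::*;
        let a = _mm_setr_ps(1.0, 2.0, 3.0, 4.0);
        let b = _mm_setr_ps(5.0, 6.0, 7.0, 8.0);
        // 0xF1: multiply all four lanes, store the sum (1*5 + 2*6 + 3*7 + 4*8 = 70)
        // into lane 0 only; the other lanes are zeroed.
        let r = _mm_dp_ps::<0xF1>(a, b);
        assert_eq!(_mm_cvtss_f32(r), 70.0);
    }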
- - - - Multiply the low signed 32-bit integers from each packed 64-bit element in "a" - and "b", and store the signed 64-bit results in "dst". - - FOR j := 0 to 1 - i := j*64 - dst[i+63:i] := SignExtend64(a[i+31:i]) * SignExtend64(b[i+31:i]) - ENDFOR - - - SSE4.1 -
smmintrin.h
- Arithmetic + + + + Multiply the low signed 32-bit integers from each packed 64-bit element in "a" and "b", and store the signed 64-bit results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := SignExtend64(a[i+31:i]) * SignExtend64(b[i+31:i]) +ENDFOR + + + SSE4.1 +
smmintrin.h
+ Arithmetic
- - - - Multiply the packed 32-bit integers in "a" and "b", producing intermediate - 64-bit integers, and store the low 32 bits of the intermediate integers in "dst". - - FOR j := 0 to 3 - i := j*32 - tmp[63:0] := a[i+31:i] * b[i+31:i] - dst[i+31:i] := tmp[31:0] - ENDFOR - - - SSE4.1 -
smmintrin.h
- Arithmetic + + + + Multiply the packed 32-bit integers in "a" and "b", producing intermediate 64-bit integers, and store the low 32 bits of the intermediate integers in "dst". + +FOR j := 0 to 3 + i := j*32 + tmp[63:0] := a[i+31:i] * b[i+31:i] + dst[i+31:i] := tmp[31:0] +ENDFOR + + + SSE4.1 +
smmintrin.h
+ Arithmetic
- Miscellaneous - - - - - Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit - integers in "a" compared to those in "b", and store the 16-bit results in "dst". - Eight SADs are performed using one quadruplet from "b" and eight quadruplets from "a". - One quadruplet is selected from "b" starting at on the offset specified in "imm8". Eight - quadruplets are formed from sequential 8-bit integers selected from "a" starting at the - offset specified in "imm8". - - DEFINE MPSADBW(a[127:0], b[127:0], imm8[2:0]) { - a_offset := imm8[2]*32 - b_offset := imm8[1:0]*32 - FOR j := 0 to 7 - i := j*8 - k := a_offset+i - l := b_offset - tmp[i*2+15:i*2] := ABS(Signed(a[k+7:k] - b[l+7:l])) + ABS(Signed(a[k+15:k+8] - - b[l+15:l+8])) + \ - ABS(Signed(a[k+23:k+16] - b[l+23:l+16])) + ABS(Signed(a[k+31:k+24] - b[l+31:l+24])) - ENDFOR - RETURN tmp[127:0] - } - dst[127:0] := MPSADBW(a[127:0], b[127:0], imm8[2:0]) - - - SSE4.1 -
smmintrin.h
- Arithmetic + Miscellaneous + + + + + Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in "a" compared to those in "b", and store the 16-bit results in "dst".
	Eight SADs are performed using one quadruplet from "b" and eight quadruplets from "a". One quadruplet is selected from "b" starting at the offset specified in "imm8". Eight quadruplets are formed from sequential 8-bit integers selected from "a" starting at the offset specified in "imm8". +
+DEFINE MPSADBW(a[127:0], b[127:0], imm8[2:0]) {
+	a_offset := imm8[2]*32
+	b_offset := imm8[1:0]*32
+	FOR j := 0 to 7
+		i := j*8
+		k := a_offset+i
+		l := b_offset
+		tmp[i*2+15:i*2] := ABS(Signed(a[k+7:k] - b[l+7:l])) + ABS(Signed(a[k+15:k+8] - b[l+15:l+8])) + \
		                   ABS(Signed(a[k+23:k+16] - b[l+23:l+16])) + ABS(Signed(a[k+31:k+24] - b[l+31:l+24]))
+	ENDFOR
+	RETURN tmp[127:0]
+}
+dst[127:0] := MPSADBW(a[127:0], b[127:0], imm8[2:0])
+
+
+ SSE4.1
+
smmintrin.h
+ Arithmetic
- - - - Compare packed signed 8-bit integers in "a" and "b", and store packed maximum - values in "dst". - - FOR j := 0 to 15 - i := j*8 - dst[i+7:i] := MAX(a[i+7:i], b[i+7:i]) - ENDFOR - - - SSE4.1 -
smmintrin.h
- Special Math Functions + + + + Compare packed signed 8-bit integers in "a" and "b", and store packed maximum values in "dst". + +FOR j := 0 to 15 + i := j*8 + dst[i+7:i] := MAX(a[i+7:i], b[i+7:i]) +ENDFOR + + + SSE4.1 +
smmintrin.h
+ Special Math Functions
- - - - Compare packed signed 32-bit integers in "a" and "b", and store packed maximum - values in "dst". - - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := MAX(a[i+31:i], b[i+31:i]) - ENDFOR - - - SSE4.1 -
smmintrin.h
- Special Math Functions + + + + Compare packed signed 32-bit integers in "a" and "b", and store packed maximum values in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := MAX(a[i+31:i], b[i+31:i]) +ENDFOR + + + SSE4.1 +
smmintrin.h
+ Special Math Functions
- - - - Compare packed unsigned 32-bit integers in "a" and "b", and store packed - maximum values in "dst". - - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := MAX(a[i+31:i], b[i+31:i]) - ENDFOR - - - SSE4.1 -
smmintrin.h
- Special Math Functions + + + + Compare packed unsigned 32-bit integers in "a" and "b", and store packed maximum values in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := MAX(a[i+31:i], b[i+31:i]) +ENDFOR + + + SSE4.1 +
smmintrin.h
+ Special Math Functions
- - - - Compare packed unsigned 16-bit integers in "a" and "b", and store packed - maximum values in "dst". - - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := MAX(a[i+15:i], b[i+15:i]) - ENDFOR - - - SSE4.1 -
smmintrin.h
- Special Math Functions + + + + Compare packed unsigned 16-bit integers in "a" and "b", and store packed maximum values in "dst". + +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := MAX(a[i+15:i], b[i+15:i]) +ENDFOR + + + SSE4.1 +
smmintrin.h
+ Special Math Functions
- - - - Compare packed signed 8-bit integers in "a" and "b", and store packed minimum - values in "dst". - - FOR j := 0 to 15 - i := j*8 - dst[i+7:i] := MIN(a[i+7:i], b[i+7:i]) - ENDFOR - - - SSE4.1 -
smmintrin.h
- Special Math Functions + + + + Compare packed signed 8-bit integers in "a" and "b", and store packed minimum values in "dst". + +FOR j := 0 to 15 + i := j*8 + dst[i+7:i] := MIN(a[i+7:i], b[i+7:i]) +ENDFOR + + + SSE4.1 +
smmintrin.h
+ Special Math Functions
- - - - Compare packed signed 32-bit integers in "a" and "b", and store packed minimum - values in "dst". - - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := MIN(a[i+31:i], b[i+31:i]) - ENDFOR - - - SSE4.1 -
smmintrin.h
- Special Math Functions + + + + Compare packed signed 32-bit integers in "a" and "b", and store packed minimum values in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := MIN(a[i+31:i], b[i+31:i]) +ENDFOR + + + SSE4.1 +
smmintrin.h
+ Special Math Functions
- - - - Compare packed unsigned 32-bit integers in "a" and "b", and store packed - minimum values in "dst". - - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := MIN(a[i+31:i], b[i+31:i]) - ENDFOR - - - SSE4.1 -
smmintrin.h
- Special Math Functions + + + + Compare packed unsigned 32-bit integers in "a" and "b", and store packed minimum values in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := MIN(a[i+31:i], b[i+31:i]) +ENDFOR + + + SSE4.1 +
smmintrin.h
+ Special Math Functions
- - - - Compare packed unsigned 16-bit integers in "a" and "b", and store packed - minimum values in "dst". - - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := MIN(a[i+15:i], b[i+15:i]) - ENDFOR - - - SSE4.1 -
smmintrin.h
- Special Math Functions + + + + Compare packed unsigned 16-bit integers in "a" and "b", and store packed minimum values in "dst". + +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := MIN(a[i+15:i], b[i+15:i]) +ENDFOR + + + SSE4.1 +
smmintrin.h
+ Special Math Functions
- - - - Round the packed double-precision (64-bit) floating-point elements in "a" using - the "rounding" parameter, and store the results as packed double-precision - floating-point elements in "dst". - [round_note] - - FOR j := 0 to 1 - i := j*64 - dst[i+63:i] := ROUND(a[i+63:i], rounding) - ENDFOR - - - SSE4.1 -
smmintrin.h
- Special Math Functions + + + + Round the packed double-precision (64-bit) floating-point elements in "a" using the "rounding" parameter, and store the results as packed double-precision floating-point elements in "dst". + [round_note] + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := ROUND(a[i+63:i], rounding) +ENDFOR + + + SSE4.1 +
smmintrin.h
+ Special Math Functions
- - - Round the packed double-precision (64-bit) floating-point elements in "a" down - to an integer value, and store the results as packed double-precision floating-point - elements in "dst". - - FOR j := 0 to 1 - i := j*64 - dst[i+63:i] := FLOOR(a[i+63:i]) - ENDFOR - - - SSE4.1 -
smmintrin.h
- Special Math Functions + + + Round the packed double-precision (64-bit) floating-point elements in "a" down to an integer value, and store the results as packed double-precision floating-point elements in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := FLOOR(a[i+63:i]) +ENDFOR + + + SSE4.1 +
smmintrin.h
+ Special Math Functions
- - - Round the packed double-precision (64-bit) floating-point elements in "a" up to - an integer value, and store the results as packed double-precision floating-point - elements in "dst". - - FOR j := 0 to 1 - i := j*64 - dst[i+63:i] := CEIL(a[i+63:i]) - ENDFOR - - - SSE4.1 -
smmintrin.h
- Special Math Functions + + + Round the packed double-precision (64-bit) floating-point elements in "a" up to an integer value, and store the results as packed double-precision floating-point elements in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := CEIL(a[i+63:i]) +ENDFOR + + + SSE4.1 +
smmintrin.h
+ Special Math Functions
- - - - Round the packed single-precision (32-bit) floating-point elements in "a" using - the "rounding" parameter, and store the results as packed single-precision - floating-point elements in "dst". - [round_note] - - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := ROUND(a[i+31:i], rounding) - ENDFOR - - - SSE4.1 -
smmintrin.h
- Special Math Functions + + + + Round the packed single-precision (32-bit) floating-point elements in "a" using the "rounding" parameter, and store the results as packed single-precision floating-point elements in "dst". + [round_note] + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := ROUND(a[i+31:i], rounding) +ENDFOR + + + SSE4.1 +
smmintrin.h
+ Special Math Functions
- - - Round the packed single-precision (32-bit) floating-point elements in "a" down - to an integer value, and store the results as packed single-precision floating-point - elements in "dst". - - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := FLOOR(a[i+31:i]) - ENDFOR - - - SSE4.1 -
smmintrin.h
- Special Math Functions + + + Round the packed single-precision (32-bit) floating-point elements in "a" down to an integer value, and store the results as packed single-precision floating-point elements in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := FLOOR(a[i+31:i]) +ENDFOR + + + SSE4.1 +
smmintrin.h
+ Special Math Functions
- - - Round the packed single-precision (32-bit) floating-point elements in "a" up to - an integer value, and store the results as packed single-precision floating-point - elements in "dst". - - FOR j := 0 to 3 - i := j*32 - dst[i+31:i] := CEIL(a[i+31:i]) - ENDFOR - - - SSE4.1 -
smmintrin.h
- Special Math Functions + + + Round the packed single-precision (32-bit) floating-point elements in "a" up to an integer value, and store the results as packed single-precision floating-point elements in "dst". + +FOR j := 0 to 3 + i := j*32 + dst[i+31:i] := CEIL(a[i+31:i]) +ENDFOR + + + SSE4.1 +
smmintrin.h
+ Special Math Functions
- - - - - Round the lower double-precision (64-bit) floating-point element in "b" using - the "rounding" parameter, store the result as a double-precision floating-point element - in the lower element of "dst", and copy the upper element from "a" to the upper element - of "dst". - [round_note] - - dst[63:0] := ROUND(b[63:0], rounding) - dst[127:64] := a[127:64] - - - SSE4.1 -
smmintrin.h
- Special Math Functions + + + + + Round the lower double-precision (64-bit) floating-point element in "b" using the "rounding" parameter, store the result as a double-precision floating-point element in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + [round_note] + +dst[63:0] := ROUND(b[63:0], rounding) +dst[127:64] := a[127:64] + + + SSE4.1 +
smmintrin.h
+ Special Math Functions
- - - - Round the lower double-precision (64-bit) floating-point element in "b" down to - an integer value, store the result as a double-precision floating-point element in the - lower element of "dst", and copy the upper element from "a" to the upper element of - "dst". - - dst[63:0] := FLOOR(b[63:0]) - dst[127:64] := a[127:64] - - - SSE4.1 -
smmintrin.h
- Special Math Functions + + + + Round the lower double-precision (64-bit) floating-point element in "b" down to an integer value, store the result as a double-precision floating-point element in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := FLOOR(b[63:0]) +dst[127:64] := a[127:64] + + + SSE4.1 +
smmintrin.h
+ Special Math Functions
- - - - Round the lower double-precision (64-bit) floating-point element in "b" up to - an integer value, store the result as a double-precision floating-point element in the - lower element of "dst", and copy the upper element from "a" to the upper element of - "dst". - - dst[63:0] := CEIL(b[63:0]) - dst[127:64] := a[127:64] - - - SSE4.1 -
smmintrin.h
- Special Math Functions + + + + Round the lower double-precision (64-bit) floating-point element in "b" up to an integer value, store the result as a double-precision floating-point element in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". + +dst[63:0] := CEIL(b[63:0]) +dst[127:64] := a[127:64] + + + SSE4.1 +
smmintrin.h
+ Special Math Functions
- - - - - Round the lower single-precision (32-bit) floating-point element in "b" using - the "rounding" parameter, store the result as a single-precision floating-point element - in the lower element of "dst", and copy the upper 3 packed elements from "a" to the - upper elements of "dst". - [round_note] - - dst[31:0] := ROUND(b[31:0], rounding) - dst[127:32] := a[127:32] - - - SSE4.1 -
smmintrin.h
- Special Math Functions + + + + + Round the lower single-precision (32-bit) floating-point element in "b" using the "rounding" parameter, store the result as a single-precision floating-point element in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + [round_note] + +dst[31:0] := ROUND(b[31:0], rounding) +dst[127:32] := a[127:32] + + + SSE4.1 +
smmintrin.h
+ Special Math Functions
- - - - Round the lower single-precision (32-bit) floating-point element in "b" down to - an integer value, store the result as a single-precision floating-point element in the - lower element of "dst", and copy the upper 3 packed elements from "a" to the upper - elements of "dst". - - dst[31:0] := FLOOR(b[31:0]) - dst[127:32] := a[127:32] - - - SSE4.1 -
smmintrin.h
- Special Math Functions + + + + Round the lower single-precision (32-bit) floating-point element in "b" down to an integer value, store the result as a single-precision floating-point element in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := FLOOR(b[31:0]) +dst[127:32] := a[127:32] + + + SSE4.1 +
smmintrin.h
+ Special Math Functions
- - - - Round the lower single-precision (32-bit) floating-point element in "b" up to - an integer value, store the result as a single-precision floating-point element in the - lower element of "dst", and copy the upper 3 packed elements from "a" to the upper - elements of "dst". - - dst[31:0] := CEIL(b[31:0]) - dst[127:32] := a[127:32] - - - SSE4.1 -
smmintrin.h
- Special Math Functions + + + + Round the lower single-precision (32-bit) floating-point element in "b" up to an integer value, store the result as a single-precision floating-point element in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". + +dst[31:0] := CEIL(b[31:0]) +dst[127:32] := a[127:32] + + + SSE4.1 +
smmintrin.h
+ Special Math Functions
- Miscellaneous - - - - Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit - integers using unsigned saturation, and store the results in "dst". - - dst[15:0] := SaturateU16(a[31:0]) - dst[31:16] := SaturateU16(a[63:32]) - dst[47:32] := SaturateU16(a[95:64]) - dst[63:48] := SaturateU16(a[127:96]) - dst[79:64] := SaturateU16(b[31:0]) - dst[95:80] := SaturateU16(b[63:32]) - dst[111:96] := SaturateU16(b[95:64]) - dst[127:112] := SaturateU16(b[127:96]) - - - SSE4.1 -
smmintrin.h
- Convert + Miscellaneous + + + + Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit integers using unsigned saturation, and store the results in "dst". + +dst[15:0] := SaturateU16(a[31:0]) +dst[31:16] := SaturateU16(a[63:32]) +dst[47:32] := SaturateU16(a[95:64]) +dst[63:48] := SaturateU16(a[127:96]) +dst[79:64] := SaturateU16(b[31:0]) +dst[95:80] := SaturateU16(b[63:32]) +dst[111:96] := SaturateU16(b[95:64]) +dst[127:112] := SaturateU16(b[127:96]) + + + SSE4.1 +
smmintrin.h
+ Convert
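A small Rust sketch of `_mm_packus_epi32` showing the unsigned saturation described above (illustrative values; demo function is assumed, not part of the patch):

    #[target_feature(enable = "sse4.1")]
    unsafe fn packus_epi32_demo() {
        use core::arch::x86_64::*;
        let a = _mm_setr_epi32(-1, 0, 70000, 42);
        let b = _mm_setr_epi32(1, 2, 3, 4);
        // Unsigned saturation clamps each 32-bit value into [0, 65535].
        let r: [u16; 8] = core::mem::transmute(_mm_packus_epi32(a, b));
        assert_eq!(r, [0, 0, 65535, 42, 1, 2, 3, 4]);
    }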
- - - Sign extend packed 8-bit integers in "a" to packed 16-bit integers, and store - the results in "dst". - - FOR j := 0 to 7 - i := j*8 - l := j*16 - dst[l+15:l] := SignExtend16(a[i+7:i]) - ENDFOR - - - SSE4.1 -
smmintrin.h
- Convert + + + Sign extend packed 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst". + +FOR j := 0 to 7 + i := j*8 + l := j*16 + dst[l+15:l] := SignExtend16(a[i+7:i]) +ENDFOR + + + SSE4.1 +
smmintrin.h
+ Convert
- - - Sign extend packed 8-bit integers in "a" to packed 32-bit integers, and store - the results in "dst". - - FOR j := 0 to 3 - i := 32*j - k := 8*j - dst[i+31:i] := SignExtend32(a[k+7:k]) - ENDFOR - - - SSE4.1 -
smmintrin.h
- Convert + + + Sign extend packed 8-bit integers in "a" to packed 32-bit integers, and store the results in "dst". + +FOR j := 0 to 3 + i := 32*j + k := 8*j + dst[i+31:i] := SignExtend32(a[k+7:k]) +ENDFOR + + + SSE4.1 +
smmintrin.h
+ Convert
- - - Sign extend packed 8-bit integers in the low 8 bytes of "a" to packed 64-bit - integers, and store the results in "dst". - - FOR j := 0 to 1 - i := 64*j - k := 8*j - dst[i+63:i] := SignExtend64(a[k+7:k]) - ENDFOR - - - SSE4.1 -
smmintrin.h
- Convert + + + Sign extend packed 8-bit integers in the low 8 bytes of "a" to packed 64-bit integers, and store the results in "dst". + +FOR j := 0 to 1 + i := 64*j + k := 8*j + dst[i+63:i] := SignExtend64(a[k+7:k]) +ENDFOR + + + SSE4.1 +
smmintrin.h
+ Convert
- - - Sign extend packed 16-bit integers in "a" to packed 32-bit integers, and store - the results in "dst". - - FOR j := 0 to 3 - i := 32*j - k := 16*j - dst[i+31:i] := SignExtend32(a[k+15:k]) - ENDFOR - - - SSE4.1 -
smmintrin.h
- Convert + + + Sign extend packed 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst". + +FOR j := 0 to 3 + i := 32*j + k := 16*j + dst[i+31:i] := SignExtend32(a[k+15:k]) +ENDFOR + + + SSE4.1 +
smmintrin.h
+ Convert
- - - Sign extend packed 16-bit integers in "a" to packed 64-bit integers, and store - the results in "dst". - - FOR j := 0 to 1 - i := 64*j - k := 16*j - dst[i+63:i] := SignExtend64(a[k+15:k]) - ENDFOR - - - SSE4.1 -
smmintrin.h
- Convert + + + Sign extend packed 16-bit integers in "a" to packed 64-bit integers, and store the results in "dst". + +FOR j := 0 to 1 + i := 64*j + k := 16*j + dst[i+63:i] := SignExtend64(a[k+15:k]) +ENDFOR + + + SSE4.1 +
smmintrin.h
+ Convert
- - - Sign extend packed 32-bit integers in "a" to packed 64-bit integers, and store - the results in "dst". - - FOR j := 0 to 1 - i := 64*j - k := 32*j - dst[i+63:i] := SignExtend64(a[k+31:k]) - ENDFOR - - - SSE4.1 -
smmintrin.h
- Convert + + + Sign extend packed 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst". + +FOR j := 0 to 1 + i := 64*j + k := 32*j + dst[i+63:i] := SignExtend64(a[k+31:k]) +ENDFOR + + + SSE4.1 +
smmintrin.h
+ Convert
- - - Zero extend packed unsigned 8-bit integers in "a" to packed 16-bit integers, - and store the results in "dst". - - FOR j := 0 to 7 - i := j*8 - l := j*16 - dst[l+15:l] := ZeroExtend16(a[i+7:i]) - ENDFOR - - - SSE4.1 -
smmintrin.h
- Convert + + + Zero extend packed unsigned 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst". + +FOR j := 0 to 7 + i := j*8 + l := j*16 + dst[l+15:l] := ZeroExtend16(a[i+7:i]) +ENDFOR + + + SSE4.1 +
smmintrin.h
+ Convert
- - - Zero extend packed unsigned 8-bit integers in "a" to packed 32-bit integers, - and store the results in "dst". - - FOR j := 0 to 3 - i := 32*j - k := 8*j - dst[i+31:i] := ZeroExtend32(a[k+7:k]) - ENDFOR - - - SSE4.1 -
smmintrin.h
- Convert + + + Zero extend packed unsigned 8-bit integers in "a" to packed 32-bit integers, and store the results in "dst". + +FOR j := 0 to 3 + i := 32*j + k := 8*j + dst[i+31:i] := ZeroExtend32(a[k+7:k]) +ENDFOR + + + SSE4.1 +
smmintrin.h
+ Convert
- - - Zero extend packed unsigned 8-bit integers in the low 8 byte sof "a" to packed - 64-bit integers, and store the results in "dst". - - FOR j := 0 to 1 - i := 64*j - k := 8*j - dst[i+63:i] := ZeroExtend64(a[k+7:k]) - ENDFOR - - - SSE4.1 -
smmintrin.h
- Convert + + + Zero extend packed unsigned 8-bit integers in the low 8 bytes of "a" to packed 64-bit integers, and store the results in "dst". +
+FOR j := 0 to 1
+	i := 64*j
+	k := 8*j
+	dst[i+63:i] := ZeroExtend64(a[k+7:k])
+ENDFOR
+
+
+ SSE4.1
+
smmintrin.h
+ Convert
- - - Zero extend packed unsigned 16-bit integers in "a" to packed 32-bit integers, - and store the results in "dst". - - FOR j := 0 to 3 - i := 32*j - k := 16*j - dst[i+31:i] := ZeroExtend32(a[k+15:k]) - ENDFOR - - - SSE4.1 -
smmintrin.h
- Convert + + + Zero extend packed unsigned 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst". + +FOR j := 0 to 3 + i := 32*j + k := 16*j + dst[i+31:i] := ZeroExtend32(a[k+15:k]) +ENDFOR + + + SSE4.1 +
smmintrin.h
+ Convert
- - - Zero extend packed unsigned 16-bit integers in "a" to packed 64-bit integers, - and store the results in "dst". - - FOR j := 0 to 1 - i := 64*j - k := 16*j - dst[i+63:i] := ZeroExtend64(a[k+15:k]) - ENDFOR - - - SSE4.1 -
smmintrin.h
- Convert + + + Zero extend packed unsigned 16-bit integers in "a" to packed 64-bit integers, and store the results in "dst". + +FOR j := 0 to 1 + i := 64*j + k := 16*j + dst[i+63:i] := ZeroExtend64(a[k+15:k]) +ENDFOR + + + SSE4.1 +
smmintrin.h
+ Convert
- - - Zero extend packed unsigned 32-bit integers in "a" to packed 64-bit integers, - and store the results in "dst". - - FOR j := 0 to 1 - i := 64*j - k := 32*j - dst[i+63:i] := ZeroExtend64(a[k+31:k]) - ENDFOR - - - SSE4.1 -
smmintrin.h
- Convert + + + Zero extend packed unsigned 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst". + +FOR j := 0 to 1 + i := 64*j + k := 32*j + dst[i+63:i] := ZeroExtend64(a[k+31:k]) +ENDFOR + + + SSE4.1 +
smmintrin.h
+ Convert
- - - - Compare packed 64-bit integers in "a" and "b" for equality, and store the - results in "dst". - - FOR j := 0 to 1 - i := j*64 - dst[i+63:i] := ( a[i+63:i] == b[i+63:i] ) ? 0xFFFFFFFFFFFFFFFF : 0 - ENDFOR - - - SSE4.1 -
smmintrin.h
- Compare + + + + Compare packed 64-bit integers in "a" and "b" for equality, and store the results in "dst". + +FOR j := 0 to 1 + i := j*64 + dst[i+63:i] := ( a[i+63:i] == b[i+63:i] ) ? 0xFFFFFFFFFFFFFFFF : 0 +ENDFOR + + + SSE4.1 +
smmintrin.h
+ Compare
- - - - Compute the bitwise AND of 128 bits (representing integer data) in "a" and "b", - and set "ZF" to 1 if the result is zero, otherwise set "ZF" to 0. Compute the bitwise - NOT of "a" and then AND with "b", and set "CF" to 1 if the result is zero, otherwise set - "CF" to 0. Return the "ZF" value. - - IF ((a[127:0] AND b[127:0]) == 0) - ZF := 1 - ELSE - ZF := 0 - FI - IF (((NOT a[127:0]) AND b[127:0]) == 0) - CF := 1 - ELSE - CF := 0 - FI - RETURN ZF - - - SSE4.1 -
smmintrin.h
- Logical + + + + Compute the bitwise AND of 128 bits (representing integer data) in "a" and "b", and set "ZF" to 1 if the result is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", and set "CF" to 1 if the result is zero, otherwise set "CF" to 0. Return the "ZF" value. + +IF ((a[127:0] AND b[127:0]) == 0) + ZF := 1 +ELSE + ZF := 0 +FI +IF (((NOT a[127:0]) AND b[127:0]) == 0) + CF := 1 +ELSE + CF := 0 +FI +RETURN ZF + + + SSE4.1 +
smmintrin.h
+ Logical
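The ZF-returning variant is `_mm_testz_si128`; a minimal sketch (values illustrative):

    #[target_feature(enable = "sse4.1")]
    unsafe fn testz_demo() {
        use core::arch::x86_64::*;
        let a = _mm_setr_epi32(0b1100, 0, 0, 0);
        let b = _mm_setr_epi32(0b0011, 0, 0, 0);
        // No bit is set in both operands, so ZF = 1 and the intrinsic returns 1.
        assert_eq!(_mm_testz_si128(a, b), 1);
    }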
- - - - Compute the bitwise AND of 128 bits (representing integer data) in "a" and "b", - and set "ZF" to 1 if the result is zero, otherwise set "ZF" to 0. Compute the bitwise - NOT of "a" and then AND with "b", and set "CF" to 1 if the result is zero, otherwise set - "CF" to 0. Return the "CF" value. - - IF ((a[127:0] AND b[127:0]) == 0) - ZF := 1 - ELSE - ZF := 0 - FI - IF (((NOT a[127:0]) AND b[127:0]) == 0) - CF := 1 - ELSE - CF := 0 - FI - RETURN CF - - - SSE4.1 -
smmintrin.h
- Logical + + + + Compute the bitwise AND of 128 bits (representing integer data) in "a" and "b", and set "ZF" to 1 if the result is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", and set "CF" to 1 if the result is zero, otherwise set "CF" to 0. Return the "CF" value. + +IF ((a[127:0] AND b[127:0]) == 0) + ZF := 1 +ELSE + ZF := 0 +FI +IF (((NOT a[127:0]) AND b[127:0]) == 0) + CF := 1 +ELSE + CF := 0 +FI +RETURN CF + + + SSE4.1 +
smmintrin.h
+ Logical
- - - - Compute the bitwise AND of 128 bits (representing integer data) in "a" and "b", - and set "ZF" to 1 if the result is zero, otherwise set "ZF" to 0. Compute the bitwise - NOT of "a" and then AND with "b", and set "CF" to 1 if the result is zero, otherwise set - "CF" to 0. Return 1 if both the "ZF" and "CF" values are zero, otherwise return 0. - - IF ((a[127:0] AND b[127:0]) == 0) - ZF := 1 - ELSE - ZF := 0 - FI - IF (((NOT a[127:0]) AND b[127:0]) == 0) - CF := 1 - ELSE - CF := 0 - FI - IF (ZF == 0 && CF == 0) - dst := 1 - ELSE - dst := 0 - FI - - - SSE4.1 -
smmintrin.h
- Logical + + + + Compute the bitwise AND of 128 bits (representing integer data) in "a" and "b", and set "ZF" to 1 if the result is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", and set "CF" to 1 if the result is zero, otherwise set "CF" to 0. Return 1 if both the "ZF" and "CF" values are zero, otherwise return 0. + +IF ((a[127:0] AND b[127:0]) == 0) + ZF := 1 +ELSE + ZF := 0 +FI +IF (((NOT a[127:0]) AND b[127:0]) == 0) + CF := 1 +ELSE + CF := 0 +FI +IF (ZF == 0 && CF == 0) + dst := 1 +ELSE + dst := 0 +FI + + + SSE4.1 +
smmintrin.h
+ Logical
- - - - Compute the bitwise AND of 128 bits (representing integer data) in "a" and - "mask", and return 1 if the result is zero, otherwise return 0. - - IF ((a[127:0] AND mask[127:0]) == 0) - ZF := 1 - ELSE - ZF := 0 - FI - dst := ZF - - - SSE4.1 -
smmintrin.h
- Logical + + + + Compute the bitwise AND of 128 bits (representing integer data) in "a" and "mask", and return 1 if the result is zero, otherwise return 0. + +IF ((a[127:0] AND mask[127:0]) == 0) + ZF := 1 +ELSE + ZF := 0 +FI +dst := ZF + + + SSE4.1 +
smmintrin.h
+ Logical
- - - - Compute the bitwise AND of 128 bits (representing integer data) in "a" and - "mask", and set "ZF" to 1 if the result is zero, otherwise set "ZF" to 0. Compute the - bitwise NOT of "a" and then AND with "mask", and set "CF" to 1 if the result is zero, - otherwise set "CF" to 0. Return 1 if both the "ZF" and "CF" values are zero, otherwise - return 0. - - IF ((a[127:0] AND mask[127:0]) == 0) - ZF := 1 - ELSE - ZF := 0 - FI - IF (((NOT a[127:0]) AND mask[127:0]) == 0) - CF := 1 - ELSE - CF := 0 - FI - IF (ZF == 0 && CF == 0) - dst := 1 - ELSE - dst := 0 - FI - - - SSE4.1 -
smmintrin.h
- Logical + + + + Compute the bitwise AND of 128 bits (representing integer data) in "a" and "mask", and set "ZF" to 1 if the result is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "mask", and set "CF" to 1 if the result is zero, otherwise set "CF" to 0. Return 1 if both the "ZF" and "CF" values are zero, otherwise return 0. + +IF ((a[127:0] AND mask[127:0]) == 0) + ZF := 1 +ELSE + ZF := 0 +FI +IF (((NOT a[127:0]) AND mask[127:0]) == 0) + CF := 1 +ELSE + CF := 0 +FI +IF (ZF == 0 && CF == 0) + dst := 1 +ELSE + dst := 0 +FI + + + SSE4.1 +
smmintrin.h
+ Logical
- - - Compute the bitwise NOT of "a" and then AND with a 128-bit vector containing - all 1's, and return 1 if the result is zero, otherwise return 0. - - FOR j := 0 to 127 - tmp[j] := 1 - ENDFOR - IF (((NOT a[127:0]) AND tmp[127:0]) == 0) - CF := 1 - ELSE - CF := 0 - FI - dst := CF - - - - SSE4.1 -
smmintrin.h
- Logical + + + Compute the bitwise NOT of "a" and then AND with a 128-bit vector containing all 1's, and return 1 if the result is zero, otherwise return 0. + +FOR j := 0 to 127 + tmp[j] := 1 +ENDFOR +IF (((NOT a[127:0]) AND tmp[127:0]) == 0) + CF := 1 +ELSE + CF := 0 +FI +dst := CF + + + + SSE4.1 +
smmintrin.h
+ Logical
- - - Horizontally compute the minimum amongst the packed unsigned 16-bit integers in - "a", store the minimum and index in "dst", and zero the remaining bits in "dst". - - index[2:0] := 0 - min[15:0] := a[15:0] - FOR j := 0 to 7 - i := j*16 - IF a[i+15:i] < min[15:0] - index[2:0] := j - min[15:0] := a[i+15:i] - FI - ENDFOR - dst[15:0] := min[15:0] - dst[18:16] := index[2:0] - dst[127:19] := 0 - - - SSE4.1 -
smmintrin.h
- Miscellaneous + + + Horizontally compute the minimum amongst the packed unsigned 16-bit integers in "a", store the minimum and index in "dst", and zero the remaining bits in "dst". + +index[2:0] := 0 +min[15:0] := a[15:0] +FOR j := 0 to 7 + i := j*16 + IF a[i+15:i] < min[15:0] + index[2:0] := j + min[15:0] := a[i+15:i] + FI +ENDFOR +dst[15:0] := min[15:0] +dst[18:16] := index[2:0] +dst[127:19] := 0 + + + SSE4.1 +
smmintrin.h
+ Miscellaneous
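A hedged sketch of `_mm_minpos_epu16`, which packs the minimum into lane 0 and its index into lane 1 as described above (illustrative values):

    #[target_feature(enable = "sse4.1")]
    unsafe fn minpos_demo() {
        use core::arch::x86_64::*;
        let a = _mm_setr_epi16(9, 4, 7, 4, 8, 9, 9, 9);
        let r = _mm_minpos_epu16(a);
        // Lane 0 holds the minimum; lane 1 holds the index of its first occurrence.
        assert_eq!(_mm_extract_epi16::<0>(r), 4);
        assert_eq!(_mm_extract_epi16::<1>(r), 1);
    }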
- - - Load 128-bits of integer data from memory into "dst" using a non-temporal - memory hint. - "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may - be generated. - - dst[127:0] := MEM[mem_addr+127:mem_addr] - - - SSE4.1 -
smmintrin.h
- Load -
- - - - - - - - Compare packed strings with implicit lengths in "a" and "b" using the control - in "imm8", and store the generated mask in "dst". - [strcmp_note] - - size := (imm8[0] ? 16 : 8) // 8 or 16-bit characters - UpperBound := (128 / size) - 1 - BoolRes := 0 - // compare all characters - aInvalid := 0 - bInvalid := 0 - FOR i := 0 to UpperBound - m := i*size - FOR j := 0 to UpperBound - n := j*size - BoolRes.word[i].bit[j] := (a[m+size-1:m] == b[n+size-1:n]) ? 1 : 0 - - // invalidate characters after EOS - IF a[m+size-1:m] == 0 + + + Load 128-bits of integer data from memory into "dst" using a non-temporal memory hint. + "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated. + +dst[127:0] := MEM[mem_addr+127:mem_addr] + + + SSE4.1 +
smmintrin.h
+ Load +
+ + + + + + + + Compare packed strings with implicit lengths in "a" and "b" using the control in "imm8", and store the generated mask in "dst". + [strcmp_note] + +size := (imm8[0] ? 16 : 8) // 8 or 16-bit characters +UpperBound := (128 / size) - 1 +BoolRes := 0 +// compare all characters +aInvalid := 0 +bInvalid := 0 +FOR i := 0 to UpperBound + m := i*size + FOR j := 0 to UpperBound + n := j*size + BoolRes.word[i].bit[j] := (a[m+size-1:m] == b[n+size-1:n]) ? 1 : 0 + + // invalidate characters after EOS + IF a[m+size-1:m] == 0 aInvalid := 1 - FI - IF b[n+size-1:n] == 0 + FI + IF b[n+size-1:n] == 0 bInvalid := 1 - FI - - // override comparisons for invalid characters - CASE (imm8[3:2]) OF - 0: // equal any + FI + + // override comparisons for invalid characters + CASE (imm8[3:2]) OF + 0: // equal any IF (!aInvalid && bInvalid) - BoolRes.word[i].bit[j] := 0 + BoolRes.word[i].bit[j] := 0 ELSE IF (aInvalid && !bInvalid) - BoolRes.word[i].bit[j] := 0 + BoolRes.word[i].bit[j] := 0 ELSE IF (aInvalid && bInvalid) - BoolRes.word[i].bit[j] := 0 + BoolRes.word[i].bit[j] := 0 FI - 1: // ranges + 1: // ranges IF (!aInvalid && bInvalid) - BoolRes.word[i].bit[j] := 0 + BoolRes.word[i].bit[j] := 0 ELSE IF (aInvalid && !bInvalid) - BoolRes.word[i].bit[j] := 0 + BoolRes.word[i].bit[j] := 0 ELSE IF (aInvalid && bInvalid) - BoolRes.word[i].bit[j] := 0 + BoolRes.word[i].bit[j] := 0 FI - 2: // equal each + 2: // equal each IF (!aInvalid && bInvalid) - BoolRes.word[i].bit[j] := 0 + BoolRes.word[i].bit[j] := 0 ELSE IF (aInvalid && !bInvalid) - BoolRes.word[i].bit[j] := 0 + BoolRes.word[i].bit[j] := 0 ELSE IF (aInvalid && bInvalid) - BoolRes.word[i].bit[j] := 1 + BoolRes.word[i].bit[j] := 1 FI - 3: // equal ordered + 3: // equal ordered IF (!aInvalid && bInvalid) - BoolRes.word[i].bit[j] := 0 + BoolRes.word[i].bit[j] := 0 ELSE IF (aInvalid && !bInvalid) - BoolRes.word[i].bit[j] := 1 + BoolRes.word[i].bit[j] := 1 ELSE IF (aInvalid && bInvalid) - BoolRes.word[i].bit[j] := 1 - FI - ESAC - ENDFOR - ENDFOR - // aggregate results - CASE (imm8[3:2]) OF - 0: // equal any - IntRes1 := 0 - FOR i := 0 to UpperBound - FOR j := 0 to UpperBound + BoolRes.word[i].bit[j] := 1 + FI + ESAC + ENDFOR +ENDFOR +// aggregate results +CASE (imm8[3:2]) OF +0: // equal any + IntRes1 := 0 + FOR i := 0 to UpperBound + FOR j := 0 to UpperBound IntRes1[i] := IntRes1[i] OR BoolRes.word[i].bit[j] - ENDFOR - ENDFOR - 1: // ranges - IntRes1 := 0 - FOR i := 0 to UpperBound - FOR j := 0 to UpperBound + ENDFOR + ENDFOR +1: // ranges + IntRes1 := 0 + FOR i := 0 to UpperBound + FOR j := 0 to UpperBound IntRes1[i] := IntRes1[i] OR (BoolRes.word[i].bit[j] AND BoolRes.word[i].bit[j+1]) j += 2 - ENDFOR - ENDFOR - 2: // equal each - IntRes1 := 0 - FOR i := 0 to UpperBound - IntRes1[i] := BoolRes.word[i].bit[i] - ENDFOR - 3: // equal ordered - IntRes1 := (imm8[0] ? 0xFF : 0xFFFF) - FOR i := 0 to UpperBound - k := i - FOR j := 0 to UpperBound-i + ENDFOR + ENDFOR +2: // equal each + IntRes1 := 0 + FOR i := 0 to UpperBound + IntRes1[i] := BoolRes.word[i].bit[i] + ENDFOR +3: // equal ordered + IntRes1 := (imm8[0] ? 
0xFF : 0xFFFF) + FOR i := 0 to UpperBound + k := i + FOR j := 0 to UpperBound-i IntRes1[i] := IntRes1[i] AND BoolRes.word[k].bit[j] k := k+1 - ENDFOR - ENDFOR - ESAC - // optionally negate results - bInvalid := 0 - FOR i := 0 to UpperBound - IF imm8[4] - IF imm8[5] // only negate valid + ENDFOR + ENDFOR +ESAC +// optionally negate results +bInvalid := 0 +FOR i := 0 to UpperBound + IF imm8[4] + IF imm8[5] // only negate valid IF b[n+size-1:n] == 0 - bInvalid := 1 + bInvalid := 1 FI IF bInvalid // invalid, don't negate - IntRes2[i] := IntRes1[i] + IntRes2[i] := IntRes1[i] ELSE // valid, negate - IntRes2[i] := -1 XOR IntRes1[i] + IntRes2[i] := -1 XOR IntRes1[i] FI - ELSE // negate all + ELSE // negate all IntRes2[i] := -1 XOR IntRes1[i] - FI - ELSE // don't negate - IntRes2[i] := IntRes1[i] - FI - ENDFOR - // output - IF imm8[6] // byte / word mask - FOR i := 0 to UpperBound - j := i*size - IF IntRes2[i] + FI + ELSE // don't negate + IntRes2[i] := IntRes1[i] + FI +ENDFOR +// output +IF imm8[6] // byte / word mask + FOR i := 0 to UpperBound + j := i*size + IF IntRes2[i] dst[j+size-1:j] := (imm8[0] ? 0xFF : 0xFFFF) - ELSE + ELSE dst[j+size-1:j] := 0 - FI - ENDFOR - ELSE // bit mask - dst[UpperBound:0] := IntRes2[UpperBound:0] - dst[127:UpperBound+1] := 0 - FI - - - SSE4.2 -
nmmintrin.h
- String Compare + FI + ENDFOR +ELSE // bit mask + dst[UpperBound:0] := IntRes2[UpperBound:0] + dst[127:UpperBound+1] := 0 +FI + + + SSE4.2 +
nmmintrin.h
+ String Compare
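To ground the long pseudocode above, a hedged sketch of `_mm_cmpistrm` in "equal each" mode, assuming the `_SIDD_*` constants and const-generic form in current `core::arch` (strings and assertions are illustrative):

    #[target_feature(enable = "sse4.2")]
    unsafe fn cmpistrm_demo() {
        use core::arch::x86_64::*;
        // Implicit-length strings "abc" and "xbc", NUL-terminated in-register.
        let a = _mm_setr_epi8(b'a' as i8, b'b' as i8, b'c' as i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
        let b = _mm_setr_epi8(b'x' as i8, b'b' as i8, b'c' as i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
        // "Equal each" compares lane-for-lane; _SIDD_UNIT_MASK requests a byte mask.
        const IMM: i32 = _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_UNIT_MASK;
        let m: [u8; 16] = core::mem::transmute(_mm_cmpistrm::<IMM>(a, b));
        assert_eq!(m[0], 0x00); // 'a' != 'x'
        assert_eq!(m[1], 0xFF); // 'b' == 'b'
    }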
Compare packed strings with implicit lengths in "a" and "b" using the control in "imm8", and store the generated index in "dst".
[strcmp_note]

size := (imm8[0] ? 16 : 8) // 8 or 16-bit characters
UpperBound := (128 / size) - 1
BoolRes := 0
// compare all characters
aInvalid := 0
bInvalid := 0
FOR i := 0 to UpperBound
    m := i*size
    FOR j := 0 to UpperBound
        n := j*size
        BoolRes.word[i].bit[j] := (a[m+size-1:m] == b[n+size-1:n]) ? 1 : 0

        // invalidate characters after EOS
        IF a[m+size-1:m] == 0
            aInvalid := 1
        FI
        IF b[n+size-1:n] == 0
            bInvalid := 1
        FI

        // override comparisons for invalid characters
        CASE (imm8[3:2]) OF
        0: // equal any
            IF (!aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && !bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            FI
        1: // ranges
            IF (!aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && !bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            FI
        2: // equal each
            IF (!aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && !bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 1
            FI
        3: // equal ordered
            IF (!aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && !bInvalid)
                BoolRes.word[i].bit[j] := 1
            ELSE IF (aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 1
            FI
        ESAC
    ENDFOR
ENDFOR
// aggregate results
CASE (imm8[3:2]) OF
0: // equal any
    IntRes1 := 0
    FOR i := 0 to UpperBound
        FOR j := 0 to UpperBound
            IntRes1[i] := IntRes1[i] OR BoolRes.word[i].bit[j]
        ENDFOR
    ENDFOR
1: // ranges
    IntRes1 := 0
    FOR i := 0 to UpperBound
        FOR j := 0 to UpperBound
            IntRes1[i] := IntRes1[i] OR (BoolRes.word[i].bit[j] AND BoolRes.word[i].bit[j+1])
            j += 2
        ENDFOR
    ENDFOR
2: // equal each
    IntRes1 := 0
    FOR i := 0 to UpperBound
        IntRes1[i] := BoolRes.word[i].bit[i]
    ENDFOR
3: // equal ordered
    IntRes1 := (imm8[0] ? 0xFF : 0xFFFF)
    FOR i := 0 to UpperBound
        k := i
        FOR j := 0 to UpperBound-i
            IntRes1[i] := IntRes1[i] AND BoolRes.word[k].bit[j]
            k := k+1
        ENDFOR
    ENDFOR
ESAC
// optionally negate results
bInvalid := 0
FOR i := 0 to UpperBound
    IF imm8[4]
        IF imm8[5] // only negate valid
            IF b[n+size-1:n] == 0
                bInvalid := 1
            FI
            IF bInvalid // invalid, don't negate
                IntRes2[i] := IntRes1[i]
            ELSE // valid, negate
                IntRes2[i] := -1 XOR IntRes1[i]
            FI
        ELSE // negate all
            IntRes2[i] := -1 XOR IntRes1[i]
        FI
    ELSE // don't negate
        IntRes2[i] := IntRes1[i]
    FI
ENDFOR
// output
IF imm8[6] // most significant bit
    tmp := UpperBound
    dst := tmp
    DO WHILE ((tmp >= 0) AND IntRes2[tmp] == 0)
        tmp := tmp - 1
        dst := tmp
    OD
ELSE // least significant bit
    tmp := 0
    dst := tmp
    DO WHILE ((tmp <= UpperBound) AND IntRes2[tmp] == 0)
        tmp := tmp + 1
        dst := tmp
    OD
FI

SSE4.2
nmmintrin.h
String Compare
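A usage sketch in Rust, assuming this entry corresponds to the intrinsic that core::arch::x86_64 exposes as _mm_cmpistri (the _SIDD_* constants below compose "imm8"); the "equal any" aggregation plus the least-significant-index output gives a strpbrk-style scan:

use core::arch::x86_64::*;

// Find the first vowel in a NUL-free 16-byte block ("equal any" aggregation).
// Safety: caller must ensure SSE4.2, e.g. via is_x86_feature_detected!("sse4.2").
#[target_feature(enable = "sse4.2")]
unsafe fn first_vowel(block: &[u8; 16]) -> Option<usize> {
    const IMM8: i32 = _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_LEAST_SIGNIFICANT;
    // implicit lengths: the NUL padding terminates the five-character set
    let set = _mm_loadu_si128(b"aeiou\0\0\0\0\0\0\0\0\0\0\0".as_ptr().cast());
    let hay = _mm_loadu_si128(block.as_ptr().cast());
    let idx = _mm_cmpistri::<IMM8>(set, hay);
    // UpperBound+1 (16 for byte elements) means no set bit in IntRes2
    if idx < 16 { Some(idx as usize) } else { None }
}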
Compare packed strings with implicit lengths in "a" and "b" using the control in "imm8", and return 1 if any character in "b" was null, and 0 otherwise.
[strcmp_note]

size := (imm8[0] ? 16 : 8) // 8 or 16-bit characters
UpperBound := (128 / size) - 1
bInvalid := 0
FOR j := 0 to UpperBound
    n := j*size
    IF b[n+size-1:n] == 0
        bInvalid := 1
    FI
ENDFOR
dst := bInvalid

SSE4.2
nmmintrin.h
String Compare
Compare packed strings with implicit lengths in "a" and "b" using the control in "imm8", and return 1 if the resulting mask was non-zero, and 0 otherwise.
[strcmp_note]

size := (imm8[0] ? 16 : 8) // 8 or 16-bit characters
UpperBound := (128 / size) - 1
BoolRes := 0
// compare all characters
aInvalid := 0
bInvalid := 0
FOR i := 0 to UpperBound
    m := i*size
    FOR j := 0 to UpperBound
        n := j*size
        BoolRes.word[i].bit[j] := (a[m+size-1:m] == b[n+size-1:n]) ? 1 : 0

        // invalidate characters after EOS
        IF a[m+size-1:m] == 0
            aInvalid := 1
        FI
        IF b[n+size-1:n] == 0
            bInvalid := 1
        FI

        // override comparisons for invalid characters
        CASE (imm8[3:2]) OF
        0: // equal any
            IF (!aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && !bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            FI
        1: // ranges
            IF (!aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && !bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            FI
        2: // equal each
            IF (!aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && !bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 1
            FI
        3: // equal ordered
            IF (!aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && !bInvalid)
                BoolRes.word[i].bit[j] := 1
            ELSE IF (aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 1
            FI
        ESAC
    ENDFOR
ENDFOR
// aggregate results
CASE (imm8[3:2]) OF
0: // equal any
    IntRes1 := 0
    FOR i := 0 to UpperBound
        FOR j := 0 to UpperBound
            IntRes1[i] := IntRes1[i] OR BoolRes.word[i].bit[j]
        ENDFOR
    ENDFOR
1: // ranges
    IntRes1 := 0
    FOR i := 0 to UpperBound
        FOR j := 0 to UpperBound
            IntRes1[i] := IntRes1[i] OR (BoolRes.word[i].bit[j] AND BoolRes.word[i].bit[j+1])
            j += 2
        ENDFOR
    ENDFOR
2: // equal each
    IntRes1 := 0
    FOR i := 0 to UpperBound
        IntRes1[i] := BoolRes.word[i].bit[i]
    ENDFOR
3: // equal ordered
    IntRes1 := (imm8[0] ? 0xFF : 0xFFFF)
    FOR i := 0 to UpperBound
        k := i
        FOR j := 0 to UpperBound-i
            IntRes1[i] := IntRes1[i] AND BoolRes.word[k].bit[j]
            k := k+1
        ENDFOR
    ENDFOR
ESAC
// optionally negate results
bInvalid := 0
FOR i := 0 to UpperBound
    IF imm8[4]
        IF imm8[5] // only negate valid
            IF b[n+size-1:n] == 0
                bInvalid := 1
            FI
            IF bInvalid // invalid, don't negate
                IntRes2[i] := IntRes1[i]
            ELSE // valid, negate
                IntRes2[i] := -1 XOR IntRes1[i]
            FI
        ELSE // negate all
            IntRes2[i] := -1 XOR IntRes1[i]
        FI
    ELSE // don't negate
        IntRes2[i] := IntRes1[i]
    FI
ENDFOR
// output
dst := (IntRes2 != 0)

SSE4.2
nmmintrin.h
String Compare
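Assuming this flag-returning sibling is what core::arch::x86_64 exposes as _mm_cmpistrc, a sketch of the "ranges" aggregation (consecutive set characters form inclusive ranges) used as a membership test:

use core::arch::x86_64::*;

// _mm_cmpistrc returns the CFlag, i.e. (IntRes2 != 0) from the pseudocode above.
#[target_feature(enable = "sse4.2")]
unsafe fn contains_digit(block: &[u8; 16]) -> bool {
    const IMM8: i32 = _SIDD_UBYTE_OPS | _SIDD_CMP_RANGES;
    // one range pair: '0'..='9'; NUL padding terminates the implicit-length set
    let ranges = _mm_loadu_si128(b"09\0\0\0\0\0\0\0\0\0\0\0\0\0\0".as_ptr().cast());
    let hay = _mm_loadu_si128(block.as_ptr().cast());
    _mm_cmpistrc::<IMM8>(ranges, hay) != 0
}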
Compare packed strings with implicit lengths in "a" and "b" using the control in "imm8", and return 1 if any character in "a" was null, and 0 otherwise.
[strcmp_note]

size := (imm8[0] ? 16 : 8) // 8 or 16-bit characters
UpperBound := (128 / size) - 1
aInvalid := 0
FOR i := 0 to UpperBound
    m := i*size
    IF a[m+size-1:m] == 0
        aInvalid := 1
    FI
ENDFOR
dst := aInvalid

SSE4.2
nmmintrin.h
String Compare
Compare packed strings with implicit lengths in "a" and "b" using the control in "imm8", and return bit 0 of the resulting bit mask.
[strcmp_note]

size := (imm8[0] ? 16 : 8) // 8 or 16-bit characters
UpperBound := (128 / size) - 1
BoolRes := 0
// compare all characters
aInvalid := 0
bInvalid := 0
FOR i := 0 to UpperBound
    m := i*size
    FOR j := 0 to UpperBound
        n := j*size
        BoolRes.word[i].bit[j] := (a[m+size-1:m] == b[n+size-1:n]) ? 1 : 0

        // invalidate characters after EOS
        IF a[m+size-1:m] == 0
            aInvalid := 1
        FI
        IF b[n+size-1:n] == 0
            bInvalid := 1
        FI

        // override comparisons for invalid characters
        CASE (imm8[3:2]) OF
        0: // equal any
            IF (!aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && !bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            FI
        1: // ranges
            IF (!aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && !bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            FI
        2: // equal each
            IF (!aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && !bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 1
            FI
        3: // equal ordered
            IF (!aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && !bInvalid)
                BoolRes.word[i].bit[j] := 1
            ELSE IF (aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 1
            FI
        ESAC
    ENDFOR
ENDFOR
// aggregate results
CASE (imm8[3:2]) OF
0: // equal any
    IntRes1 := 0
    FOR i := 0 to UpperBound
        FOR j := 0 to UpperBound
            IntRes1[i] := IntRes1[i] OR BoolRes.word[i].bit[j]
        ENDFOR
    ENDFOR
1: // ranges
    IntRes1 := 0
    FOR i := 0 to UpperBound
        FOR j := 0 to UpperBound
            IntRes1[i] := IntRes1[i] OR (BoolRes.word[i].bit[j] AND BoolRes.word[i].bit[j+1])
            j += 2
        ENDFOR
    ENDFOR
2: // equal each
    IntRes1 := 0
    FOR i := 0 to UpperBound
        IntRes1[i] := BoolRes.word[i].bit[i]
    ENDFOR
3: // equal ordered
    IntRes1 := (imm8[0] ? 0xFF : 0xFFFF)
    FOR i := 0 to UpperBound
        k := i
        FOR j := 0 to UpperBound-i
            IntRes1[i] := IntRes1[i] AND BoolRes.word[k].bit[j]
            k := k+1
        ENDFOR
    ENDFOR
ESAC
// optionally negate results
bInvalid := 0
FOR i := 0 to UpperBound
    IF imm8[4]
        IF imm8[5] // only negate valid
            IF b[n+size-1:n] == 0
                bInvalid := 1
            FI
            IF bInvalid // invalid, don't negate
                IntRes2[i] := IntRes1[i]
            ELSE // valid, negate
                IntRes2[i] := -1 XOR IntRes1[i]
            FI
        ELSE // negate all
            IntRes2[i] := -1 XOR IntRes1[i]
        FI
    ELSE // don't negate
        IntRes2[i] := IntRes1[i]
    FI
ENDFOR
// output
dst := IntRes2[0]

SSE4.2
nmmintrin.h
String Compare
Compare packed strings with implicit lengths in "a" and "b" using the control in "imm8", and return 1 if "b" did not contain a null character and the resulting mask was zero, and 0 otherwise.
[strcmp_note]

size := (imm8[0] ? 16 : 8) // 8 or 16-bit characters
UpperBound := (128 / size) - 1
BoolRes := 0
// compare all characters
aInvalid := 0
bInvalid := 0
FOR i := 0 to UpperBound
    m := i*size
    FOR j := 0 to UpperBound
        n := j*size
        BoolRes.word[i].bit[j] := (a[m+size-1:m] == b[n+size-1:n]) ? 1 : 0

        // invalidate characters after EOS
        IF a[m+size-1:m] == 0
            aInvalid := 1
        FI
        IF b[n+size-1:n] == 0
            bInvalid := 1
        FI

        // override comparisons for invalid characters
        CASE (imm8[3:2]) OF
        0: // equal any
            IF (!aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && !bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            FI
        1: // ranges
            IF (!aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && !bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            FI
        2: // equal each
            IF (!aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && !bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 1
            FI
        3: // equal ordered
            IF (!aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && !bInvalid)
                BoolRes.word[i].bit[j] := 1
            ELSE IF (aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 1
            FI
        ESAC
    ENDFOR
ENDFOR
// aggregate results
CASE (imm8[3:2]) OF
0: // equal any
    IntRes1 := 0
    FOR i := 0 to UpperBound
        FOR j := 0 to UpperBound
            IntRes1[i] := IntRes1[i] OR BoolRes.word[i].bit[j]
        ENDFOR
    ENDFOR
1: // ranges
    IntRes1 := 0
    FOR i := 0 to UpperBound
        FOR j := 0 to UpperBound
            IntRes1[i] := IntRes1[i] OR (BoolRes.word[i].bit[j] AND BoolRes.word[i].bit[j+1])
            j += 2
        ENDFOR
    ENDFOR
2: // equal each
    IntRes1 := 0
    FOR i := 0 to UpperBound
        IntRes1[i] := BoolRes.word[i].bit[i]
    ENDFOR
3: // equal ordered
    IntRes1 := (imm8[0] ? 0xFF : 0xFFFF)
    FOR i := 0 to UpperBound
        k := i
        FOR j := 0 to UpperBound-i
            IntRes1[i] := IntRes1[i] AND BoolRes.word[k].bit[j]
            k := k+1
        ENDFOR
    ENDFOR
ESAC
// optionally negate results
bInvalid := 0
FOR i := 0 to UpperBound
    IF imm8[4]
        IF imm8[5] // only negate valid
            IF b[n+size-1:n] == 0
                bInvalid := 1
            FI
            IF bInvalid // invalid, don't negate
                IntRes2[i] := IntRes1[i]
            ELSE // valid, negate
                IntRes2[i] := -1 XOR IntRes1[i]
            FI
        ELSE // negate all
            IntRes2[i] := -1 XOR IntRes1[i]
        FI
    ELSE // don't negate
        IntRes2[i] := IntRes1[i]
    FI
ENDFOR
// output
dst := (IntRes2 == 0) AND bInvalid

SSE4.2
nmmintrin.h
String Compare
Compare packed strings in "a" and "b" with lengths "la" and "lb" using the control in "imm8", and store the generated mask in "dst".
[strcmp_note]

size := (imm8[0] ? 16 : 8) // 8 or 16-bit characters
UpperBound := (128 / size) - 1
BoolRes := 0
// compare all characters
aInvalid := 0
bInvalid := 0
FOR i := 0 to UpperBound
    m := i*size
    FOR j := 0 to UpperBound
        n := j*size
        BoolRes.word[i].bit[j] := (a[m+size-1:m] == b[n+size-1:n]) ? 1 : 0

        // invalidate characters after EOS
        IF i == la
            aInvalid := 1
        FI
        IF j == lb
            bInvalid := 1
        FI

        // override comparisons for invalid characters
        CASE (imm8[3:2]) OF
        0: // equal any
            IF (!aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && !bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            FI
        1: // ranges
            IF (!aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && !bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            FI
        2: // equal each
            IF (!aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && !bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 1
            FI
        3: // equal ordered
            IF (!aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && !bInvalid)
                BoolRes.word[i].bit[j] := 1
            ELSE IF (aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 1
            FI
        ESAC
    ENDFOR
ENDFOR
// aggregate results
CASE (imm8[3:2]) OF
0: // equal any
    IntRes1 := 0
    FOR i := 0 to UpperBound
        FOR j := 0 to UpperBound
            IntRes1[i] := IntRes1[i] OR BoolRes.word[i].bit[j]
        ENDFOR
    ENDFOR
1: // ranges
    IntRes1 := 0
    FOR i := 0 to UpperBound
        FOR j := 0 to UpperBound
            IntRes1[i] := IntRes1[i] OR (BoolRes.word[i].bit[j] AND BoolRes.word[i].bit[j+1])
            j += 2
        ENDFOR
    ENDFOR
2: // equal each
    IntRes1 := 0
    FOR i := 0 to UpperBound
        IntRes1[i] := BoolRes.word[i].bit[i]
    ENDFOR
3: // equal ordered
    IntRes1 := (imm8[0] ? 0xFF : 0xFFFF)
    FOR i := 0 to UpperBound
        k := i
        FOR j := 0 to UpperBound-i
            IntRes1[i] := IntRes1[i] AND BoolRes.word[k].bit[j]
            k := k+1
        ENDFOR
    ENDFOR
ESAC
// optionally negate results
FOR i := 0 to UpperBound
    IF imm8[4]
        IF imm8[5] // only negate valid
            IF i >= lb // invalid, don't negate
                IntRes2[i] := IntRes1[i]
            ELSE // valid, negate
                IntRes2[i] := -1 XOR IntRes1[i]
            FI
        ELSE // negate all
            IntRes2[i] := -1 XOR IntRes1[i]
        FI
    ELSE // don't negate
        IntRes2[i] := IntRes1[i]
    FI
ENDFOR
// output
IF imm8[6] // byte / word mask
    FOR i := 0 to UpperBound
        j := i*size
        IF IntRes2[i]
            dst[j+size-1:j] := (imm8[0] ? 0xFF : 0xFFFF)
        ELSE
            dst[j+size-1:j] := 0
        FI
    ENDFOR
ELSE // bit mask
    dst[UpperBound:0] := IntRes2[UpperBound:0]
    dst[127:UpperBound+1] := 0
FI

SSE4.2
nmmintrin.h
String Compare
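Assuming this maps to _mm_cmpestrm in core::arch::x86_64, a sketch of the unit-mask output (imm8[6] set), which expands each set IntRes2 bit to a full byte lane:

use core::arch::x86_64::*;

// Each byte lane of the result is 0xFF where the input byte is an ASCII digit.
// Explicit lengths: la = 2 valid range bytes, lb = 16 valid haystack bytes.
#[target_feature(enable = "sse4.2")]
unsafe fn digit_mask(block: &[u8; 16]) -> [u8; 16] {
    const IMM8: i32 = _SIDD_UBYTE_OPS | _SIDD_CMP_RANGES | _SIDD_UNIT_MASK;
    let ranges = _mm_loadu_si128(b"09\0\0\0\0\0\0\0\0\0\0\0\0\0\0".as_ptr().cast());
    let hay = _mm_loadu_si128(block.as_ptr().cast());
    core::mem::transmute(_mm_cmpestrm::<IMM8>(ranges, 2, hay, 16))
}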
Compare packed strings in "a" and "b" with lengths "la" and "lb" using the control in "imm8", and store the generated index in "dst".
[strcmp_note]

size := (imm8[0] ? 16 : 8) // 8 or 16-bit characters
UpperBound := (128 / size) - 1
BoolRes := 0
// compare all characters
aInvalid := 0
bInvalid := 0
FOR i := 0 to UpperBound
    m := i*size
    FOR j := 0 to UpperBound
        n := j*size
        BoolRes.word[i].bit[j] := (a[m+size-1:m] == b[n+size-1:n]) ? 1 : 0

        // invalidate characters after EOS
        IF i == la
            aInvalid := 1
        FI
        IF j == lb
            bInvalid := 1
        FI

        // override comparisons for invalid characters
        CASE (imm8[3:2]) OF
        0: // equal any
            IF (!aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && !bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            FI
        1: // ranges
            IF (!aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && !bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            FI
        2: // equal each
            IF (!aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && !bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 1
            FI
        3: // equal ordered
            IF (!aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && !bInvalid)
                BoolRes.word[i].bit[j] := 1
            ELSE IF (aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 1
            FI
        ESAC
    ENDFOR
ENDFOR
// aggregate results
CASE (imm8[3:2]) OF
0: // equal any
    IntRes1 := 0
    FOR i := 0 to UpperBound
        FOR j := 0 to UpperBound
            IntRes1[i] := IntRes1[i] OR BoolRes.word[i].bit[j]
        ENDFOR
    ENDFOR
1: // ranges
    IntRes1 := 0
    FOR i := 0 to UpperBound
        FOR j := 0 to UpperBound
            IntRes1[i] := IntRes1[i] OR (BoolRes.word[i].bit[j] AND BoolRes.word[i].bit[j+1])
            j += 2
        ENDFOR
    ENDFOR
2: // equal each
    IntRes1 := 0
    FOR i := 0 to UpperBound
        IntRes1[i] := BoolRes.word[i].bit[i]
    ENDFOR
3: // equal ordered
    IntRes1 := (imm8[0] ? 0xFF : 0xFFFF)
    FOR i := 0 to UpperBound
        k := i
        FOR j := 0 to UpperBound-i
            IntRes1[i] := IntRes1[i] AND BoolRes.word[k].bit[j]
            k := k+1
        ENDFOR
    ENDFOR
ESAC
// optionally negate results
FOR i := 0 to UpperBound
    IF imm8[4]
        IF imm8[5] // only negate valid
            IF i >= lb // invalid, don't negate
                IntRes2[i] := IntRes1[i]
            ELSE // valid, negate
                IntRes2[i] := -1 XOR IntRes1[i]
            FI
        ELSE // negate all
            IntRes2[i] := -1 XOR IntRes1[i]
        FI
    ELSE // don't negate
        IntRes2[i] := IntRes1[i]
    FI
ENDFOR
// output
IF imm8[6] // most significant bit
    tmp := UpperBound
    dst := tmp
    DO WHILE ((tmp >= 0) AND IntRes2[tmp] == 0)
        tmp := tmp - 1
        dst := tmp
    OD
ELSE // least significant bit
    tmp := 0
    dst := tmp
    DO WHILE ((tmp <= UpperBound) AND IntRes2[tmp] == 0)
        tmp := tmp + 1
        dst := tmp
    OD
FI

SSE4.2
nmmintrin.h
String Compare
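Assuming this maps to _mm_cmpestri in core::arch::x86_64, a sketch of why the explicit lengths are useful: lanes at index >= lb are invalid regardless of their contents, so a short tail chunk needs no special casing beyond passing its real length.

use core::arch::x86_64::*;

#[target_feature(enable = "sse4.2")]
unsafe fn find_byte(haystack: &[u8], needle: u8) -> Option<usize> {
    const IMM8: i32 = _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_LEAST_SIGNIFICANT;
    let set = _mm_set1_epi8(needle as i8);
    for (nchunk, chunk) in haystack.chunks(16).enumerate() {
        let mut buf = [0u8; 16];
        buf[..chunk.len()].copy_from_slice(chunk);
        let hay = _mm_loadu_si128(buf.as_ptr().cast());
        // la = 1: only the first copy of the needle is a valid set element
        let idx = _mm_cmpestri::<IMM8>(set, 1, hay, chunk.len() as i32);
        if idx < 16 {
            return Some(nchunk * 16 + idx as usize);
        }
    }
    None
}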
Compare packed strings in "a" and "b" with lengths "la" and "lb" using the control in "imm8", and return 1 if any character in "b" was null, and 0 otherwise.
[strcmp_note]

size := (imm8[0] ? 16 : 8) // 8 or 16-bit characters
UpperBound := (128 / size) - 1
dst := (lb <= UpperBound)

SSE4.2
nmmintrin.h
String Compare
Compare packed strings in "a" and "b" with lengths "la" and "lb" using the control in "imm8", and return 1 if the resulting mask was non-zero, and 0 otherwise.
[strcmp_note]

size := (imm8[0] ? 16 : 8) // 8 or 16-bit characters
UpperBound := (128 / size) - 1
BoolRes := 0
// compare all characters
aInvalid := 0
bInvalid := 0
FOR i := 0 to UpperBound
    m := i*size
    FOR j := 0 to UpperBound
        n := j*size
        BoolRes.word[i].bit[j] := (a[m+size-1:m] == b[n+size-1:n]) ? 1 : 0

        // invalidate characters after EOS
        IF i == la
            aInvalid := 1
        FI
        IF j == lb
            bInvalid := 1
        FI

        // override comparisons for invalid characters
        CASE (imm8[3:2]) OF
        0: // equal any
            IF (!aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && !bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            FI
        1: // ranges
            IF (!aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && !bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            FI
        2: // equal each
            IF (!aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && !bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 1
            FI
        3: // equal ordered
            IF (!aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && !bInvalid)
                BoolRes.word[i].bit[j] := 1
            ELSE IF (aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 1
            FI
        ESAC
    ENDFOR
ENDFOR
// aggregate results
CASE (imm8[3:2]) OF
0: // equal any
    IntRes1 := 0
    FOR i := 0 to UpperBound
        FOR j := 0 to UpperBound
            IntRes1[i] := IntRes1[i] OR BoolRes.word[i].bit[j]
        ENDFOR
    ENDFOR
1: // ranges
    IntRes1 := 0
    FOR i := 0 to UpperBound
        FOR j := 0 to UpperBound
            IntRes1[i] := IntRes1[i] OR (BoolRes.word[i].bit[j] AND BoolRes.word[i].bit[j+1])
            j += 2
        ENDFOR
    ENDFOR
2: // equal each
    IntRes1 := 0
    FOR i := 0 to UpperBound
        IntRes1[i] := BoolRes.word[i].bit[i]
    ENDFOR
3: // equal ordered
    IntRes1 := (imm8[0] ? 0xFF : 0xFFFF)
    FOR i := 0 to UpperBound
        k := i
        FOR j := 0 to UpperBound-i
            IntRes1[i] := IntRes1[i] AND BoolRes.word[k].bit[j]
            k := k+1
        ENDFOR
    ENDFOR
ESAC
// optionally negate results
FOR i := 0 to UpperBound
    IF imm8[4]
        IF imm8[5] // only negate valid
            IF i >= lb // invalid, don't negate
                IntRes2[i] := IntRes1[i]
            ELSE // valid, negate
                IntRes2[i] := -1 XOR IntRes1[i]
            FI
        ELSE // negate all
            IntRes2[i] := -1 XOR IntRes1[i]
        FI
    ELSE // don't negate
        IntRes2[i] := IntRes1[i]
    FI
ENDFOR
// output
dst := (IntRes2 != 0)

SSE4.2
nmmintrin.h
String Compare
Compare packed strings in "a" and "b" with lengths "la" and "lb" using the control in "imm8", and return 1 if any character in "a" was null, and 0 otherwise.
[strcmp_note]

size := (imm8[0] ? 16 : 8) // 8 or 16-bit characters
UpperBound := (128 / size) - 1
dst := (la <= UpperBound)

SSE4.2
nmmintrin.h
String Compare
Compare packed strings in "a" and "b" with lengths "la" and "lb" using the control in "imm8", and return bit 0 of the resulting bit mask.
[strcmp_note]

size := (imm8[0] ? 16 : 8) // 8 or 16-bit characters
UpperBound := (128 / size) - 1
BoolRes := 0
// compare all characters
aInvalid := 0
bInvalid := 0
FOR i := 0 to UpperBound
    m := i*size
    FOR j := 0 to UpperBound
        n := j*size
        BoolRes.word[i].bit[j] := (a[m+size-1:m] == b[n+size-1:n]) ? 1 : 0

        // invalidate characters after EOS
        IF i == la
            aInvalid := 1
        FI
        IF j == lb
            bInvalid := 1
        FI

        // override comparisons for invalid characters
        CASE (imm8[3:2]) OF
        0: // equal any
            IF (!aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && !bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            FI
        1: // ranges
            IF (!aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && !bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            FI
        2: // equal each
            IF (!aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && !bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 1
            FI
        3: // equal ordered
            IF (!aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && !bInvalid)
                BoolRes.word[i].bit[j] := 1
            ELSE IF (aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 1
            FI
        ESAC
    ENDFOR
ENDFOR
// aggregate results
CASE (imm8[3:2]) OF
0: // equal any
    IntRes1 := 0
    FOR i := 0 to UpperBound
        FOR j := 0 to UpperBound
            IntRes1[i] := IntRes1[i] OR BoolRes.word[i].bit[j]
        ENDFOR
    ENDFOR
1: // ranges
    IntRes1 := 0
    FOR i := 0 to UpperBound
        FOR j := 0 to UpperBound
            IntRes1[i] := IntRes1[i] OR (BoolRes.word[i].bit[j] AND BoolRes.word[i].bit[j+1])
            j += 2
        ENDFOR
    ENDFOR
2: // equal each
    IntRes1 := 0
    FOR i := 0 to UpperBound
        IntRes1[i] := BoolRes.word[i].bit[i]
    ENDFOR
3: // equal ordered
    IntRes1 := (imm8[0] ? 0xFF : 0xFFFF)
    FOR i := 0 to UpperBound
        k := i
        FOR j := 0 to UpperBound-i
            IntRes1[i] := IntRes1[i] AND BoolRes.word[k].bit[j]
            k := k+1
        ENDFOR
    ENDFOR
ESAC
// optionally negate results
FOR i := 0 to UpperBound
    IF imm8[4]
        IF imm8[5] // only negate valid
            IF i >= lb // invalid, don't negate
                IntRes2[i] := IntRes1[i]
            ELSE // valid, negate
                IntRes2[i] := -1 XOR IntRes1[i]
            FI
        ELSE // negate all
            IntRes2[i] := -1 XOR IntRes1[i]
        FI
    ELSE // don't negate
        IntRes2[i] := IntRes1[i]
    FI
ENDFOR
// output
dst := IntRes2[0]

SSE4.2
nmmintrin.h
String Compare
Compare packed strings in "a" and "b" with lengths "la" and "lb" using the control in "imm8", and return 1 if "b" did not contain a null character and the resulting mask was zero, and 0 otherwise.
[strcmp_note]

size := (imm8[0] ? 16 : 8) // 8 or 16-bit characters
UpperBound := (128 / size) - 1
BoolRes := 0
// compare all characters
aInvalid := 0
bInvalid := 0
FOR i := 0 to UpperBound
    m := i*size
    FOR j := 0 to UpperBound
        n := j*size
        BoolRes.word[i].bit[j] := (a[m+size-1:m] == b[n+size-1:n]) ? 1 : 0

        // invalidate characters after EOS
        IF i == la
            aInvalid := 1
        FI
        IF j == lb
            bInvalid := 1
        FI

        // override comparisons for invalid characters
        CASE (imm8[3:2]) OF
        0: // equal any
            IF (!aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && !bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            FI
        1: // ranges
            IF (!aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && !bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            FI
        2: // equal each
            IF (!aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && !bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 1
            FI
        3: // equal ordered
            IF (!aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 0
            ELSE IF (aInvalid && !bInvalid)
                BoolRes.word[i].bit[j] := 1
            ELSE IF (aInvalid && bInvalid)
                BoolRes.word[i].bit[j] := 1
            FI
        ESAC
    ENDFOR
ENDFOR
// aggregate results
CASE (imm8[3:2]) OF
0: // equal any
    IntRes1 := 0
    FOR i := 0 to UpperBound
        FOR j := 0 to UpperBound
            IntRes1[i] := IntRes1[i] OR BoolRes.word[i].bit[j]
        ENDFOR
    ENDFOR
1: // ranges
    IntRes1 := 0
    FOR i := 0 to UpperBound
        FOR j := 0 to UpperBound
            IntRes1[i] := IntRes1[i] OR (BoolRes.word[i].bit[j] AND BoolRes.word[i].bit[j+1])
            j += 2
        ENDFOR
    ENDFOR
2: // equal each
    IntRes1 := 0
    FOR i := 0 to UpperBound
        IntRes1[i] := BoolRes.word[i].bit[i]
    ENDFOR
3: // equal ordered
    IntRes1 := (imm8[0] ? 0xFF : 0xFFFF)
    FOR i := 0 to UpperBound
        k := i
        FOR j := 0 to UpperBound-i
            IntRes1[i] := IntRes1[i] AND BoolRes.word[k].bit[j]
            k := k+1
        ENDFOR
    ENDFOR
ESAC
// optionally negate results
FOR i := 0 to UpperBound
    IF imm8[4]
        IF imm8[5] // only negate valid
            IF i >= lb // invalid, don't negate
                IntRes2[i] := IntRes1[i]
            ELSE // valid, negate
                IntRes2[i] := -1 XOR IntRes1[i]
            FI
        ELSE // negate all
            IntRes2[i] := -1 XOR IntRes1[i]
        FI
    ELSE // don't negate
        IntRes2[i] := IntRes1[i]
    FI
ENDFOR
// output
dst := (IntRes2 == 0) AND (lb > UpperBound)

SSE4.2
nmmintrin.h
String Compare
Compare packed signed 64-bit integers in "a" and "b" for greater-than, and store the results in "dst".

FOR j := 0 to 1
    i := j*64
    dst[i+63:i] := ( a[i+63:i] > b[i+63:i] ) ? 0xFFFFFFFFFFFFFFFF : 0
ENDFOR

SSE4.2
nmmintrin.h
Compare
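Assuming this is _mm_cmpgt_epi64 in core::arch::x86_64, a minimal sketch:

use core::arch::x86_64::*;

// Each lane becomes all-ones where a > b, else zero (PCMPGTQ).
#[target_feature(enable = "sse4.2")]
unsafe fn gt_lanes(a: [i64; 2], b: [i64; 2]) -> [i64; 2] {
    let va = _mm_set_epi64x(a[1], a[0]); // _mm_set_epi64x takes the high lane first
    let vb = _mm_set_epi64x(b[1], b[0]);
    core::mem::transmute(_mm_cmpgt_epi64(va, vb))
}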
Starting with the initial value in "crc", accumulates a CRC32 value for unsigned 8-bit integer "v", and stores the result in "dst".

tmp1[7:0] := v[0:7] // bit reflection
tmp2[31:0] := crc[0:31] // bit reflection
tmp3[39:0] := tmp1[7:0] << 32
tmp4[39:0] := tmp2[31:0] << 8
tmp5[39:0] := tmp3[39:0] XOR tmp4[39:0]
tmp6[31:0] := MOD2(tmp5[39:0], 0x11EDC6F41) // remainder from polynomial division modulus 2
dst[31:0] := tmp6[0:31] // bit reflection

SSE4.2
nmmintrin.h
Cryptography
Starting with the initial value in "crc", accumulates a CRC32 value for unsigned 16-bit integer "v", and stores the result in "dst".

tmp1[15:0] := v[0:15] // bit reflection
tmp2[31:0] := crc[0:31] // bit reflection
tmp3[47:0] := tmp1[15:0] << 32
tmp4[47:0] := tmp2[31:0] << 16
tmp5[47:0] := tmp3[47:0] XOR tmp4[47:0]
tmp6[31:0] := MOD2(tmp5[47:0], 0x11EDC6F41) // remainder from polynomial division modulus 2
dst[31:0] := tmp6[0:31] // bit reflection

SSE4.2
nmmintrin.h
Cryptography
Starting with the initial value in "crc", accumulates a CRC32 value for unsigned 32-bit integer "v", and stores the result in "dst".

tmp1[31:0] := v[0:31] // bit reflection
tmp2[31:0] := crc[0:31] // bit reflection
tmp3[63:0] := tmp1[31:0] << 32
tmp4[63:0] := tmp2[31:0] << 32
tmp5[63:0] := tmp3[63:0] XOR tmp4[63:0]
tmp6[31:0] := MOD2(tmp5[63:0], 0x11EDC6F41) // remainder from polynomial division modulus 2
dst[31:0] := tmp6[0:31] // bit reflection

SSE4.2
nmmintrin.h
Cryptography
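Assuming these entries are the _mm_crc32_u8 through _mm_crc32_u64 family in core::arch::x86_64, a sketch that chains the accumulator exactly as the "crc" operand describes:

use core::arch::x86_64::*;

// Fold 8 bytes per step with the 64-bit form, then finish the tail bytewise.
// The usual CRC-32C framing (seed of !0, final bitwise NOT) is left to the caller.
#[target_feature(enable = "sse4.2")]
unsafe fn crc32c_update(mut crc: u32, data: &[u8]) -> u32 {
    let mut chunks = data.chunks_exact(8);
    for chunk in &mut chunks {
        let v = u64::from_le_bytes(chunk.try_into().unwrap());
        crc = _mm_crc32_u64(crc as u64, v) as u32;
    }
    for &byte in chunks.remainder() {
        crc = _mm_crc32_u8(crc, byte);
    }
    crc
}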
Starting with the initial value in "crc", accumulates a CRC32 value for unsigned 64-bit integer "v", and stores the result in "dst".

tmp1[63:0] := v[0:63] // bit reflection
tmp2[31:0] := crc[0:31] // bit reflection
tmp3[95:0] := tmp1[63:0] << 32
tmp4[95:0] := tmp2[31:0] << 64
tmp5[95:0] := tmp3[95:0] XOR tmp4[95:0]
tmp6[31:0] := MOD2(tmp5[95:0], 0x11EDC6F41) // remainder from polynomial division modulus 2
dst[31:0] := tmp6[0:31] // bit reflection

SSE4.2
nmmintrin.h
Cryptography

Compute the absolute value of packed signed 8-bit integers in "a", and store the unsigned results in "dst".

FOR j := 0 to 7
    i := j*8
    dst[i+7:i] := ABS(Int(a[i+7:i]))
ENDFOR

SSSE3
tmmintrin.h
Special Math Functions
Compute the absolute value of packed signed 8-bit integers in "a", and store the unsigned results in "dst".

FOR j := 0 to 15
    i := j*8
    dst[i+7:i] := ABS(a[i+7:i])
ENDFOR

SSSE3
tmmintrin.h
Special Math Functions
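Assuming this is _mm_abs_epi8 in core::arch::x86_64, a minimal sketch:

use core::arch::x86_64::*;

// The results are unsigned on purpose: ABS(-128) is 128, which only fits in u8.
#[target_feature(enable = "ssse3")]
unsafe fn abs_bytes(x: [i8; 16]) -> [u8; 16] {
    let v = _mm_loadu_si128(x.as_ptr().cast());
    core::mem::transmute(_mm_abs_epi8(v))
}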
Compute the absolute value of packed signed 16-bit integers in "a", and store the unsigned results in "dst".

FOR j := 0 to 3
    i := j*16
    dst[i+15:i] := ABS(Int(a[i+15:i]))
ENDFOR

SSSE3
tmmintrin.h
Special Math Functions
Compute the absolute value of packed signed 16-bit integers in "a", and store the unsigned results in "dst".

FOR j := 0 to 7
    i := j*16
    dst[i+15:i] := ABS(a[i+15:i])
ENDFOR

SSSE3
tmmintrin.h
Special Math Functions
Compute the absolute value of packed signed 32-bit integers in "a", and store the unsigned results in "dst".

FOR j := 0 to 1
    i := j*32
    dst[i+31:i] := ABS(a[i+31:i])
ENDFOR

SSSE3
tmmintrin.h
Special Math Functions
Compute the absolute value of packed signed 32-bit integers in "a", and store the unsigned results in "dst".

FOR j := 0 to 3
    i := j*32
    dst[i+31:i] := ABS(a[i+31:i])
ENDFOR

SSSE3
tmmintrin.h
Special Math Functions
Shuffle packed 8-bit integers in "a" according to shuffle control mask in the corresponding 8-bit element of "b", and store the results in "dst".

FOR j := 0 to 15
    i := j*8
    IF b[i+7] == 1
        dst[i+7:i] := 0
    ELSE
        index[3:0] := b[i+3:i]
        dst[i+7:i] := a[index*8+7:index*8]
    FI
ENDFOR

SSSE3
tmmintrin.h
Swizzle
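Assuming this is _mm_shuffle_epi8 in core::arch::x86_64, a sketch of the control-mask convention from the pseudocode above:

use core::arch::x86_64::*;

// PSHUFB as a byte permutation: control indices 15..0 reverse the lanes.
// A control byte with its high bit set (b[i+7] above) zeroes that lane instead.
#[target_feature(enable = "ssse3")]
unsafe fn reverse_lanes(v: __m128i) -> __m128i {
    let ctrl = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
    _mm_shuffle_epi8(v, ctrl)
}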
Shuffle packed 8-bit integers in "a" according to shuffle control mask in the corresponding 8-bit element of "b", and store the results in "dst".

FOR j := 0 to 7
    i := j*8
    IF b[i+7] == 1
        dst[i+7:i] := 0
    ELSE
        index[2:0] := b[i+2:i]
        dst[i+7:i] := a[index*8+7:index*8]
    FI
ENDFOR

SSSE3
tmmintrin.h
Swizzle
Concatenate 16-byte blocks in "a" and "b" into a 32-byte temporary result, shift the result right by "imm8" bytes, and store the low 16 bytes in "dst".

tmp[255:0] := ((a[127:0] << 128)[255:0] OR b[127:0]) >> (imm8*8)
dst[127:0] := tmp[127:0]

SSSE3
tmmintrin.h
Miscellaneous
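Assuming this is _mm_alignr_epi8 in core::arch::x86_64, a sketch of the usual unaligned-window idiom built on the concatenate-and-shift above:

use core::arch::x86_64::*;

// A 16-byte window at byte offset 5 across two adjacent 16-byte loads:
// `lo` holds bytes 0..16 of a buffer and `hi` holds bytes 16..32.
#[target_feature(enable = "ssse3")]
unsafe fn window_at_5(lo: __m128i, hi: __m128i) -> __m128i {
    // tmp := ((hi << 128) OR lo) >> (5*8); the low 16 bytes are kept
    _mm_alignr_epi8::<5>(hi, lo)
}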
Concatenate 8-byte blocks in "a" and "b" into a 16-byte temporary result, shift the result right by "imm8" bytes, and store the low 8 bytes in "dst".

tmp[127:0] := ((a[63:0] << 64)[127:0] OR b[63:0]) >> (imm8*8)
dst[63:0] := tmp[63:0]

SSSE3
tmmintrin.h
Miscellaneous
Horizontally add adjacent pairs of 16-bit integers in "a" and "b", and pack the signed 16-bit results in "dst".

dst[15:0] := a[31:16] + a[15:0]
dst[31:16] := a[63:48] + a[47:32]
dst[47:32] := a[95:80] + a[79:64]
dst[63:48] := a[127:112] + a[111:96]
dst[79:64] := b[31:16] + b[15:0]
dst[95:80] := b[63:48] + b[47:32]
dst[111:96] := b[95:80] + b[79:64]
dst[127:112] := b[127:112] + b[111:96]

SSSE3
tmmintrin.h
Arithmetic
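Assuming this is _mm_hadd_epi16 in core::arch::x86_64, a sketch that iterates the pairwise add into a full horizontal reduction:

use core::arch::x86_64::*;

// Three rounds of pairwise adds reduce 8 lanes to a single wrapping sum;
// the saturating variant documented next clamps instead of wrapping.
#[target_feature(enable = "ssse3")]
unsafe fn sum_lanes_i16(v: __m128i) -> i16 {
    let v = _mm_hadd_epi16(v, v); // 8 sums -> lanes 0..3 (duplicated in 4..7)
    let v = _mm_hadd_epi16(v, v); // 4 sums -> lanes 0..1
    let v = _mm_hadd_epi16(v, v); // total  -> lane 0
    _mm_extract_epi16::<0>(v) as i16
}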
Horizontally add adjacent pairs of signed 16-bit integers in "a" and "b" using saturation, and pack the signed 16-bit results in "dst".

dst[15:0] := Saturate16(a[31:16] + a[15:0])
dst[31:16] := Saturate16(a[63:48] + a[47:32])
dst[47:32] := Saturate16(a[95:80] + a[79:64])
dst[63:48] := Saturate16(a[127:112] + a[111:96])
dst[79:64] := Saturate16(b[31:16] + b[15:0])
dst[95:80] := Saturate16(b[63:48] + b[47:32])
dst[111:96] := Saturate16(b[95:80] + b[79:64])
dst[127:112] := Saturate16(b[127:112] + b[111:96])

SSSE3
tmmintrin.h
Arithmetic
Horizontally add adjacent pairs of 32-bit integers in "a" and "b", and pack the signed 32-bit results in "dst".

dst[31:0] := a[63:32] + a[31:0]
dst[63:32] := a[127:96] + a[95:64]
dst[95:64] := b[63:32] + b[31:0]
dst[127:96] := b[127:96] + b[95:64]

SSSE3
tmmintrin.h
Arithmetic
Horizontally add adjacent pairs of 16-bit integers in "a" and "b", and pack the signed 16-bit results in "dst".

dst[15:0] := a[31:16] + a[15:0]
dst[31:16] := a[63:48] + a[47:32]
dst[47:32] := b[31:16] + b[15:0]
dst[63:48] := b[63:48] + b[47:32]

SSSE3
tmmintrin.h
Arithmetic
Horizontally add adjacent pairs of 32-bit integers in "a" and "b", and pack the signed 32-bit results in "dst".

dst[31:0] := a[63:32] + a[31:0]
dst[63:32] := b[63:32] + b[31:0]

SSSE3
tmmintrin.h
Arithmetic
- - - - Horizontally add adjacent pairs of signed 16-bit integers in "a" and "b" using - saturation, and pack the signed 16-bit results in "dst". - - dst[15:0] := Saturate16(a[31:16] + a[15:0]) - dst[31:16] := Saturate16(a[63:48] + a[47:32]) - dst[47:32] := Saturate16(b[31:16] + b[15:0]) - dst[63:48] := Saturate16(b[63:48] + b[47:32]) - - - SSSE3 -
tmmintrin.h
- Arithmetic + + + + Horizontally add adjacent pairs of signed 16-bit integers in "a" and "b" using saturation, and pack the signed 16-bit results in "dst". + +dst[15:0] := Saturate16(a[31:16] + a[15:0]) +dst[31:16] := Saturate16(a[63:48] + a[47:32]) +dst[47:32] := Saturate16(b[31:16] + b[15:0]) +dst[63:48] := Saturate16(b[63:48] + b[47:32]) + + + SSSE3 +
tmmintrin.h
+ Arithmetic
- - - - Horizontally subtract adjacent pairs of 16-bit integers in "a" and "b", and - pack the signed 16-bit results in "dst". - - dst[15:0] := a[15:0] - a[31:16] - dst[31:16] := a[47:32] - a[63:48] - dst[47:32] := a[79:64] - a[95:80] - dst[63:48] := a[111:96] - a[127:112] - dst[79:64] := b[15:0] - b[31:16] - dst[95:80] := b[47:32] - b[63:48] - dst[111:96] := b[79:64] - b[95:80] - dst[127:112] := b[111:96] - b[127:112] - - - SSSE3 -
tmmintrin.h
- Arithmetic + + + + Horizontally subtract adjacent pairs of 16-bit integers in "a" and "b", and pack the signed 16-bit results in "dst". + +dst[15:0] := a[15:0] - a[31:16] +dst[31:16] := a[47:32] - a[63:48] +dst[47:32] := a[79:64] - a[95:80] +dst[63:48] := a[111:96] - a[127:112] +dst[79:64] := b[15:0] - b[31:16] +dst[95:80] := b[47:32] - b[63:48] +dst[111:96] := b[79:64] - b[95:80] +dst[127:112] := b[111:96] - b[127:112] + + + SSSE3 +
tmmintrin.h
+ Arithmetic
- - - - Horizontally subtract adjacent pairs of signed 16-bit integers in "a" and "b" - using saturation, and pack the signed 16-bit results in "dst". - - dst[15:0] := Saturate16(a[15:0] - a[31:16]) - dst[31:16] := Saturate16(a[47:32] - a[63:48]) - dst[47:32] := Saturate16(a[79:64] - a[95:80]) - dst[63:48] := Saturate16(a[111:96] - a[127:112]) - dst[79:64] := Saturate16(b[15:0] - b[31:16]) - dst[95:80] := Saturate16(b[47:32] - b[63:48]) - dst[111:96] := Saturate16(b[79:64] - b[95:80]) - dst[127:112] := Saturate16(b[111:96] - b[127:112]) - - - SSSE3 -
tmmintrin.h
- Arithmetic + + + + Horizontally subtract adjacent pairs of signed 16-bit integers in "a" and "b" using saturation, and pack the signed 16-bit results in "dst". + +dst[15:0] := Saturate16(a[15:0] - a[31:16]) +dst[31:16] := Saturate16(a[47:32] - a[63:48]) +dst[47:32] := Saturate16(a[79:64] - a[95:80]) +dst[63:48] := Saturate16(a[111:96] - a[127:112]) +dst[79:64] := Saturate16(b[15:0] - b[31:16]) +dst[95:80] := Saturate16(b[47:32] - b[63:48]) +dst[111:96] := Saturate16(b[79:64] - b[95:80]) +dst[127:112] := Saturate16(b[111:96] - b[127:112]) + + + SSSE3 +
tmmintrin.h
+ Arithmetic
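
Saturation is the only difference from the plain horizontal subtract: a result below -32768 or above 32767 is clamped instead of wrapping. A small illustration with the stable `_mm_hsubs_epi16` intrinsic (a sketch, assuming SSSE3):

    use std::arch::x86_64::*;

    #[target_feature(enable = "ssse3")]
    unsafe fn demo_hsubs() {
        let a = _mm_setr_epi16(i16::MIN, 1, 0, 0, 0, 0, 0, 0);
        // lane 0 = Saturate16(-32768 - 1) = -32768 (clamped, not wrapped)
        let r = _mm_hsubs_epi16(a, a);
        let mut out = [0i16; 8];
        _mm_storeu_si128(out.as_mut_ptr().cast(), r);
        assert_eq!(out[0], i16::MIN);
    }
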
- - - - Horizontally subtract adjacent pairs of 32-bit integers in "a" and "b", and - pack the signed 32-bit results in "dst". - - dst[31:0] := a[31:0] - a[63:32] - dst[63:32] := a[95:64] - a[127:96] - dst[95:64] := b[31:0] - b[63:32] - dst[127:96] := b[95:64] - b[127:96] - - - SSSE3 -
tmmintrin.h
- Arithmetic + + + + Horizontally subtract adjacent pairs of 32-bit integers in "a" and "b", and pack the signed 32-bit results in "dst". + +dst[31:0] := a[31:0] - a[63:32] +dst[63:32] := a[95:64] - a[127:96] +dst[95:64] := b[31:0] - b[63:32] +dst[127:96] := b[95:64] - b[127:96] + + + SSSE3 +
tmmintrin.h
+ Arithmetic
- - - - Horizontally subtract adjacent pairs of 16-bit integers in "a" and "b", and - pack the signed 16-bit results in "dst". - - dst[15:0] := a[15:0] - a[31:16] - dst[31:16] := a[47:32] - a[63:48] - dst[47:32] := b[15:0] - b[31:16] - dst[63:48] := b[47:32] - b[63:48] - - - SSSE3 -
tmmintrin.h
- Arithmetic + + + + Horizontally subtract adjacent pairs of 16-bit integers in "a" and "b", and pack the signed 16-bit results in "dst". + +dst[15:0] := a[15:0] - a[31:16] +dst[31:16] := a[47:32] - a[63:48] +dst[47:32] := b[15:0] - b[31:16] +dst[63:48] := b[47:32] - b[63:48] + + + SSSE3 +
tmmintrin.h
+ Arithmetic
- - - - Horizontally subtract adjacent pairs of 32-bit integers in "a" and "b", and - pack the signed 32-bit results in "dst". - - dst[31:0] := a[31:0] - a[63:32] - dst[63:32] := b[31:0] - b[63:32] - - - SSSE3 -
tmmintrin.h
- Arithmetic + + + + Horizontally subtract adjacent pairs of 32-bit integers in "a" and "b", and pack the signed 32-bit results in "dst". + +dst[31:0] := a[31:0] - a[63:32] +dst[63:32] := b[31:0] - b[63:32] + + + SSSE3 +
tmmintrin.h
+ Arithmetic
- - - - Horizontally subtract adjacent pairs of signed 16-bit integers in "a" and "b" - using saturation, and pack the signed 16-bit results in "dst". - - dst[15:0] := Saturate16(a[15:0] - a[31:16]) - dst[31:16] := Saturate16(a[47:32] - a[63:48]) - dst[47:32] := Saturate16(b[15:0] - b[31:16]) - dst[63:48] := Saturate16(b[47:32] - b[63:48]) - - - SSSE3 -
tmmintrin.h
- Arithmetic + + + + Horizontally subtract adjacent pairs of signed 16-bit integers in "a" and "b" using saturation, and pack the signed 16-bit results in "dst". + +dst[15:0] := Saturate16(a[15:0] - a[31:16]) +dst[31:16] := Saturate16(a[47:32] - a[63:48]) +dst[47:32] := Saturate16(b[15:0] - b[31:16]) +dst[63:48] := Saturate16(b[47:32] - b[63:48]) + + + SSSE3 +
tmmintrin.h
+ Arithmetic
- - - - Vertically multiply each unsigned 8-bit integer from "a" with the corresponding - signed 8-bit integer from "b", producing intermediate signed 16-bit integers. - Horizontally add adjacent pairs of intermediate signed 16-bit integers, and pack the - saturated results in "dst". - - FOR j := 0 to 7 - i := j*16 - dst[i+15:i] := Saturate16( a[i+15:i+8]*b[i+15:i+8] + a[i+7:i]*b[i+7:i] ) - ENDFOR - - - SSSE3 -
tmmintrin.h
- Arithmetic + + + + Vertically multiply each unsigned 8-bit integer from "a" with the corresponding signed 8-bit integer from "b", producing intermediate signed 16-bit integers. Horizontally add adjacent pairs of intermediate signed 16-bit integers, and pack the saturated results in "dst". + +FOR j := 0 to 7 + i := j*16 + dst[i+15:i] := Saturate16( a[i+15:i+8]*b[i+15:i+8] + a[i+7:i]*b[i+7:i] ) +ENDFOR + + + SSSE3 +
tmmintrin.h
+ Arithmetic
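
Note the asymmetry in the operation above: "a" is treated as unsigned bytes, "b" as signed bytes, and only the final pairwise sum saturates. A sketch of the saturating case with the stable `_mm_maddubs_epi16` intrinsic (assumes SSSE3):

    use std::arch::x86_64::*;

    #[target_feature(enable = "ssse3")]
    unsafe fn demo_maddubs() {
        let a = _mm_set1_epi8(-1); // read as u8: 16 lanes of 255
        let b = _mm_set1_epi8(127); // read as i8: 16 lanes of 127
        // each i16 lane: Saturate16(255*127 + 255*127) = Saturate16(64770) = 32767
        let r = _mm_maddubs_epi16(a, b);
        let mut out = [0i16; 8];
        _mm_storeu_si128(out.as_mut_ptr().cast(), r);
        assert_eq!(out, [i16::MAX; 8]);
    }
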
- - - - Vertically multiply each unsigned 8-bit integer from "a" with the corresponding - signed 8-bit integer from "b", producing intermediate signed 16-bit integers. - Horizontally add adjacent pairs of intermediate signed 16-bit integers, and pack the - saturated results in "dst". - - FOR j := 0 to 3 - i := j*16 - dst[i+15:i] := Saturate16( a[i+15:i+8]*b[i+15:i+8] + a[i+7:i]*b[i+7:i] ) - ENDFOR - - - SSSE3 -
tmmintrin.h
- Arithmetic + + + + Vertically multiply each unsigned 8-bit integer from "a" with the corresponding signed 8-bit integer from "b", producing intermediate signed 16-bit integers. Horizontally add adjacent pairs of intermediate signed 16-bit integers, and pack the saturated results in "dst". + +FOR j := 0 to 3 + i := j*16 + dst[i+15:i] := Saturate16( a[i+15:i+8]*b[i+15:i+8] + a[i+7:i]*b[i+7:i] ) +ENDFOR + + + SSSE3 +
tmmintrin.h
+ Arithmetic
- - - - Multiply packed signed 16-bit integers in "a" and "b", producing intermediate - signed 32-bit integers. Truncate each intermediate integer to the 18 most significant - bits, round by adding 1, and store bits [16:1] to "dst". - - FOR j := 0 to 7 - i := j*16 - tmp[31:0] := ((SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])) >> 14) + 1 - dst[i+15:i] := tmp[16:1] - ENDFOR - - - SSSE3 -
tmmintrin.h
- Arithmetic + + + + Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits [16:1] to "dst". + +FOR j := 0 to 7 + i := j*16 + tmp[31:0] := ((SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])) >> 14) + 1 + dst[i+15:i] := tmp[16:1] +ENDFOR + + + SSSE3 +
tmmintrin.h
+ Arithmetic
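
This pseudocode is a rounded Q15 fixed-point multiply. Worked through once: for a = 0x4000 (0.5 in Q15) and b = 0x2000 (0.25), the 32-bit product is 0x0800_0000; shifting right by 14 gives 8192, adding 1 gives 8193, and taking bits [16:1] yields 4096 = 0x1000, i.e. 0.125 in Q15. The same computation via the stable `_mm_mulhrs_epi16` intrinsic (a sketch, assuming SSSE3):

    use std::arch::x86_64::*;

    #[target_feature(enable = "ssse3")]
    unsafe fn demo_mulhrs() {
        let a = _mm_set1_epi16(0x4000); // 0.5 in Q15
        let b = _mm_set1_epi16(0x2000); // 0.25 in Q15
        let r = _mm_mulhrs_epi16(a, b);
        let mut out = [0i16; 8];
        _mm_storeu_si128(out.as_mut_ptr().cast(), r);
        assert_eq!(out, [0x1000; 8]); // 0.125 in Q15
    }
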
- - - - Multiply packed signed 16-bit integers in "a" and "b", producing intermediate - signed 32-bit integers. Truncate each intermediate integer to the 18 most significant - bits, round by adding 1, and store bits [16:1] to "dst". - - FOR j := 0 to 3 - i := j*16 - tmp[31:0] := ((SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])) >> 14) + 1 - dst[i+15:i] := tmp[16:1] - ENDFOR - - - SSSE3 -
tmmintrin.h
- Arithmetic + + + + Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits [16:1] to "dst". + +FOR j := 0 to 3 + i := j*16 + tmp[31:0] := ((SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])) >> 14) + 1 + dst[i+15:i] := tmp[16:1] +ENDFOR + + + SSSE3 +
tmmintrin.h
+ Arithmetic
- - - - Negate packed 8-bit integers in "a" when the corresponding signed 8-bit integer - in "b" is negative, and store the results in "dst". Element in "dst" are zeroed out when - the corresponding element in "b" is zero. - - FOR j := 0 to 15 - i := j*8 - IF b[i+7:i] < 0 - dst[i+7:i] := -(a[i+7:i]) - ELSE IF b[i+7:i] == 0 - dst[i+7:i] := 0 - ELSE - dst[i+7:i] := a[i+7:i] - FI - ENDFOR - - - SSSE3 -
tmmintrin.h
- Arithmetic + + + + Negate packed 8-bit integers in "a" when the corresponding signed 8-bit integer in "b" is negative, and store the results in "dst". Elements in "dst" are zeroed out when the corresponding element in "b" is zero. + +FOR j := 0 to 15 + i := j*8 + IF b[i+7:i] < 0 + dst[i+7:i] := -(a[i+7:i]) + ELSE IF b[i+7:i] == 0 + dst[i+7:i] := 0 + ELSE + dst[i+7:i] := a[i+7:i] + FI +ENDFOR + + + SSSE3 +
tmmintrin.h
+ Arithmetic
- - - - Negate packed 16-bit integers in "a" when the corresponding signed 16-bit - integer in "b" is negative, and store the results in "dst". Element in "dst" are zeroed - out when the corresponding element in "b" is zero. - - FOR j := 0 to 7 - i := j*16 - IF b[i+15:i] < 0 - dst[i+15:i] := -(a[i+15:i]) - ELSE IF b[i+15:i] == 0 - dst[i+15:i] := 0 - ELSE - dst[i+15:i] := a[i+15:i] - FI - ENDFOR - - - SSSE3 -
tmmintrin.h
- Arithmetic + + + + Negate packed 16-bit integers in "a" when the corresponding signed 16-bit integer in "b" is negative, and store the results in "dst". Elements in "dst" are zeroed out when the corresponding element in "b" is zero. + +FOR j := 0 to 7 + i := j*16 + IF b[i+15:i] < 0 + dst[i+15:i] := -(a[i+15:i]) + ELSE IF b[i+15:i] == 0 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := a[i+15:i] + FI +ENDFOR + + + SSSE3 +
tmmintrin.h
+ Arithmetic
- - - - Negate packed 32-bit integers in "a" when the corresponding signed 32-bit - integer in "b" is negative, and store the results in "dst". Element in "dst" are zeroed - out when the corresponding element in "b" is zero. - - FOR j := 0 to 3 - i := j*32 - IF b[i+31:i] < 0 - dst[i+31:i] := -(a[i+31:i]) - ELSE IF b[i+31:i] == 0 - dst[i+31:i] := 0 - ELSE - dst[i+31:i] := a[i+31:i] - FI - ENDFOR - - - SSSE3 -
tmmintrin.h
- Arithmetic + + + + Negate packed 32-bit integers in "a" when the corresponding signed 32-bit integer in "b" is negative, and store the results in "dst". Elements in "dst" are zeroed out when the corresponding element in "b" is zero. + +FOR j := 0 to 3 + i := j*32 + IF b[i+31:i] < 0 + dst[i+31:i] := -(a[i+31:i]) + ELSE IF b[i+31:i] == 0 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR + + + SSSE3 +
tmmintrin.h
+ Arithmetic
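
The three-way select above (negate, zero, or pass through, steered by the sign of "b") maps to the stable `_mm_sign_epi32` intrinsic; a minimal sketch (assumes SSSE3):

    use std::arch::x86_64::*;

    #[target_feature(enable = "ssse3")]
    unsafe fn demo_sign() {
        let a = _mm_setr_epi32(10, 20, 30, 40);
        let b = _mm_setr_epi32(-1, 0, 5, -9);
        // negated where b < 0, zeroed where b == 0, unchanged where b > 0
        let r = _mm_sign_epi32(a, b);
        let mut out = [0i32; 4];
        _mm_storeu_si128(out.as_mut_ptr().cast(), r);
        assert_eq!(out, [-10, 0, 30, -40]);
    }
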
- - - - Negate packed 8-bit integers in "a" when the corresponding signed 8-bit integer - in "b" is negative, and store the results in "dst". Element in "dst" are zeroed out when - the corresponding element in "b" is zero. - - FOR j := 0 to 7 - i := j*8 - IF b[i+7:i] < 0 - dst[i+7:i] := -(a[i+7:i]) - ELSE IF b[i+7:i] == 0 - dst[i+7:i] := 0 - ELSE - dst[i+7:i] := a[i+7:i] - FI - ENDFOR - - - SSSE3 -
tmmintrin.h
- Arithmetic + + + + Negate packed 8-bit integers in "a" when the corresponding signed 8-bit integer in "b" is negative, and store the results in "dst". Elements in "dst" are zeroed out when the corresponding element in "b" is zero. + +FOR j := 0 to 7 + i := j*8 + IF b[i+7:i] < 0 + dst[i+7:i] := -(a[i+7:i]) + ELSE IF b[i+7:i] == 0 + dst[i+7:i] := 0 + ELSE + dst[i+7:i] := a[i+7:i] + FI +ENDFOR + + + SSSE3 +
tmmintrin.h
+ Arithmetic
- - - - Negate packed 16-bit integers in "a" when the corresponding signed 16-bit - integer in "b" is negative, and store the results in "dst". Element in "dst" are zeroed - out when the corresponding element in "b" is zero. - - FOR j := 0 to 3 - i := j*16 - IF b[i+15:i] < 0 - dst[i+15:i] := -(a[i+15:i]) - ELSE IF b[i+15:i] == 0 - dst[i+15:i] := 0 - ELSE - dst[i+15:i] := a[i+15:i] - FI - ENDFOR - - - SSSE3 -
tmmintrin.h
- Arithmetic + + + + Negate packed 16-bit integers in "a" when the corresponding signed 16-bit integer in "b" is negative, and store the results in "dst". Elements in "dst" are zeroed out when the corresponding element in "b" is zero. + +FOR j := 0 to 3 + i := j*16 + IF b[i+15:i] < 0 + dst[i+15:i] := -(a[i+15:i]) + ELSE IF b[i+15:i] == 0 + dst[i+15:i] := 0 + ELSE + dst[i+15:i] := a[i+15:i] + FI +ENDFOR + + + SSSE3 +
tmmintrin.h
+ Arithmetic
- - - - Negate packed 32-bit integers in "a" when the corresponding signed 32-bit - integer in "b" is negative, and store the results in "dst". Element in "dst" are zeroed - out when the corresponding element in "b" is zero. - - FOR j := 0 to 1 - i := j*32 - IF b[i+31:i] < 0 - dst[i+31:i] := -(a[i+31:i]) - ELSE IF b[i+31:i] == 0 - dst[i+31:i] := 0 - ELSE - dst[i+31:i] := a[i+31:i] - FI - ENDFOR - - - SSSE3 -
tmmintrin.h
- Arithmetic -
- - - - - - Copy the current 64-bit value of the processor's time-stamp counter into "dst". - dst[63:0] := TimeStampCounter - - - TSC -
immintrin.h
- General Support -
- - - - - Mark the start of a TSX (HLE/RTM) suspend load address tracking region. If this - is used inside a transactional region, subsequent loads are not added to the read set of - the transaction. If this is used inside a suspend load address tracking region it will - cause transaction abort. If this is used outside of a transactional region it behaves - like a NOP. - - TSXLDTRK -
immintrin.h
- Miscellaneous + + + + Negate packed 32-bit integers in "a" when the corresponding signed 32-bit integer in "b" is negative, and store the results in "dst". Elements in "dst" are zeroed out when the corresponding element in "b" is zero. + +FOR j := 0 to 1 + i := j*32 + IF b[i+31:i] < 0 + dst[i+31:i] := -(a[i+31:i]) + ELSE IF b[i+31:i] == 0 + dst[i+31:i] := 0 + ELSE + dst[i+31:i] := a[i+31:i] + FI +ENDFOR + + + SSSE3 +
tmmintrin.h
+ Arithmetic +
+ + + + + + Copy the current 64-bit value of the processor's time-stamp counter into "dst". + dst[63:0] := TimeStampCounter + + + TSC +
immintrin.h
+ General Support +
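
In Rust this is the stable `_rdtsc` intrinsic. One caveat worth keeping in mind: on modern CPUs the TSC ticks at a constant (invariant) rate, so deltas measure wall-clock-like time rather than retired core cycles. A minimal sketch:

    use std::arch::x86_64::_rdtsc;

    fn main() {
        let start = unsafe { _rdtsc() };
        let v: u64 = (0..1_000u64).sum();
        let ticks = unsafe { _rdtsc() }.wrapping_sub(start);
        println!("sum = {v}, elapsed ~ {ticks} TSC ticks");
    }
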
+ + + + + Mark the start of a TSX (HLE/RTM) suspend load address tracking region. If this is used inside a transactional region, subsequent loads are not added to the read set of the transaction. If this is used inside a suspend load address tracking region it will cause transaction abort. If this is used outside of a transactional region it behaves like a NOP. + + TSXLDTRK +
immintrin.h
+ Miscellaneous
- - Mark the end of a TSX (HLE/RTM) suspend load address tracking region. If this - is used inside a suspend load address tracking region it will end the suspend region and - all following load addresses will be added to the transaction read set. If this is used - inside an active transaction but not in a suspend region it will cause transaction - abort. If this is used outside of a transactional region it behaves like a NOP. - - TSXLDTRK -
immintrin.h
- Miscellaneous -
- - - + + Mark the end of a TSX (HLE/RTM) suspend load address tracking region. If this is used inside a suspend load address tracking region it will end the suspend region and all following load addresses will be added to the transaction read set. If this is used inside an active transaction but not in a suspend region it will cause transaction abort. If this is used outside of a transactional region it behaves like a NOP. + + TSXLDTRK +
immintrin.h
+ Miscellaneous +
+ + + Clear the user interrupt flag (UIF). - UINTR -
immintrin.h
- General Support + UINTR +
immintrin.h
+ General Support
Send user interprocessor interrupts specified in unsigned 64-bit integer "__a". - UINTR -
immintrin.h
- General Support + UINTR +
immintrin.h
+ General Support
Sets the user interrupt flag (UIF). - UINTR -
immintrin.h
- General Support + UINTR +
immintrin.h
+ General Support
Store the current user interrupt flag (UIF) in unsigned 8-bit integer "dst". - UINTR -
immintrin.h
- General Support -
- - - - - Reads the contents of a 64-bit MSR specified in "__A" into "dst". - DEST := MSR[__A] - - - USER_MSR -
x86gprintrin.h
- General Support -
- - - - - Writes the contents of "__B" into the 64-bit MSR specified in "__A". - MSR[__A] := __B - - - USER_MSR -
x86gprintrin.h
- General Support -
- - - - - Perform the last round of an AES encryption flow on data (state) in "a" using - the round key in "RoundKey", and store the results in "dst"." - FOR j := 0 to 1 - i := j*128 - a[i+127:i] := ShiftRows(a[i+127:i]) - a[i+127:i] := SubBytes(a[i+127:i]) - dst[i+127:i] := a[i+127:i] XOR RoundKey[i+127:i] - ENDFOR - dst[MAX:256] := 0 - - - VAES - AVX512VL -
immintrin.h
- Cryptography + UINTR +
immintrin.h
+ General Support +
+ + + + + Reads the contents of a 64-bit MSR specified in "__A" into "dst". + DEST := MSR[__A] + + + USER_MSR +
x86gprintrin.h
+ General Support +
+ + + + + Writes the contents of "__B" into the 64-bit MSR specified in "__A". + MSR[__A] := __B + + + USER_MSR +
x86gprintrin.h
+ General Support +
+ + + + + Perform the last round of an AES encryption flow on data (state) in "a" using the round key in "RoundKey", and store the results in "dst". + FOR j := 0 to 1 + i := j*128 + a[i+127:i] := ShiftRows(a[i+127:i]) + a[i+127:i] := SubBytes(a[i+127:i]) + dst[i+127:i] := a[i+127:i] XOR RoundKey[i+127:i] +ENDFOR +dst[MAX:256] := 0 + + VAES + AVX512VL +
immintrin.h
+ Cryptography
- - - - Perform one round of an AES encryption flow on data (state) in "a" using the - round key in "RoundKey", and store the results in "dst"." - FOR j := 0 to 1 - i := j*128 - a[i+127:i] := ShiftRows(a[i+127:i]) - a[i+127:i] := SubBytes(a[i+127:i]) - a[i+127:i] := MixColumns(a[i+127:i]) - dst[i+127:i] := a[i+127:i] XOR RoundKey[i+127:i] - ENDFOR - dst[MAX:256] := 0 - - - VAES - AVX512VL -
immintrin.h
- Cryptography + + + + Perform one round of an AES encryption flow on data (state) in "a" using the round key in "RoundKey", and store the results in "dst". + FOR j := 0 to 1 + i := j*128 + a[i+127:i] := ShiftRows(a[i+127:i]) + a[i+127:i] := SubBytes(a[i+127:i]) + a[i+127:i] := MixColumns(a[i+127:i]) + dst[i+127:i] := a[i+127:i] XOR RoundKey[i+127:i] +ENDFOR +dst[MAX:256] := 0 + + VAES + AVX512VL +
immintrin.h
+ Cryptography
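
The VAES form above simply iterates the same AES round over two 128-bit lanes. The 256-bit intrinsics are not yet stable in Rust's core::arch at the time of writing, but the per-lane recurrence is exactly the stable 128-bit AES-NI form, sketched here (assumes an "aes"-capable CPU):

    use std::arch::x86_64::*;

    #[target_feature(enable = "aes")]
    unsafe fn aes_round(state: __m128i, round_key: __m128i) -> __m128i {
        // ShiftRows, SubBytes, MixColumns, then XOR with the round key,
        // matching the per-128-bit-lane pseudocode above
        _mm_aesenc_si128(state, round_key)
    }
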
- - - - Perform the last round of an AES decryption flow on data (state) in "a" using - the round key in "RoundKey", and store the results in "dst". - FOR j := 0 to 1 - i := j*128 - a[i+127:i] := InvShiftRows(a[i+127:i]) - a[i+127:i] := InvSubBytes(a[i+127:i]) - dst[i+127:i] := a[i+127:i] XOR RoundKey[i+127:i] - ENDFOR - dst[MAX:256] := 0 - - - VAES - AVX512VL -
immintrin.h
- Cryptography + + + + Perform the last round of an AES decryption flow on data (state) in "a" using the round key in "RoundKey", and store the results in "dst". + FOR j := 0 to 1 + i := j*128 + a[i+127:i] := InvShiftRows(a[i+127:i]) + a[i+127:i] := InvSubBytes(a[i+127:i]) + dst[i+127:i] := a[i+127:i] XOR RoundKey[i+127:i] +ENDFOR +dst[MAX:256] := 0 + + + VAES + AVX512VL +
immintrin.h
+ Cryptography
- - - - Perform one round of an AES decryption flow on data (state) in "a" using the - round key in "RoundKey", and store the results in "dst". - FOR j := 0 to 1 - i := j*128 - a[i+127:i] := InvShiftRows(a[i+127:i]) - a[i+127:i] := InvSubBytes(a[i+127:i]) - a[i+127:i] := InvMixColumns(a[i+127:i]) - dst[i+127:i] := a[i+127:i] XOR RoundKey[i+127:i] - ENDFOR - dst[MAX:256] := 0 - - - VAES - AVX512VL -
immintrin.h
- Cryptography -
- - - - - - - - Carry-less multiplication of one quadword of - 'b' by one quadword of 'c', stores - the 128-bit result in 'dst'. The immediate 'Imm8' is - used to determine which quadwords of 'b' - and 'c' should be used. - - DEFINE PCLMUL128(X,Y) { - FOR i := 0 to 63 - TMP[i] := X[ 0 ] and Y[ i ] - FOR j := 1 to i + + + + Perform one round of an AES decryption flow on data (state) in "a" using the round key in "RoundKey", and store the results in "dst". + FOR j := 0 to 1 + i := j*128 + a[i+127:i] := InvShiftRows(a[i+127:i]) + a[i+127:i] := InvSubBytes(a[i+127:i]) + a[i+127:i] := InvMixColumns(a[i+127:i]) + dst[i+127:i] := a[i+127:i] XOR RoundKey[i+127:i] +ENDFOR +dst[MAX:256] := 0 + + + VAES + AVX512VL +
immintrin.h
+ Cryptography +
+ + + + + + + + Carry-less multiplication of one quadword of + 'b' by one quadword of 'c', stores + the 128-bit result in 'dst'. The immediate 'Imm8' is + used to determine which quadwords of 'b' + and 'c' should be used. + +DEFINE PCLMUL128(X,Y) { + FOR i := 0 to 63 + TMP[i] := X[ 0 ] and Y[ i ] + FOR j := 1 to i TMP[i] := TMP[i] xor (X[ j ] and Y[ i - j ]) - ENDFOR - DEST[ i ] := TMP[ i ] - ENDFOR - FOR i := 64 to 126 - TMP[i] := 0 - FOR j := i - 63 to 63 + ENDFOR + DEST[ i ] := TMP[ i ] + ENDFOR + FOR i := 64 to 126 + TMP[i] := 0 + FOR j := i - 63 to 63 TMP[i] := TMP[i] xor (X[ j ] and Y[ i - j ]) - ENDFOR - DEST[ i ] := TMP[ i ] - ENDFOR - DEST[127] := 0 - RETURN DEST // 128b vector - } - FOR i := 0 to 1 - IF Imm8[0] == 0 - TEMP1 := b.m128[i].qword[0] - ELSE - TEMP1 := b.m128[i].qword[1] - FI - IF Imm8[4] == 0 - TEMP2 := c.m128[i].qword[0] - ELSE - TEMP2 := c.m128[i].qword[1] - FI - dst.m128[i] := PCLMUL128(TEMP1, TEMP2) - ENDFOR - dst[MAX:256] := 0 - - - VPCLMULQDQ - AVX512VL -
immintrin.h
- Application-Targeted -
- - - - - - - - Carry-less multiplication of one quadword of - 'b' by one quadword of 'c', stores - the 128-bit result in 'dst'. The immediate 'Imm8' is - used to determine which quadwords of 'b' - and 'c' should be used. - - DEFINE PCLMUL128(X,Y) { - FOR i := 0 to 63 - TMP[i] := X[ 0 ] and Y[ i ] - FOR j := 1 to i + ENDFOR + DEST[ i ] := TMP[ i ] + ENDFOR + DEST[127] := 0 + RETURN DEST // 128b vector +} +FOR i := 0 to 1 + IF Imm8[0] == 0 + TEMP1 := b.m128[i].qword[0] + ELSE + TEMP1 := b.m128[i].qword[1] + FI + IF Imm8[4] == 0 + TEMP2 := c.m128[i].qword[0] + ELSE + TEMP2 := c.m128[i].qword[1] + FI + dst.m128[i] := PCLMUL128(TEMP1, TEMP2) +ENDFOR +dst[MAX:256] := 0 + + + VPCLMULQDQ + AVX512VL +
immintrin.h
+ Application-Targeted +
+ + + + + + + + Carry-less multiplication of one quadword of + 'b' by one quadword of 'c', stores + the 128-bit result in 'dst'. The immediate 'Imm8' is + used to determine which quadwords of 'b' + and 'c' should be used. + +DEFINE PCLMUL128(X,Y) { + FOR i := 0 to 63 + TMP[i] := X[ 0 ] and Y[ i ] + FOR j := 1 to i TMP[i] := TMP[i] xor (X[ j ] and Y[ i - j ]) - ENDFOR - DEST[ i ] := TMP[ i ] - ENDFOR - FOR i := 64 to 126 - TMP[i] := 0 - FOR j := i - 63 to 63 + ENDFOR + DEST[ i ] := TMP[ i ] + ENDFOR + FOR i := 64 to 126 + TMP[i] := 0 + FOR j := i - 63 to 63 TMP[i] := TMP[i] xor (X[ j ] and Y[ i - j ]) - ENDFOR - DEST[ i ] := TMP[ i ] - ENDFOR - DEST[127] := 0 - RETURN DEST // 128b vector - } - FOR i := 0 to 3 - IF Imm8[0] == 0 - TEMP1 := b.m128[i].qword[0] - ELSE - TEMP1 := b.m128[i].qword[1] - FI - IF Imm8[4] == 0 - TEMP2 := c.m128[i].qword[0] - ELSE - TEMP2 := c.m128[i].qword[1] - FI - dst.m128[i] := PCLMUL128(TEMP1, TEMP2) - ENDFOR - dst[MAX:512] := 0 - - - VPCLMULQDQ -
immintrin.h
- Application-Targeted -
- - - - - - - Directs the processor to enter an implementation-dependent optimized state - until the TSC reaches or exceeds the value specified in "counter". Bit 0 of "ctrl" - selects between a lower power (cleared) or faster wakeup (set) optimized state. Returns - the carry flag (CF). If the processor that executed a UMWAIT instruction wakes due to - the expiration of the operating system timelimit, the instructions sets RFLAGS.CF; - otherwise, that flag is cleared. - - WAITPKG -
immintrin.h
- Miscellaneous + ENDFOR + DEST[ i ] := TMP[ i ] + ENDFOR + DEST[127] := 0 + RETURN DEST // 128b vector +} +FOR i := 0 to 3 + IF Imm8[0] == 0 + TEMP1 := b.m128[i].qword[0] + ELSE + TEMP1 := b.m128[i].qword[1] + FI + IF Imm8[4] == 0 + TEMP2 := c.m128[i].qword[0] + ELSE + TEMP2 := c.m128[i].qword[1] + FI + dst.m128[i] := PCLMUL128(TEMP1, TEMP2) +ENDFOR +dst[MAX:512] := 0 + + + VPCLMULQDQ +
immintrin.h
+ Application-Targeted +
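
Carry-less multiplication is ordinary long multiplication with XOR in place of addition (no carries propagate), which is why it underlies GF(2^128) arithmetic such as GHASH in AES-GCM and fast CRC folding. The wide forms above repeat the 128-bit primitive per lane; that primitive is stable in Rust as `_mm_clmulepi64_si128`. A sketch (assumes PCLMULQDQ support):

    use std::arch::x86_64::*;

    #[target_feature(enable = "pclmulqdq")]
    unsafe fn clmul_low(a: __m128i, b: __m128i) -> __m128i {
        // Imm8 bit 0 selects the quadword of the first operand and bit 4
        // the second; 0x00 multiplies the two low quadwords
        _mm_clmulepi64_si128::<0x00>(a, b)
    }
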
+ + + + + + + Directs the processor to enter an implementation-dependent optimized state until the TSC reaches or exceeds the value specified in "counter". Bit 0 of "ctrl" selects between a lower power (cleared) or faster wakeup (set) optimized state. Returns the carry flag (CF). If the processor that executed a UMWAIT instruction wakes due to the expiration of the operating system time limit, the instruction sets RFLAGS.CF; otherwise, that flag is cleared. + + WAITPKG +
immintrin.h
+ Miscellaneous
- - - - Directs the processor to enter an implementation-dependent optimized state - while monitoring a range of addresses. The instruction wakes up when the TSC reaches or - exceeds the value specified in "counter" (if the monitoring hardware did not trigger - beforehand). Bit 0 of "ctrl" selects between a lower power (cleared) or faster wakeup - (set) optimized state. Returns the carry flag (CF). If the processor that executed a - UMWAIT instruction wakes due to the expiration of the operating system timelimit, the - instructions sets RFLAGS.CF; otherwise, that flag is cleared. - - WAITPKG -
immintrin.h
- Miscellaneous + + + + Directs the processor to enter an implementation-dependent optimized state while monitoring a range of addresses. The instruction wakes up when the TSC reaches or exceeds the value specified in "counter" (if the monitoring hardware did not trigger beforehand). Bit 0 of "ctrl" selects between a lower power (cleared) or faster wakeup (set) optimized state. Returns the carry flag (CF). If the processor that executed a UMWAIT instruction wakes due to the expiration of the operating system time limit, the instruction sets RFLAGS.CF; otherwise, that flag is cleared. + + WAITPKG +
immintrin.h
+ Miscellaneous
- - - Sets up a linear address range to be - monitored by hardware and activates the - monitor. The address range should be a writeback - memory caching type. The address is - contained in "a". - - WAITPKG -
immintrin.h
- Miscellaneous -
- - - - - - Write back and do not flush internal caches. - Initiate writing-back without flushing of external - caches. - - WBNOINVD -
immintrin.h
- Miscellaneous -
- - - - - - - Perform a full or partial save of the enabled processor states to memory at - "mem_addr"; xsavec differs from xsave in that it uses compaction and that it may use - init optimization. State is saved based on bits [62:0] in "save_mask" and "XCR0". - "mem_addr" must be aligned on a 64-byte boundary. - mask[62:0] := save_mask[62:0] AND XCR0[62:0] - FOR i := 0 to 62 - IF mask[i] - CASE (i) OF - 0: mem_addr.FPUSSESave_Area[FPU] := ProcessorState[x87_FPU] - 1: mem_addr.FPUSSESaveArea[SSE] := ProcessorState[SSE] - DEFAULT: mem_addr.Ext_Save_Area[i] := ProcessorState[i] - ESAC - mem_addr.HEADER.XSTATE_BV[i] := INIT_FUNCTION[i] - FI - i := i + 1 - ENDFOR - - - XSAVE - XSAVEC -
immintrin.h
- OS-Targeted + + + Sets up a linear address range to be + monitored by hardware and activates the + monitor. The address range should be a writeback + memory caching type. The address is + contained in "a". + + WAITPKG +
immintrin.h
+ Miscellaneous +
+ + + + + + Write back and do not flush internal caches. + Initiate writing-back without flushing of external + caches. + + WBNOINVD +
immintrin.h
+ Miscellaneous +
+ + + + + + + Perform a full or partial save of the enabled processor states to memory at "mem_addr"; xsavec differs from xsave in that it uses compaction and that it may use init optimization. State is saved based on bits [62:0] in "save_mask" and "XCR0". "mem_addr" must be aligned on a 64-byte boundary. + mask[62:0] := save_mask[62:0] AND XCR0[62:0] +FOR i := 0 to 62 + IF mask[i] + CASE (i) OF + 0: mem_addr.FPUSSESave_Area[FPU] := ProcessorState[x87_FPU] + 1: mem_addr.FPUSSESaveArea[SSE] := ProcessorState[SSE] + DEFAULT: mem_addr.Ext_Save_Area[i] := ProcessorState[i] + ESAC + mem_addr.HEADER.XSTATE_BV[i] := INIT_FUNCTION[i] + FI + i := i + 1 +ENDFOR + + + XSAVE + XSAVEC +
immintrin.h
+ OS-Targeted
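
For reference, this maps to the stable `_xsavec` intrinsic in Rust. A minimal sketch; the 4096-byte buffer size is an assumption for illustration (real code should query the required size from CPUID leaf 0Dh), while the 64-byte alignment is mandatory:

    use std::arch::x86_64::_xsavec;

    // 64-byte alignment is required; the size here is a placeholder.
    #[repr(C, align(64))]
    struct XsaveArea([u8; 4096]);

    #[target_feature(enable = "xsave,xsavec")]
    unsafe fn save_x87_sse(area: &mut XsaveArea) {
        // save_mask bits 0 (x87) and 1 (SSE); intersected with XCR0 as above
        _xsavec(area.0.as_mut_ptr(), 0b11);
    }
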
- - - - Perform a full or partial save of the enabled processor states to memory at - "mem_addr"; xsavec differs from xsave in that it uses compaction and that it may use - init optimization. State is saved based on bits [62:0] in "save_mask" and "XCR0". - "mem_addr" must be aligned on a 64-byte boundary. - mask[62:0] := save_mask[62:0] AND XCR0[62:0] - FOR i := 0 to 62 - IF mask[i] - CASE (i) OF - 0: mem_addr.FPUSSESave_Area[FPU] := ProcessorState[x87_FPU] - 1: mem_addr.FPUSSESaveArea[SSE] := ProcessorState[SSE] - DEFAULT: mem_addr.Ext_Save_Area[i] := ProcessorState[i] - ESAC - mem_addr.HEADER.XSTATE_BV[i] := INIT_FUNCTION[i] - FI - i := i + 1 - ENDFOR - - - XSAVE - XSAVEC -
immintrin.h
- OS-Targeted -
- - - - - - - Perform a full or partial save of the enabled processor states to memory at - "mem_addr". State is saved based on bits [62:0] in "save_mask" and "XCR0". "mem_addr" - must be aligned on a 64-byte boundary. The hardware may optimize the manner in which - data is saved. The performance of this instruction will be equal to or better than using - the XSAVE instruction. - mask[62:0] := save_mask[62:0] AND XCR0[62:0] - FOR i := 0 to 62 - IF mask[i] - CASE (i) OF - 0: mem_addr.FPUSSESave_Area[FPU] := ProcessorState[x87_FPU] - 1: mem_addr.FPUSSESaveArea[SSE] := ProcessorState[SSE] - 2: mem_addr.EXT_SAVE_Area2[YMM] := ProcessorState[YMM] - DEFAULT: mem_addr.Ext_Save_Area[i] := ProcessorState[i] - ESAC - mem_addr.HEADER.XSTATE_BV[i] := INIT_FUNCTION[i] - FI - i := i + 1 - ENDFOR - - - XSAVE - XSAVEOPT -
immintrin.h
- OS-Targeted + + + + Perform a full or partial save of the enabled processor states to memory at "mem_addr"; xsavec differs from xsave in that it uses compaction and that it may use init optimization. State is saved based on bits [62:0] in "save_mask" and "XCR0". "mem_addr" must be aligned on a 64-byte boundary. + mask[62:0] := save_mask[62:0] AND XCR0[62:0] +FOR i := 0 to 62 + IF mask[i] + CASE (i) OF + 0: mem_addr.FPUSSESave_Area[FPU] := ProcessorState[x87_FPU] + 1: mem_addr.FPUSSESaveArea[SSE] := ProcessorState[SSE] + DEFAULT: mem_addr.Ext_Save_Area[i] := ProcessorState[i] + ESAC + mem_addr.HEADER.XSTATE_BV[i] := INIT_FUNCTION[i] + FI + i := i + 1 +ENDFOR + + + XSAVE + XSAVEC +
immintrin.h
+ OS-Targeted +
+ + + + + + + Perform a full or partial save of the enabled processor states to memory at "mem_addr". State is saved based on bits [62:0] in "save_mask" and "XCR0". "mem_addr" must be aligned on a 64-byte boundary. The hardware may optimize the manner in which data is saved. The performance of this instruction will be equal to or better than using the XSAVE instruction. + mask[62:0] := save_mask[62:0] AND XCR0[62:0] +FOR i := 0 to 62 + IF mask[i] + CASE (i) OF + 0: mem_addr.FPUSSESave_Area[FPU] := ProcessorState[x87_FPU] + 1: mem_addr.FPUSSESaveArea[SSE] := ProcessorState[SSE] + 2: mem_addr.EXT_SAVE_Area2[YMM] := ProcessorState[YMM] + DEFAULT: mem_addr.Ext_Save_Area[i] := ProcessorState[i] + ESAC + mem_addr.HEADER.XSTATE_BV[i] := INIT_FUNCTION[i] + FI + i := i + 1 +ENDFOR + + + XSAVE + XSAVEOPT +
immintrin.h
+ OS-Targeted
- - - - Perform a full or partial save of the enabled processor states to memory at - "mem_addr". State is saved based on bits [62:0] in "save_mask" and "XCR0". "mem_addr" - must be aligned on a 64-byte boundary. The hardware may optimize the manner in which - data is saved. The performance of this instruction will be equal to or better than using - the XSAVE64 instruction. - mask[62:0] := save_mask[62:0] AND XCR0[62:0] - FOR i := 0 to 62 - IF mask[i] - CASE (i) OF - 0: mem_addr.FPUSSESave_Area[FPU] := ProcessorState[x87_FPU] - 1: mem_addr.FPUSSESaveArea[SSE] := ProcessorState[SSE] - 2: mem_addr.EXT_SAVE_Area2[YMM] := ProcessorState[YMM] - DEFAULT: mem_addr.Ext_Save_Area[i] := ProcessorState[i] - ESAC - mem_addr.HEADER.XSTATE_BV[i] := INIT_FUNCTION[i] - FI - i := i + 1 - ENDFOR - - - XSAVE - XSAVEOPT -
immintrin.h
- OS-Targeted -
- - - - - - - Perform a full or partial save of the enabled processor states to memory at - "mem_addr"; xsaves differs from xsave in that it can save state components corresponding - to bits set in IA32_XSS MSR and that it may use the modified optimization. State is - saved based on bits [62:0] in "save_mask" and "XCR0". "mem_addr" must be aligned on a - 64-byte boundary. - mask[62:0] := save_mask[62:0] AND XCR0[62:0] - FOR i := 0 to 62 - IF mask[i] - CASE (i) OF - 0: mem_addr.FPUSSESave_Area[FPU] := ProcessorState[x87_FPU] - 1: mem_addr.FPUSSESaveArea[SSE] := ProcessorState[SSE] - DEFAULT: mem_addr.Ext_Save_Area[i] := ProcessorState[i] - ESAC - mem_addr.HEADER.XSTATE_BV[i] := INIT_FUNCTION[i] - FI - i := i + 1 - ENDFOR - - - XSAVE - XSS -
immintrin.h
- OS-Targeted + + + + Perform a full or partial save of the enabled processor states to memory at "mem_addr". State is saved based on bits [62:0] in "save_mask" and "XCR0". "mem_addr" must be aligned on a 64-byte boundary. The hardware may optimize the manner in which data is saved. The performance of this instruction will be equal to or better than using the XSAVE64 instruction. + mask[62:0] := save_mask[62:0] AND XCR0[62:0] +FOR i := 0 to 62 + IF mask[i] + CASE (i) OF + 0: mem_addr.FPUSSESave_Area[FPU] := ProcessorState[x87_FPU] + 1: mem_addr.FPUSSESaveArea[SSE] := ProcessorState[SSE] + 2: mem_addr.EXT_SAVE_Area2[YMM] := ProcessorState[YMM] + DEFAULT: mem_addr.Ext_Save_Area[i] := ProcessorState[i] + ESAC + mem_addr.HEADER.XSTATE_BV[i] := INIT_FUNCTION[i] + FI + i := i + 1 +ENDFOR + + + XSAVE + XSAVEOPT +
immintrin.h
+ OS-Targeted +
+ + + + + + + Perform a full or partial save of the enabled processor states to memory at "mem_addr"; xsaves differs from xsave in that it can save state components corresponding to bits set in IA32_XSS MSR and that it may use the modified optimization. State is saved based on bits [62:0] in "save_mask" and "XCR0". "mem_addr" must be aligned on a 64-byte boundary. + mask[62:0] := save_mask[62:0] AND XCR0[62:0] +FOR i := 0 to 62 + IF mask[i] + CASE (i) OF + 0: mem_addr.FPUSSESave_Area[FPU] := ProcessorState[x87_FPU] + 1: mem_addr.FPUSSESaveArea[SSE] := ProcessorState[SSE] + DEFAULT: mem_addr.Ext_Save_Area[i] := ProcessorState[i] + ESAC + mem_addr.HEADER.XSTATE_BV[i] := INIT_FUNCTION[i] + FI + i := i + 1 +ENDFOR + + + XSAVE + XSS +
immintrin.h
+ OS-Targeted
- - - - Perform a full or partial save of the enabled processor states to memory at - "mem_addr"; xsaves differs from xsave in that it can save state components corresponding - to bits set in IA32_XSS MSR and that it may use the modified optimization. State is - saved based on bits [62:0] in "save_mask" and "XCR0". "mem_addr" must be aligned on a - 64-byte boundary. - mask[62:0] := save_mask[62:0] AND XCR0[62:0] - FOR i := 0 to 62 - IF mask[i] - CASE (i) OF - 0: mem_addr.FPUSSESave_Area[FPU] := ProcessorState[x87_FPU] - 1: mem_addr.FPUSSESaveArea[SSE] := ProcessorState[SSE] - DEFAULT: mem_addr.Ext_Save_Area[i] := ProcessorState[i] - ESAC - mem_addr.HEADER.XSTATE_BV[i] := INIT_FUNCTION[i] - FI - i := i + 1 - ENDFOR - - - XSAVE - XSS -
immintrin.h
- OS-Targeted + + + + Perform a full or partial save of the enabled processor states to memory at "mem_addr"; xsaves differs from xsave in that it can save state components corresponding to bits set in IA32_XSS MSR and that it may use the modified optimization. State is saved based on bits [62:0] in "save_mask" and "XCR0". "mem_addr" must be aligned on a 64-byte boundary. + mask[62:0] := save_mask[62:0] AND XCR0[62:0] +FOR i := 0 to 62 + IF mask[i] + CASE (i) OF + 0: mem_addr.FPUSSESave_Area[FPU] := ProcessorState[x87_FPU] + 1: mem_addr.FPUSSESaveArea[SSE] := ProcessorState[SSE] + DEFAULT: mem_addr.Ext_Save_Area[i] := ProcessorState[i] + ESAC + mem_addr.HEADER.XSTATE_BV[i] := INIT_FUNCTION[i] + FI + i := i + 1 +ENDFOR + + + XSAVE + XSS +
immintrin.h
+ OS-Targeted
- - - - Perform a full or partial restore of the enabled processor states using the - state information stored in memory at "mem_addr". xrstors differs from xrstor in that it - can restore state components corresponding to bits set in the IA32_XSS MSR; xrstors - cannot restore from an xsave area in which the extended region is in the standard form. - State is restored based on bits [62:0] in "rs_mask", "XCR0", and - "mem_addr.HEADER.XSTATE_BV". "mem_addr" must be aligned on a 64-byte boundary. - st_mask := mem_addr.HEADER.XSTATE_BV[62:0] - FOR i := 0 to 62 - IF (rs_mask[i] AND XCR0[i]) - IF st_mask[i] + + + + Perform a full or partial restore of the enabled processor states using the state information stored in memory at "mem_addr". xrstors differs from xrstor in that it can restore state components corresponding to bits set in the IA32_XSS MSR; xrstors cannot restore from an xsave area in which the extended region is in the standard form. State is restored based on bits [62:0] in "rs_mask", "XCR0", and "mem_addr.HEADER.XSTATE_BV". "mem_addr" must be aligned on a 64-byte boundary. + st_mask := mem_addr.HEADER.XSTATE_BV[62:0] +FOR i := 0 to 62 + IF (rs_mask[i] AND XCR0[i]) + IF st_mask[i] CASE (i) OF 0: ProcessorState[x87_FPU] := mem_addr.FPUSSESave_Area[FPU] 1: ProcessorState[SSE] := mem_addr.FPUSSESaveArea[SSE] DEFAULT: ProcessorState[i] := mem_addr.Ext_Save_Area[i] ESAC - ELSE + ELSE // ProcessorExtendedState := Processor Supplied Values CASE (i) OF 1: MXCSR := mem_addr.FPUSSESave_Area[SSE] ESAC - FI - FI - i := i + 1 - ENDFOR - - - XSAVE - XSS -
immintrin.h
- OS-Targeted + FI + FI + i := i + 1 +ENDFOR +
+ + XSAVE + XSS +
immintrin.h
+ OS-Targeted
- - - - Perform a full or partial restore of the enabled processor states using the - state information stored in memory at "mem_addr". xrstors differs from xrstor in that it - can restore state components corresponding to bits set in the IA32_XSS MSR; xrstors - cannot restore from an xsave area in which the extended region is in the standard form. - State is restored based on bits [62:0] in "rs_mask", "XCR0", and - "mem_addr.HEADER.XSTATE_BV". "mem_addr" must be aligned on a 64-byte boundary. - st_mask := mem_addr.HEADER.XSTATE_BV[62:0] - FOR i := 0 to 62 - IF (rs_mask[i] AND XCR0[i]) - IF st_mask[i] + + + + Perform a full or partial restore of the enabled processor states using the state information stored in memory at "mem_addr". xrstors differs from xrstor in that it can restore state components corresponding to bits set in the IA32_XSS MSR; xrstors cannot restore from an xsave area in which the extended region is in the standard form. State is restored based on bits [62:0] in "rs_mask", "XCR0", and "mem_addr.HEADER.XSTATE_BV". "mem_addr" must be aligned on a 64-byte boundary. + st_mask := mem_addr.HEADER.XSTATE_BV[62:0] +FOR i := 0 to 62 + IF (rs_mask[i] AND XCR0[i]) + IF st_mask[i] CASE (i) OF 0: ProcessorState[x87_FPU] := mem_addr.FPUSSESave_Area[FPU] 1: ProcessorState[SSE] := mem_addr.FPUSSESaveArea[SSE] DEFAULT: ProcessorState[i] := mem_addr.Ext_Save_Area[i] ESAC - ELSE + ELSE // ProcessorExtendedState := Processor Supplied Values CASE (i) OF 1: MXCSR := mem_addr.FPUSSESave_Area[SSE] ESAC - FI - FI - i := i + 1 - ENDFOR - - - XSAVE - XSS -
immintrin.h
- OS-Targeted -
- - - - - - Copy up to 64-bits from the value of the extended control register (XCR) - specified by "a" into "dst". Currently only XFEATURE_ENABLED_MASK XCR is supported. - dst[63:0] := XCR[a] - - - XSAVE -
immintrin.h
- OS-Targeted + FI + FI + i := i + 1 +ENDFOR + + + XSAVE + XSS +
immintrin.h
+ OS-Targeted +
+ + + + + + Copy up to 64-bits from the value of the extended control register (XCR) specified by "a" into "dst". Currently only XFEATURE_ENABLED_MASK XCR is supported. + dst[63:0] := XCR[a] + + + XSAVE +
immintrin.h
+ OS-Targeted
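
Reading XCR0 is how user code checks which register states the OS has enabled, for example that both SSE (bit 1) and AVX (bit 2) state are context-switched before running AVX code. A sketch with the stable `_xgetbv` intrinsic:

    use std::arch::x86_64::_xgetbv;

    #[target_feature(enable = "xsave")]
    unsafe fn xcr0() -> u64 {
        _xgetbv(0) // 0 selects XFEATURE_ENABLED_MASK (XCR0)
    }

    fn avx_state_enabled() -> bool {
        // bits 1 (SSE/XMM) and 2 (AVX/YMM) must both be set in XCR0
        is_x86_feature_detected!("xsave") && unsafe { xcr0() } & 0b110 == 0b110
    }
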
- - - - Perform a full or partial restore of the enabled processor states using the - state information stored in memory at "mem_addr". State is restored based on bits [62:0] - in "rs_mask", "XCR0", and "mem_addr.HEADER.XSTATE_BV". "mem_addr" must be aligned on a - 64-byte boundary. - st_mask := mem_addr.HEADER.XSTATE_BV[62:0] - FOR i := 0 to 62 - IF (rs_mask[i] AND XCR0[i]) - IF st_mask[i] + + + + Perform a full or partial restore of the enabled processor states using the state information stored in memory at "mem_addr". State is restored based on bits [62:0] in "rs_mask", "XCR0", and "mem_addr.HEADER.XSTATE_BV". "mem_addr" must be aligned on a 64-byte boundary. + st_mask := mem_addr.HEADER.XSTATE_BV[62:0] +FOR i := 0 to 62 + IF (rs_mask[i] AND XCR0[i]) + IF st_mask[i] CASE (i) OF 0: ProcessorState[x87_FPU] := mem_addr.FPUSSESave_Area[FPU] 1: ProcessorState[SSE] := mem_addr.FPUSSESaveArea[SSE] DEFAULT: ProcessorState[i] := mem_addr.Ext_Save_Area[i] ESAC - ELSE + ELSE // ProcessorExtendedState := Processor Supplied Values CASE (i) OF 1: MXCSR := mem_addr.FPUSSESave_Area[SSE] ESAC - FI - FI - i := i + 1 - ENDFOR - - - XSAVE -
immintrin.h
- OS-Targeted + FI + FI + i := i + 1 +ENDFOR +
+ + XSAVE +
immintrin.h
+ OS-Targeted
- - - - Perform a full or partial restore of the enabled processor states using the - state information stored in memory at "mem_addr". State is restored based on bits [62:0] - in "rs_mask", "XCR0", and "mem_addr.HEADER.XSTATE_BV". "mem_addr" must be aligned on a - 64-byte boundary. - st_mask := mem_addr.HEADER.XSTATE_BV[62:0] - FOR i := 0 to 62 - IF (rs_mask[i] AND XCR0[i]) - IF st_mask[i] + + + + Perform a full or partial restore of the enabled processor states using the state information stored in memory at "mem_addr". State is restored based on bits [62:0] in "rs_mask", "XCR0", and "mem_addr.HEADER.XSTATE_BV". "mem_addr" must be aligned on a 64-byte boundary. + st_mask := mem_addr.HEADER.XSTATE_BV[62:0] +FOR i := 0 to 62 + IF (rs_mask[i] AND XCR0[i]) + IF st_mask[i] CASE (i) OF 0: ProcessorState[x87_FPU] := mem_addr.FPUSSESave_Area[FPU] 1: ProcessorState[SSE] := mem_addr.FPUSSESaveArea[SSE] DEFAULT: ProcessorState[i] := mem_addr.Ext_Save_Area[i] ESAC - ELSE + ELSE // ProcessorExtendedState := Processor Supplied Values CASE (i) OF 1: MXCSR := mem_addr.FPUSSESave_Area[SSE] ESAC - FI - FI - i := i + 1 - ENDFOR - - - XSAVE -
immintrin.h
- OS-Targeted + FI + FI + i := i + 1 +ENDFOR +
+ + XSAVE +
immintrin.h
+ OS-Targeted
- - - - Perform a full or partial save of the enabled processor states to memory at - "mem_addr". State is saved based on bits [62:0] in "save_mask" and "XCR0". "mem_addr" - must be aligned on a 64-byte boundary. - mask[62:0] := save_mask[62:0] AND XCR0[62:0] - FOR i := 0 to 62 - IF mask[i] - CASE (i) OF - 0: mem_addr.FPUSSESave_Area[FPU] := ProcessorState[x87_FPU] - 1: mem_addr.FPUSSESaveArea[SSE] := ProcessorState[SSE] - DEFAULT: mem_addr.Ext_Save_Area[i] := ProcessorState[i] - ESAC - mem_addr.HEADER.XSTATE_BV[i] := INIT_FUNCTION[i] - FI - i := i + 1 - ENDFOR - - - XSAVE -
immintrin.h
- OS-Targeted + + + + Perform a full or partial save of the enabled processor states to memory at "mem_addr". State is saved based on bits [62:0] in "save_mask" and "XCR0". "mem_addr" must be aligned on a 64-byte boundary. + mask[62:0] := save_mask[62:0] AND XCR0[62:0] +FOR i := 0 to 62 + IF mask[i] + CASE (i) OF + 0: mem_addr.FPUSSESave_Area[FPU] := ProcessorState[x87_FPU] + 1: mem_addr.FPUSSESaveArea[SSE] := ProcessorState[SSE] + DEFAULT: mem_addr.Ext_Save_Area[i] := ProcessorState[i] + ESAC + mem_addr.HEADER.XSTATE_BV[i] := INIT_FUNCTION[i] + FI + i := i + 1 +ENDFOR + + + XSAVE +
immintrin.h
+ OS-Targeted
- - - - Perform a full or partial save of the enabled processor states to memory at - "mem_addr". State is saved based on bits [62:0] in "save_mask" and "XCR0". "mem_addr" - must be aligned on a 64-byte boundary. - mask[62:0] := save_mask[62:0] AND XCR0[62:0] - FOR i := 0 to 62 - IF mask[i] - CASE (i) OF - 0: mem_addr.FPUSSESave_Area[FPU] := ProcessorState[x87_FPU] - 1: mem_addr.FPUSSESaveArea[SSE] := ProcessorState[SSE] - DEFAULT: mem_addr.Ext_Save_Area[i] := ProcessorState[i] - ESAC - mem_addr.HEADER.XSTATE_BV[i] := INIT_FUNCTION[i] - FI - i := i + 1 - ENDFOR - - - XSAVE -
immintrin.h
- OS-Targeted + + + + Perform a full or partial save of the enabled processor states to memory at "mem_addr". State is saved based on bits [62:0] in "save_mask" and "XCR0". "mem_addr" must be aligned on a 64-byte boundary. + mask[62:0] := save_mask[62:0] AND XCR0[62:0] +FOR i := 0 to 62 + IF mask[i] + CASE (i) OF + 0: mem_addr.FPUSSESave_Area[FPU] := ProcessorState[x87_FPU] + 1: mem_addr.FPUSSESaveArea[SSE] := ProcessorState[SSE] + DEFAULT: mem_addr.Ext_Save_Area[i] := ProcessorState[i] + ESAC + mem_addr.HEADER.XSTATE_BV[i] := INIT_FUNCTION[i] + FI + i := i + 1 +ENDFOR + + + XSAVE +
immintrin.h
+ OS-Targeted
- - - - Copy 64-bits from "val" to the extended control register (XCR) specified by - "a". Currently only XFEATURE_ENABLED_MASK XCR is supported. - - XCR[a] := val[63:0] - - - XSAVE -
immintrin.h
- OS-Targeted -
- - + + + + Copy 64-bits from "val" to the extended control register (XCR) specified by "a". Currently only XFEATURE_ENABLED_MASK XCR is supported. + +XCR[a] := val[63:0] + + + XSAVE +
immintrin.h
+ OS-Targeted +
+ +
\ No newline at end of file